#include "ssl_local.h"
#include "internal/ktls.h"
+#ifndef OPENSSL_NO_KTLS_RX
+/*
+ * Count the number of records that have not yet been processed, starting
+ * from the current record boundary.
+ *
+ * This function assumes that the record layer has read only fully formed
+ * records. If read_ahead is enabled that assumption may not hold, and the
+ * function will fail.
+ */
+static int count_unprocessed_records(SSL *s)
+{
+    SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
+    PACKET pkt, subpkt;
+    int count = 0;
+
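+    /*
+     * The unprocessed data sits at rbuf->offset in the record layer's read
+     * buffer and is rbuf->left bytes long.
+     */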
+    if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
+        return -1;
+
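+    /*
+     * Each iteration consumes one complete TLS record: a 1-byte content
+     * type, a 2-byte version and a 2-byte length followed by the payload.
+     * A truncated header or payload makes one of the PACKET calls below
+     * fail, which is reported as a partial record.
+     */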
+    while (PACKET_remaining(&pkt) > 0) {
+        /* Skip record type and version */
+        if (!PACKET_forward(&pkt, 3))
+            return -1;
+
+        /* Read until next record */
+        if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
+            return -1;
+
+        count += 1;
+    }
+
+    return count;
+}
+
+/*
+ * The kernel cannot take over the receive side if a partial TLS record has
+ * already been read. Check the read buffer for unprocessed records: if it
+ * contains a partial record, fail and return 0. Otherwise advance the
+ * sequence number at *rec_seq by the number of unprocessed records and
+ * return 1.
+ */
+static int check_rx_read_ahead(SSL *s, unsigned char *rec_seq)
+{
+    int bit, count_unprocessed;
+
+    count_unprocessed = count_unprocessed_records(s);
+    if (count_unprocessed < 0)
+        return 0;
+
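+    /*
+     * rec_seq holds the 8-byte TLS record sequence number in network byte
+     * order, as it will be passed to the kernel in the crypto_info.
+     */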
+    /* increment the crypto_info record sequence */
+    while (count_unprocessed) {
+        for (bit = 7; bit >= 0; bit--) { /* increment */
+            ++rec_seq[bit];
+            if (rec_seq[bit] != 0)
+                break;
+        }
+        count_unprocessed--;
+    }
+
+    return 1;
+}
+#endif
+
#if defined(__FreeBSD__)
# include "crypto/cryptodev.h"
}
/* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
void *rl_sequence, ktls_crypto_info_t *crypto_info,
- unsigned char **rec_seq, unsigned char *iv,
+ int is_tx, unsigned char *iv,
unsigned char *key, unsigned char *mac_key,
size_t mac_secret_size)
{
crypto_info->tls_vminor = (s->version & 0x000000ff);
# ifdef TCP_RXTLS_ENABLE
memcpy(crypto_info->rec_seq, rl_sequence, sizeof(crypto_info->rec_seq));
- if (rec_seq != NULL)
- *rec_seq = crypto_info->rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->rec_seq))
+ return 0;
# else
- if (rec_seq != NULL)
- *rec_seq = NULL;
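+ /* Without TCP_RXTLS_ENABLE the kernel cannot take over the receive side */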
+ if (!is_tx)
+ return 0;
# endif
return 1;
};
}
/* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
void *rl_sequence, ktls_crypto_info_t *crypto_info,
- unsigned char **rec_seq, unsigned char *iv,
+ int is_tx, unsigned char *iv,
unsigned char *key, unsigned char *mac_key,
size_t mac_secret_size)
{
unsigned char geniv[12];
unsigned char *iiv = iv;
+# ifdef OPENSSL_NO_KTLS_RX
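+ /* KTLS RX support is not compiled in, so only the transmit direction can be offloaded */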
+ if (!is_tx)
+ return 0;
+# endif
+
if (s->version == TLS1_2_VERSION &&
EVP_CIPHER_get_mode(c) == EVP_CIPH_GCM_MODE) {
if (!EVP_CIPHER_CTX_get_updated_iv(dd, geniv,
memcpy(crypto_info->gcm128.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->gcm128.rec_seq, rl_sequence,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->gcm128.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm128.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_AES_GCM_256
memcpy(crypto_info->gcm256.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->gcm256.rec_seq, rl_sequence,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->gcm256.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm256.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_AES_CCM_128
memcpy(crypto_info->ccm128.key, key, EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->ccm128.rec_seq, rl_sequence,
TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->ccm128.rec_seq;
+ if (!is_tx && !check_rx_read_ahead(s, crypto_info->ccm128.rec_seq))
+ return 0;
return 1;
# endif
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
EVP_CIPHER_get_key_length(c));
memcpy(crypto_info->chacha20poly1305.rec_seq, rl_sequence,
TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
- if (rec_seq != NULL)
- *rec_seq = crypto_info->chacha20poly1305.rec_seq;
+ if (!is_tx
+ && !check_rx_read_ahead(s,
+ crypto_info->chacha20poly1305.rec_seq))
+ return 0;
return 1;
# endif
default:
return ret;
}
-#ifndef OPENSSL_NO_KTLS
- /*
- * Count the number of records that were not processed yet from record boundary.
- *
- * This function assumes that there are only fully formed records read in the
- * record layer. If read_ahead is enabled, then this might be false and this
- * function will fail.
- */
-# ifndef OPENSSL_NO_KTLS_RX
-static int count_unprocessed_records(SSL *s)
-{
- SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
- PACKET pkt, subpkt;
- int count = 0;
-
- if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
- return -1;
-
- while (PACKET_remaining(&pkt) > 0) {
- /* Skip record type and version */
- if (!PACKET_forward(&pkt, 3))
- return -1;
-
- /* Read until next record */
- if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
- return -1;
-
- count += 1;
- }
-
- return count;
-}
-# endif
-#endif
-
-
int tls_provider_set_tls_params(SSL *s, EVP_CIPHER_CTX *ctx,
const EVP_CIPHER *ciph,
const EVP_MD *md)
int reuse_dd = 0;
#ifndef OPENSSL_NO_KTLS
ktls_crypto_info_t crypto_info;
- unsigned char *rec_seq;
void *rl_sequence;
-# ifndef OPENSSL_NO_KTLS_RX
- int count_unprocessed;
- int bit;
-# endif
BIO *bio;
#endif
else
rl_sequence = RECORD_LAYER_get_read_sequence(&s->rlayer);
- if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info, &rec_seq,
- iv, key, ms, *mac_secret_size))
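+ /*
+ * For the read side ktls_configure_crypto() also checks that no partial
+ * record is pending and steps the record sequence number past any records
+ * that were read but not yet processed.
+ */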
+ if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info,
+ which & SSL3_CC_WRITE, iv, key, ms,
+ *mac_secret_size))
goto skip_ktls;
- if (which & SSL3_CC_READ) {
-# ifndef OPENSSL_NO_KTLS_RX
- count_unprocessed = count_unprocessed_records(s);
- if (count_unprocessed < 0)
- goto skip_ktls;
-
- /* increment the crypto_info record sequence */
- while (count_unprocessed) {
- for (bit = 7; bit >= 0; bit--) { /* increment */
- ++rec_seq[bit];
- if (rec_seq[bit] != 0)
- break;
- }
- count_unprocessed--;
- }
-# else
- goto skip_ktls;
-# endif
- }
-
/* ktls works with user provided buffers directly */
if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) {
if (which & SSL3_CC_WRITE)