/*
 * Copyright 2018-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <openssl/evp.h>
#include <openssl/core_names.h>
#include <openssl/rand.h>
#include "../../ssl_local.h"
#include "../record_local.h"
#include "recmethod_local.h"
#include "internal/ktls.h"

static struct record_functions_st ossl_ktls_funcs;

#if defined(__FreeBSD__)
# include "crypto/cryptodev.h"

/*-
 * Check if a given cipher is supported by the KTLS interface.
 * The kernel might still fail the setsockopt() if no suitable
 * provider is found, but this checks if the socket option
 * supports the cipher suite used at all.
 */
static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
                                           const EVP_CIPHER *c,
                                           const EVP_MD *md,
                                           size_t taglen)
{
    switch (rl->version) {
    case TLS1_VERSION:
    case TLS1_1_VERSION:
    case TLS1_2_VERSION:
#ifdef OPENSSL_KTLS_TLS13
    case TLS1_3_VERSION:
#endif
        break;
    default:
        return 0;
    }

    if (EVP_CIPHER_is_a(c, "AES-128-GCM")
            || EVP_CIPHER_is_a(c, "AES-256-GCM")
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
            || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
# endif
        )
        return 1;

    if (!EVP_CIPHER_is_a(c, "AES-128-CBC")
            && !EVP_CIPHER_is_a(c, "AES-256-CBC"))
        return 0;

    if (rl->use_etm)
        return 0;

    if (md == NULL)
        return 0;

    if (EVP_MD_is_a(md, "SHA1")
            || EVP_MD_is_a(md, "SHA2-256")
            || EVP_MD_is_a(md, "SHA2-384"))
        return 1;

    return 0;
}

/* Function to configure kernel TLS structure */
static
int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c,
                          const EVP_MD *md, void *rl_sequence,
                          ktls_crypto_info_t *crypto_info, int is_tx,
                          unsigned char *iv, size_t ivlen,
                          unsigned char *key, size_t keylen,
                          unsigned char *mac_key, size_t mac_secret_size)
{
    memset(crypto_info, 0, sizeof(*crypto_info));
    if (EVP_CIPHER_is_a(c, "AES-128-GCM")
            || EVP_CIPHER_is_a(c, "AES-256-GCM")) {
        crypto_info->cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
        crypto_info->iv_len = ivlen;
    } else
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
    if (EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")) {
        crypto_info->cipher_algorithm = CRYPTO_CHACHA20_POLY1305;
        crypto_info->iv_len = ivlen;
    } else
# endif
    if (EVP_CIPHER_is_a(c, "AES-128-CBC") || EVP_CIPHER_is_a(c, "AES-256-CBC")) {
        if (md == NULL)
            return 0;
        if (EVP_MD_is_a(md, "SHA1"))
            crypto_info->auth_algorithm = CRYPTO_SHA1_HMAC;
        else if (EVP_MD_is_a(md, "SHA2-256"))
            crypto_info->auth_algorithm = CRYPTO_SHA2_256_HMAC;
        else if (EVP_MD_is_a(md, "SHA2-384"))
            crypto_info->auth_algorithm = CRYPTO_SHA2_384_HMAC;
        else
            return 0;
        crypto_info->cipher_algorithm = CRYPTO_AES_CBC;
        crypto_info->iv_len = ivlen;
        crypto_info->auth_key = mac_key;
        crypto_info->auth_key_len = mac_secret_size;
    } else {
        return 0;
    }
    crypto_info->cipher_key = key;
    crypto_info->cipher_key_len = keylen;
    crypto_info->iv = iv;
    crypto_info->tls_vmajor = (version >> 8) & 0x000000ff;
    crypto_info->tls_vminor = (version & 0x000000ff);
# ifdef TCP_RXTLS_ENABLE
    memcpy(crypto_info->rec_seq, rl_sequence, sizeof(crypto_info->rec_seq));
# else
    if (!is_tx)
        return 0;
# endif
    return 1;
}

#endif /* __FreeBSD__ */

#if defined(OPENSSL_SYS_LINUX)
/* Function to check supported ciphers in Linux */
static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
                                           const EVP_CIPHER *c,
                                           const EVP_MD *md,
                                           size_t taglen)
{
    switch (rl->version) {
    case TLS1_2_VERSION:
#ifdef OPENSSL_KTLS_TLS13
    case TLS1_3_VERSION:
#endif
        break;
    default:
        return 0;
    }

    /*
     * Check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
     * or Chacha20-Poly1305
     */
# ifdef OPENSSL_KTLS_AES_CCM_128
    if (EVP_CIPHER_is_a(c, "AES-128-CCM")) {
        if (rl->version == TLS1_3_VERSION /* broken on 5.x kernels */
                || taglen != EVP_CCM_TLS_TAG_LEN)
            return 0;
        return 1;
    } else
# endif
    if (0
# ifdef OPENSSL_KTLS_AES_GCM_128
        || EVP_CIPHER_is_a(c, "AES-128-GCM")
# endif
# ifdef OPENSSL_KTLS_AES_GCM_256
        || EVP_CIPHER_is_a(c, "AES-256-GCM")
# endif
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
        || EVP_CIPHER_is_a(c, "ChaCha20-Poly1305")
# endif
        ) {
        return 1;
    }
    return 0;
}

/* Function to configure kernel TLS structure */
static
int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c,
                          const EVP_MD *md, void *rl_sequence,
                          ktls_crypto_info_t *crypto_info, int is_tx,
                          unsigned char *iv, size_t ivlen,
                          unsigned char *key, size_t keylen,
                          unsigned char *mac_key, size_t mac_secret_size)
{
    unsigned char geniv[EVP_GCM_TLS_EXPLICIT_IV_LEN];
    unsigned char *eiv = NULL;

# ifdef OPENSSL_NO_KTLS_RX
    if (!is_tx)
        return 0;
# endif

    if (EVP_CIPHER_get_mode(c) == EVP_CIPH_GCM_MODE
            || EVP_CIPHER_get_mode(c) == EVP_CIPH_CCM_MODE) {
        if (!ossl_assert(EVP_GCM_TLS_FIXED_IV_LEN == EVP_CCM_TLS_FIXED_IV_LEN)
                || !ossl_assert(EVP_GCM_TLS_EXPLICIT_IV_LEN
                                == EVP_CCM_TLS_EXPLICIT_IV_LEN))
            return 0;
        if (version == TLS1_2_VERSION) {
            if (!ossl_assert(ivlen == EVP_GCM_TLS_FIXED_IV_LEN))
                return 0;
            if (is_tx) {
                if (RAND_bytes_ex(libctx, geniv,
                                  EVP_GCM_TLS_EXPLICIT_IV_LEN, 0) <= 0)
                    return 0;
            } else {
                memset(geniv, 0, EVP_GCM_TLS_EXPLICIT_IV_LEN);
            }
            eiv = geniv;
        } else {
            if (!ossl_assert(ivlen == EVP_GCM_TLS_FIXED_IV_LEN
                                      + EVP_GCM_TLS_EXPLICIT_IV_LEN))
                return 0;
            eiv = iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE;
        }
    }

    memset(crypto_info, 0, sizeof(*crypto_info));
    switch (EVP_CIPHER_get_nid(c)) {
# ifdef OPENSSL_KTLS_AES_GCM_128
    case NID_aes_128_gcm:
        if (!ossl_assert(TLS_CIPHER_AES_GCM_128_SALT_SIZE
                         == EVP_GCM_TLS_FIXED_IV_LEN)
                || !ossl_assert(TLS_CIPHER_AES_GCM_128_IV_SIZE
                                == EVP_GCM_TLS_EXPLICIT_IV_LEN))
            return 0;
        crypto_info->gcm128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
        crypto_info->gcm128.info.version = version;
        crypto_info->tls_crypto_info_len = sizeof(crypto_info->gcm128);
        memcpy(crypto_info->gcm128.iv, eiv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
        memcpy(crypto_info->gcm128.salt, iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(crypto_info->gcm128.key, key, keylen);
        memcpy(crypto_info->gcm128.rec_seq, rl_sequence,
               TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
        return 1;
# endif
# ifdef OPENSSL_KTLS_AES_GCM_256
    case NID_aes_256_gcm:
        if (!ossl_assert(TLS_CIPHER_AES_GCM_256_SALT_SIZE
                         == EVP_GCM_TLS_FIXED_IV_LEN)
                || !ossl_assert(TLS_CIPHER_AES_GCM_256_IV_SIZE
                                == EVP_GCM_TLS_EXPLICIT_IV_LEN))
            return 0;
        crypto_info->gcm256.info.cipher_type = TLS_CIPHER_AES_GCM_256;
        crypto_info->gcm256.info.version = version;
        crypto_info->tls_crypto_info_len = sizeof(crypto_info->gcm256);
        memcpy(crypto_info->gcm256.iv, eiv, TLS_CIPHER_AES_GCM_256_IV_SIZE);
        memcpy(crypto_info->gcm256.salt, iv, TLS_CIPHER_AES_GCM_256_SALT_SIZE);
        memcpy(crypto_info->gcm256.key, key, keylen);
        memcpy(crypto_info->gcm256.rec_seq, rl_sequence,
               TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);

        return 1;
# endif
# ifdef OPENSSL_KTLS_AES_CCM_128
    case NID_aes_128_ccm:
        if (!ossl_assert(TLS_CIPHER_AES_CCM_128_SALT_SIZE
                         == EVP_CCM_TLS_FIXED_IV_LEN)
                || !ossl_assert(TLS_CIPHER_AES_CCM_128_IV_SIZE
                                == EVP_CCM_TLS_EXPLICIT_IV_LEN))
            return 0;
        crypto_info->ccm128.info.cipher_type = TLS_CIPHER_AES_CCM_128;
        crypto_info->ccm128.info.version = version;
        crypto_info->tls_crypto_info_len = sizeof(crypto_info->ccm128);
        memcpy(crypto_info->ccm128.iv, eiv, TLS_CIPHER_AES_CCM_128_IV_SIZE);
        memcpy(crypto_info->ccm128.salt, iv, TLS_CIPHER_AES_CCM_128_SALT_SIZE);
        memcpy(crypto_info->ccm128.key, key, keylen);
        memcpy(crypto_info->ccm128.rec_seq, rl_sequence,
               TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
        return 1;
# endif
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
    case NID_chacha20_poly1305:
        if (!ossl_assert(ivlen == TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE))
            return 0;
        crypto_info->chacha20poly1305.info.cipher_type
            = TLS_CIPHER_CHACHA20_POLY1305;
        crypto_info->chacha20poly1305.info.version = version;
        crypto_info->tls_crypto_info_len = sizeof(crypto_info->chacha20poly1305);
        memcpy(crypto_info->chacha20poly1305.iv, iv, ivlen);
        memcpy(crypto_info->chacha20poly1305.key, key, keylen);
        memcpy(crypto_info->chacha20poly1305.rec_seq, rl_sequence,
               TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
        return 1;
# endif
    default:
        return 0;
    }

}

#endif /* OPENSSL_SYS_LINUX */

static int ktls_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    ktls_crypto_info_t crypto_info;

    /*
     * Check if we are suitable for KTLS. If not suitable we return
     * OSSL_RECORD_RETURN_NON_FATAL_ERR so that other record layers can be
     * tried instead
     */

    if (comp != NULL)
        return OSSL_RECORD_RETURN_NON_FATAL_ERR;

    /* ktls supports only the maximum fragment size */
    if (rl->max_frag_len != SSL3_RT_MAX_PLAIN_LENGTH)
        return OSSL_RECORD_RETURN_NON_FATAL_ERR;

    /* check that cipher is supported */
    if (!ktls_int_check_supported_cipher(rl, ciph, md, taglen))
        return OSSL_RECORD_RETURN_NON_FATAL_ERR;

    /* All future data will get encrypted by ktls. Flush the BIO or skip ktls */
    if (rl->direction == OSSL_RECORD_DIRECTION_WRITE) {
        if (BIO_flush(rl->bio) <= 0)
            return OSSL_RECORD_RETURN_NON_FATAL_ERR;

        /* KTLS does not support record padding */
        if (rl->padding != NULL || rl->block_padding > 0)
            return OSSL_RECORD_RETURN_NON_FATAL_ERR;
    }

    if (!ktls_configure_crypto(rl->libctx, rl->version, ciph, md, rl->sequence,
                               &crypto_info,
                               rl->direction == OSSL_RECORD_DIRECTION_WRITE,
                               iv, ivlen, key, keylen, mackey, mackeylen))
        return OSSL_RECORD_RETURN_NON_FATAL_ERR;

    if (!BIO_set_ktls(rl->bio, &crypto_info, rl->direction))
        return OSSL_RECORD_RETURN_NON_FATAL_ERR;

    return OSSL_RECORD_RETURN_SUCCESS;
}

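/*
 * Read data from the transport BIO. With KTLS the kernel authenticates and
 * decrypts incoming records for us, so crypto failures are reported as errno
 * values on the read, which we translate into the corresponding TLS alerts
 * here.
 */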
static int ktls_read_n(OSSL_RECORD_LAYER *rl, size_t n, size_t max, int extend,
                       int clearold, size_t *readbytes)
{
    int ret;

    ret = tls_default_read_n(rl, n, max, extend, clearold, readbytes);

    if (ret < OSSL_RECORD_RETURN_RETRY) {
        switch (errno) {
        case EBADMSG:
            RLAYERfatal(rl, SSL_AD_BAD_RECORD_MAC,
                        SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC);
            break;
        case EMSGSIZE:
            RLAYERfatal(rl, SSL_AD_RECORD_OVERFLOW,
                        SSL_R_PACKET_LENGTH_TOO_LONG);
            break;
        case EINVAL:
            RLAYERfatal(rl, SSL_AD_PROTOCOL_VERSION,
                        SSL_R_WRONG_VERSION_NUMBER);
            break;
        default:
            break;
        }
    }

    return ret;
}

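/*
 * With KTLS the kernel performs the record encryption and decryption, so
 * there is nothing for the record layer cipher function to do here.
 */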
static int ktls_cipher(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *inrecs,
                       size_t n_recs, int sending, SSL_MAC_BUF *mac,
                       size_t macsize)
{
    return 1;
}

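/*
 * Once KTLS is enabled we only ever expect the TLS 1.2 record version on
 * incoming records (TLS 1.3 also uses it as its legacy record version), so
 * anything else is treated as an error.
 */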
static int ktls_validate_record_header(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec)
{
    if (rec->rec_version != TLS1_2_VERSION) {
        RLAYERfatal(rl, SSL_AD_DECODE_ERROR, SSL_R_WRONG_VERSION_NUMBER);
        return 0;
    }

    return 1;
}

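/*
 * TLS 1.3 records still need the common TLS 1.3 post processing (such as
 * handling of the inner content type) after the kernel has decrypted them;
 * earlier protocol versions need nothing further.
 */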
static int ktls_post_process_record(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec)
{
    if (rl->version == TLS1_3_VERSION)
        return tls13_common_post_process_record(rl, rec);

    return 1;
}

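/*
 * Construct a new KTLS record layer: set up a standard TLS record layer
 * first, then attempt to switch the BIO into kernel TLS mode. Failure to do
 * so is reported as a non-fatal error so that an alternative record method
 * can be used instead.
 */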
static int
ktls_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
                      int role, int direction, int level, uint16_t epoch,
                      unsigned char *key, size_t keylen, unsigned char *iv,
                      size_t ivlen, unsigned char *mackey, size_t mackeylen,
                      const EVP_CIPHER *ciph, size_t taglen,
                      int mactype,
                      const EVP_MD *md, COMP_METHOD *comp, BIO *prev,
                      BIO *transport, BIO *next, BIO_ADDR *local, BIO_ADDR *peer,
                      const OSSL_PARAM *settings, const OSSL_PARAM *options,
                      const OSSL_DISPATCH *fns, void *cbarg,
                      OSSL_RECORD_LAYER **retrl)
{
    int ret;

    ret = tls_int_new_record_layer(libctx, propq, vers, role, direction, level,
                                   key, keylen, iv, ivlen, mackey, mackeylen,
                                   ciph, taglen, mactype, md, comp, prev,
                                   transport, next, local, peer, settings,
                                   options, fns, cbarg, retrl);

    if (ret != OSSL_RECORD_RETURN_SUCCESS)
        return ret;

    (*retrl)->funcs = &ossl_ktls_funcs;

    ret = (*retrl)->funcs->set_crypto_state(*retrl, level, key, keylen, iv,
                                            ivlen, mackey, mackeylen, ciph,
                                            taglen, mactype, md, comp);

    if (ret != OSSL_RECORD_RETURN_SUCCESS) {
        OPENSSL_free(*retrl);
        *retrl = NULL;
    } else {
        /*
         * With KTLS we always try and read as much as possible and fill the
         * buffer
         */
        (*retrl)->read_ahead = 1;
    }
    return ret;
}

static int ktls_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                       OSSL_RECORD_TEMPLATE *templates,
                                       size_t numtempl, size_t *prefix)
{
    if (!ossl_assert(numtempl == 1))
        return 0;

    /*
     * We just use the end application buffer in the case of KTLS, so nothing
     * to do. We pretend we set up one buffer.
     */
    rl->numwpipes = 1;

    return 1;
}

static int ktls_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                         OSSL_RECORD_TEMPLATE *templates,
                                         size_t numtempl,
                                         OSSL_RECORD_TEMPLATE *prefixtempl,
                                         WPACKET *pkt,
                                         TLS_BUFFER *bufs,
                                         size_t *wpinited)
{
    TLS_BUFFER *wb;

    /*
     * We just use the application buffer directly and don't use any WPACKET
     * structures
     */
    wb = &bufs[0];
    wb->type = templates[0].type;

    /*
     * ktls doesn't modify the buffer, but to avoid a warning we need
     * to discard the const qualifier.
     * This doesn't leak memory because the buffers have never been allocated
     * with KTLS
     */
    TLS_BUFFER_set_buf(wb, (unsigned char *)templates[0].buf);
    TLS_BUFFER_set_offset(wb, 0);
    TLS_BUFFER_set_app_buffer(wb, 1);

    return 1;
}

static int ktls_prepare_record_header(OSSL_RECORD_LAYER *rl,
                                      WPACKET *thispkt,
                                      OSSL_RECORD_TEMPLATE *templ,
                                      unsigned int rectype,
                                      unsigned char **recdata)
{
    /* The kernel writes the record header, so nothing to do */
    *recdata = NULL;

    return 1;
}

static int ktls_prepare_for_encryption(OSSL_RECORD_LAYER *rl,
                                       size_t mac_size,
                                       WPACKET *thispkt,
                                       TLS_RL_RECORD *thiswr)
{
    /* No encryption, so nothing to do */
    return 1;
}

static int ktls_post_encryption_processing(OSSL_RECORD_LAYER *rl,
                                           size_t mac_size,
                                           OSSL_RECORD_TEMPLATE *templ,
                                           WPACKET *thispkt,
                                           TLS_RL_RECORD *thiswr)
{
    /* The kernel does everything that is needed, so nothing to do here */
    return 1;
}

static int ktls_prepare_write_bio(OSSL_RECORD_LAYER *rl, int type)
{
    /*
     * To prevent coalescing of control and data messages,
     * such as in buffer_write, we flush the BIO
     */
    if (type != SSL3_RT_APPLICATION_DATA) {
        int ret, i = BIO_flush(rl->bio);

        if (i <= 0) {
            if (BIO_should_retry(rl->bio))
                ret = OSSL_RECORD_RETURN_RETRY;
            else
                ret = OSSL_RECORD_RETURN_FATAL;
            return ret;
        }
        BIO_set_ktls_ctrl_msg(rl->bio, type);
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}

static int ktls_alloc_buffers(OSSL_RECORD_LAYER *rl)
{
    /* We use the application buffer directly for writing */
    if (rl->direction == OSSL_RECORD_DIRECTION_WRITE)
        return 1;

    return tls_alloc_buffers(rl);
}

static int ktls_free_buffers(OSSL_RECORD_LAYER *rl)
{
    /* We use the application buffer directly for writing */
    if (rl->direction == OSSL_RECORD_DIRECTION_WRITE)
        return 1;

    return tls_free_buffers(rl);
}

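/*
 * Dispatch table of record layer functions used when KTLS is enabled. Most
 * entries reuse the default TLS implementations; the ktls_* hooks above
 * replace the steps that the kernel now performs for us.
 */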
static struct record_functions_st ossl_ktls_funcs = {
    ktls_set_crypto_state,
    ktls_cipher,
    NULL,
    tls_default_set_protocol_version,
    ktls_read_n,
    tls_get_more_records,
    ktls_validate_record_header,
    ktls_post_process_record,
    tls_get_max_records_default,
    tls_write_records_default,
    ktls_allocate_write_buffers,
    ktls_initialise_write_packets,
    NULL,
    ktls_prepare_record_header,
    NULL,
    ktls_prepare_for_encryption,
    ktls_post_encryption_processing,
    ktls_prepare_write_bio
};

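/*
 * The KTLS OSSL_RECORD_METHOD. Apart from construction and buffer
 * management, the generic TLS record method functions are reused unchanged.
 */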
const OSSL_RECORD_METHOD ossl_ktls_record_method = {
    ktls_new_record_layer,
    tls_free,
    tls_reset,
    tls_unprocessed_read_pending,
    tls_processed_read_pending,
    tls_app_data_pending,
    tls_get_max_records,
    tls_write_records,
    tls_retry_write_records,
    tls_read_record,
    tls_release_record,
    tls_get_alert_code,
    tls_set1_bio,
    tls_set_protocol_version,
    tls_set_plain_alerts,
    tls_set_first_handshake,
    tls_set_max_pipelines,
    NULL,
    tls_get_state,
    tls_set_options,
    tls_get_compression,
    tls_set_max_frag_len,
    NULL,
    tls_increment_sequence_ctr,
    ktls_alloc_buffers,
    ktls_free_buffers
};