/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"

struct mlx5_fpga_tls_command_context;

typedef void (*mlx5_fpga_tls_command_complete)
	(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
	 struct mlx5_fpga_tls_command_context *ctx,
	 struct mlx5_fpga_dma_buf *resp);

struct mlx5_fpga_tls_command_context {
	struct list_head list;
	/* There is no guarantee on the order between the TX completion
	 * and the command response.
	 * The TX completion is going to touch cmd->buf even in
	 * the case of successful transmission.
	 * So instead of requiring separate allocations for cmd
	 * and cmd->buf we've decided to use a reference counter.
	 */
	refcount_t ref;
	struct mlx5_fpga_dma_buf buf;
	mlx5_fpga_tls_command_complete complete;
};

static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		kfree(ctx);
}

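/* Responses arrive on the command QP in submission order, so the oldest
 * pending command is always the one being answered. A NULL resp is the
 * send-failure path, signalled from mlx5_fpga_cmd_send_complete().
 */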
static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
				       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_fpga_conn *conn = fdev->tls->conn;
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	ctx = list_first_entry(&tls->pending_cmds,
			       struct mlx5_fpga_tls_command_context, list);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
	ctx->complete(conn, fdev, ctx, resp);
}

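/* DMA TX completion: drops the send-side reference. If the send failed,
 * no response will ever arrive, so complete the command with a NULL resp.
 */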
static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
					struct mlx5_fpga_device *fdev,
					struct mlx5_fpga_dma_buf *buf,
					u8 status)
{
	struct mlx5_fpga_tls_command_context *ctx =
		container_of(buf, struct mlx5_fpga_tls_command_context, buf);

	mlx5_fpga_tls_put_command_ctx(ctx);

	if (unlikely(status))
		mlx5_fpga_tls_cmd_complete(fdev, NULL);
}

static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_tls_command_context *cmd,
				   mlx5_fpga_tls_command_complete complete)
{
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;
	int ret;

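	/* Two references: one dropped by the DMA TX completion
	 * (mlx5_fpga_cmd_send_complete) and one when the response is
	 * processed or the send fails.
	 */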
	refcount_set(&cmd->ref, 2);
	cmd->complete = complete;
	cmd->buf.complete = mlx5_fpga_cmd_send_complete;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
	 * to make sure commands are inserted to the tls->pending_cmds list
	 * and the command QP in the same order.
	 */
	ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
	if (likely(!ret))
		list_add_tail(&cmd->list, &tls->pending_cmds);
	else
		complete(tls->conn, fdev, cmd, NULL);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}

/* Start of context identifiers range (inclusive) */
#define SWID_START 0
/* End of context identifiers range (exclusive) */
#define SWID_END BIT(24)

static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
				    void *ptr)
{
	unsigned long flags;
	int ret;

	/* TLS metadata format is 1 byte for syndrome followed
	 * by 3 bytes of swid (software ID).
	 * swid must not exceed 3 bytes.
	 * See tls_rxtx.c:insert_pet() for details.
	 */
	BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(idr_spinlock, flags);
	ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
	spin_unlock_irqrestore(idr_spinlock, flags);
	idr_preload_end();

	return ret;
}

static void mlx5_fpga_tls_release_swid(struct idr *idr,
				       spinlock_t *idr_spinlock, u32 swid)
{
	unsigned long flags;

	spin_lock_irqsave(idr_spinlock, flags);
	idr_remove(idr, swid);
	spin_unlock_irqrestore(idr_spinlock, flags);
}

static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
				   struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_dma_buf *buf, u8 status)
{
	kfree(buf);
}

struct mlx5_teardown_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	u32 swid;
};

static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_device *fdev,
				  struct mlx5_fpga_tls_command_context *cmd,
				  struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_teardown_stream_context *ctx =
		container_of(cmd, struct mlx5_teardown_stream_context, cmd);

	if (resp) {
		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

		if (syndrome)
			mlx5_fpga_err(fdev,
				      "Teardown stream failed with syndrome = %d",
				      syndrome);
		else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
						   &fdev->tls->tx_idr_spinlock,
						   ctx->swid);
		else
			mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
						   &fdev->tls->rx_idr_spinlock,
						   ctx->swid);
	}
	mlx5_fpga_tls_put_command_ctx(cmd);
}

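/* Copy the connection tuple from the flow context into the command
 * payload and mirror the ipv6 and direction_sx flags.
 */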
static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
	       MLX5_BYTE_OFF(tls_flow, ipv6));

	MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
	MLX5_SET(tls_cmd, cmd, direction_sx,
		 MLX5_GET(tls_flow, flow, direction_sx));
}

int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
			    u64 rcd_sn)
{
	struct mlx5_fpga_dma_buf *buf;
	int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
	void *flow;
	void *cmd;
	int ret;

	rcu_read_lock();
	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
	rcu_read_unlock();

	if (!flow) {
		WARN_ONCE(1, "Received NULL pointer for handle\n");
		return -EINVAL;
	}

	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

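	/* The command payload lives in the same allocation, immediately
	 * after the DMA buf descriptor.
	 */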
	cmd = (buf + 1);

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
	MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	buf->complete = mlx_tls_kfree_complete;

	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
	if (ret < 0)
		kfree(buf);

	return ret;
}

static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
					    void *flow, u32 swid, gfp_t flags)
{
	struct mlx5_teardown_stream_context *ctx;
	struct mlx5_fpga_dma_buf *buf;
	void *cmd;

	ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
	if (!ctx)
		return;

	buf = &ctx->cmd.buf;
	cmd = (ctx + 1);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
	MLX5_SET(tls_cmd, cmd, swid, swid);

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	kfree(flow);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

	ctx->swid = swid;
	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_teardown_completion);
}

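/* Look up the flow by swid under RCU and queue a teardown command.
 * The flow memory itself is freed by mlx5_fpga_tls_send_teardown_cmd();
 * the swid is only released once the teardown response arrives.
 */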
void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
			    gfp_t flags, bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	void *flow;

	rcu_read_lock();
	if (direction_sx)
		flow = idr_find(&tls->tx_idr, swid);
	else
		flow = idr_find(&tls->rx_idr, swid);

	rcu_read_unlock();

	if (!flow) {
		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
			      swid);
		return;
	}

	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}

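/* Status handshake between the (killable) waiter in
 * mlx5_fpga_tls_setup_stream_cmd() and the command completion: the
 * completion moves PENDING to SEND_FAILED or RESPONSE_RECEIVED, while a
 * waiter that gives up moves it to ABANDONED. Whichever side observes
 * the other's value is responsible for releasing the context.
 */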
enum mlx5_fpga_setup_stream_status {
	MLX5_FPGA_CMD_PENDING,
	MLX5_FPGA_CMD_SEND_FAILED,
	MLX5_FPGA_CMD_RESPONSE_RECEIVED,
	MLX5_FPGA_CMD_ABANDONED,
};

struct mlx5_setup_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	atomic_t status;
	u32 syndrome;
	struct completion comp;
};

static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
			       struct mlx5_fpga_device *fdev,
			       struct mlx5_fpga_tls_command_context *cmd,
			       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_setup_stream_context *ctx =
		container_of(cmd, struct mlx5_setup_stream_context, cmd);
	int status = MLX5_FPGA_CMD_SEND_FAILED;
	void *tls_cmd = ctx + 1;

	/* If we failed to send the command, resp == NULL */
	if (resp) {
		ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
		status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
	}

	status = atomic_xchg_release(&ctx->status, status);
	if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
		complete(&ctx->comp);
		return;
	}

	mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
		      ctx->syndrome);

	if (!ctx->syndrome) {
		/* The process was killed while waiting for the context to be
		 * added, and the add completed successfully.
		 * We need to destroy the HW context, and we can't reuse
		 * the command context because we might not have received
		 * the tx completion yet.
		 */
		mlx5_fpga_tls_del_flow(fdev->mdev,
				       MLX5_GET(tls_cmd, tls_cmd, swid),
				       GFP_ATOMIC,
				       MLX5_GET(tls_cmd, tls_cmd,
						direction_sx));
	}

	mlx5_fpga_tls_put_command_ctx(cmd);
}

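/* Send a SETUP_STREAM command and wait (killably) for the response.
 * Returns -EINTR if the wait was interrupted before the command
 * completed; in that case the abandoned context, and the HW state if
 * the setup did succeed, are cleaned up by the completion handler.
 */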
static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
					  struct mlx5_setup_stream_context *ctx)
{
	struct mlx5_fpga_dma_buf *buf;
	void *cmd = ctx + 1;
	int status, ret = 0;

	buf = &ctx->cmd.buf;
	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

	init_completion(&ctx->comp);
	atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
	ctx->syndrome = -1;

	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_setup_completion);
	wait_for_completion_killable(&ctx->comp);

	status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (unlikely(status == MLX5_FPGA_CMD_PENDING))
		/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
		return -EINTR;

	if (unlikely(ctx->syndrome))
		ret = -ENOMEM;

	mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
	return ret;
}

static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
					struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;

	mlx5_fpga_tls_cmd_complete(fdev, buf);
}

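/* The TLS SBU is identified by Mellanox's IEEE vendor ID plus the TLS
 * sandbox product ID, at sandbox product version 0.
 */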
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
		return false;

	return true;
}

static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
				  u32 *p_caps)
{
	int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
	u32 caps = 0;
	void *buf;

	buf = kzalloc(cap_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
	if (err)
		goto out;

	if (MLX5_GET(tls_extended_cap, buf, tx))
		caps |= MLX5_ACCEL_TLS_TX;
	if (MLX5_GET(tls_extended_cap, buf, rx))
		caps |= MLX5_ACCEL_TLS_RX;
	if (MLX5_GET(tls_extended_cap, buf, tls_v12))
		caps |= MLX5_ACCEL_TLS_V12;
	if (MLX5_GET(tls_extended_cap, buf, tls_v13))
		caps |= MLX5_ACCEL_TLS_V13;
	if (MLX5_GET(tls_extended_cap, buf, lro))
		caps |= MLX5_ACCEL_TLS_LRO;
	if (MLX5_GET(tls_extended_cap, buf, ipv6))
		caps |= MLX5_ACCEL_TLS_IPV6;

	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
		caps |= MLX5_ACCEL_TLS_AES_GCM128;
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
		caps |= MLX5_ACCEL_TLS_AES_GCM256;

	*p_caps = caps;
	err = 0;
out:
	kfree(buf);
	return err;
}

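/* Bring-up: read the SBU capabilities, bail out unless at least one of
 * the TLS 1.2 / AES-GCM-128 capability bits is set, then create the
 * command QP and initialize the swid IDRs.
 */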
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_conn *conn;
	struct mlx5_fpga_tls *tls;
	int err = 0;

	if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
	if (err)
		goto error;

	if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
		err = -ENOTSUPP;
		goto error;
	}

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
			      err);
		goto error;
	}

	tls->conn = conn;
	spin_lock_init(&tls->pending_cmds_lock);
	INIT_LIST_HEAD(&tls->pending_cmds);

	idr_init(&tls->tx_idr);
	idr_init(&tls->rx_idr);
	spin_lock_init(&tls->tx_idr_spinlock);
	spin_lock_init(&tls->rx_idr_spinlock);
	fdev->tls = tls;
	return 0;

error:
	kfree(tls);
	return err;
}

void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->tls)
		return;

	mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
	kfree(fdev->tls);
	fdev->tls = NULL;
}

static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
					     struct tls_crypto_info *info,
					     __be64 *rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *crypto_info =
		(struct tls12_crypto_info_aes_gcm_128 *)info;

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
	       crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	/* in AES-GCM 128 we need to write the key twice */
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
	       TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}

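/* Only AES-GCM-128 is supported, and only when the SBU advertised it. */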
static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
					  struct tls_crypto_info *crypto_info)
{
	__be64 rcd_sn;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
			return -EINVAL;
		mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
				   struct tls_crypto_info *crypto_info,
				   u32 swid, u32 tcp_sn)
{
	u32 caps = mlx5_fpga_tls_device_caps(mdev);
	struct mlx5_setup_stream_context *ctx;
	int ret = -ENOMEM;
	size_t cmd_size;
	void *cmd;

	cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
	ctx = kzalloc(cmd_size, GFP_KERNEL);
	if (!ctx)
		goto out;

	cmd = ctx + 1;
	ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
	if (ret)
		goto free_ctx;

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, swid);
	MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
	kfree(ctx);
out:
	return ret;
}

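/* Allocate a swid for the flow, record the direction, and issue
 * SETUP_STREAM. An -EINTR result means the killable wait was
 * interrupted; cleanup of the HW context is deferred to the
 * abandoned-command completion, so the swid is not released here.
 */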
int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn, u32 *p_swid,
			   bool direction_sx)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	int ret = -ENOMEM;
	u32 swid;

	if (direction_sx)
		ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
					       &tls->tx_idr_spinlock, flow);
	else
		ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
					       &tls->rx_idr_spinlock, flow);

	if (ret < 0)
		return ret;

	swid = ret;
	MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);

	ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
				      start_offload_tcp_sn);
	if (ret && ret != -EINTR)
		goto free_swid;

	*p_swid = swid;
	return 0;
free_swid:
	if (direction_sx)
		mlx5_fpga_tls_release_swid(&tls->tx_idr,
					   &tls->tx_idr_spinlock, swid);
	else
		mlx5_fpga_tls_release_swid(&tls->rx_idr,
					   &tls->rx_idr_spinlock, swid);

	return ret;
}