/* drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c */
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx5/device.h>
#include "fpga/tls.h"
#include "fpga/cmd.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
#include "accel/tls.h"
struct mlx5_fpga_tls_command_context;

typedef void (*mlx5_fpga_tls_command_complete)
	(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
	 struct mlx5_fpga_tls_command_context *ctx,
	 struct mlx5_fpga_dma_buf *resp);

struct mlx5_fpga_tls_command_context {
	struct list_head list;
	/* There is no guarantee on the order between the TX completion
	 * and the command response.
	 * The TX completion touches cmd->buf even when the transmission
	 * succeeds, so instead of requiring separate allocations for cmd
	 * and cmd->buf we use a reference counter.
	 */
	refcount_t ref;
	struct mlx5_fpga_dma_buf buf;
	mlx5_fpga_tls_command_complete complete;
};

static void
mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		kfree(ctx);
}

static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
				       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_fpga_conn *conn = fdev->tls->conn;
	struct mlx5_fpga_tls_command_context *ctx;
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	ctx = list_first_entry(&tls->pending_cmds,
			       struct mlx5_fpga_tls_command_context, list);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
	ctx->complete(conn, fdev, ctx, resp);
}

static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
					struct mlx5_fpga_device *fdev,
					struct mlx5_fpga_dma_buf *buf,
					u8 status)
{
	struct mlx5_fpga_tls_command_context *ctx =
	    container_of(buf, struct mlx5_fpga_tls_command_context, buf);

	mlx5_fpga_tls_put_command_ctx(ctx);

	if (unlikely(status))
		mlx5_fpga_tls_cmd_complete(fdev, NULL);
}

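/* Post a command buffer on the SBU command QP. The context starts with a
 * reference count of two: one reference is dropped by the DMA send
 * completion (mlx5_fpga_cmd_send_complete) and one by the command's
 * completion handler, so the context stays valid until both have run,
 * in whichever order they arrive.
 */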
static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_tls_command_context *cmd,
				   mlx5_fpga_tls_command_complete complete)
{
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;
	int ret;

	refcount_set(&cmd->ref, 2);
	cmd->complete = complete;
	cmd->buf.complete = mlx5_fpga_cmd_send_complete;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
	 * to make sure commands are inserted to the tls->pending_cmds list
	 * and the command QP in the same order.
	 */
	ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
	if (likely(!ret))
		list_add_tail(&cmd->list, &tls->pending_cmds);
	else
		complete(tls->conn, fdev, cmd, NULL);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}

/* Start of context identifiers range (inclusive) */
#define SWID_START	0
/* End of context identifiers range (exclusive) */
#define SWID_END	BIT(24)

static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
				    void *ptr)
{
	int ret;

	/* TLS metadata format is 1 byte for syndrome followed
	 * by 3 bytes of swid (software ID). The swid must not
	 * exceed 3 bytes.
	 * See tls_rxtx.c:insert_pet() for details.
	 */
	BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(idr_spinlock);
	ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
	spin_unlock_irq(idr_spinlock);
	idr_preload_end();

	return ret;
}

static void mlx5_fpga_tls_release_swid(struct idr *idr,
				       spinlock_t *idr_spinlock, u32 swid)
{
	unsigned long flags;

	spin_lock_irqsave(idr_spinlock, flags);
	idr_remove(idr, swid);
	spin_unlock_irqrestore(idr_spinlock, flags);
}

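/* A teardown command remembers its swid so that the ID can be returned
 * to the IDR once the FPGA acknowledges the teardown.
 */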
struct mlx5_teardown_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	u32 swid;
};

static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_device *fdev,
				  struct mlx5_fpga_tls_command_context *cmd,
				  struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_teardown_stream_context *ctx =
		container_of(cmd, struct mlx5_teardown_stream_context, cmd);

	if (resp) {
		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

		if (syndrome)
			mlx5_fpga_err(fdev,
				      "Teardown stream failed with syndrome = %d",
				      syndrome);
		else
			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
						   &fdev->tls->idr_spinlock,
						   ctx->swid);
	}
	mlx5_fpga_tls_put_command_ctx(cmd);
}

static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
	       MLX5_BYTE_OFF(tls_flow, ipv6));

	MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
	MLX5_SET(tls_cmd, cmd, direction_sx,
		 MLX5_GET(tls_flow, flow, direction_sx));
}

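/* Build and post a teardown command for @swid. Takes ownership of @flow,
 * which is freed once its fields have been copied into the command.
 */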
void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow,
				     u32 swid, gfp_t flags)
{
	struct mlx5_teardown_stream_context *ctx;
	struct mlx5_fpga_dma_buf *buf;
	void *cmd;

	ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
	if (!ctx)
		return;

	buf = &ctx->cmd.buf;
	cmd = (ctx + 1);
	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
	MLX5_SET(tls_cmd, cmd, swid, swid);

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
	kfree(flow);

	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;

	ctx->swid = swid;
	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_teardown_completion);
}

void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
			       gfp_t flags)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	void *flow;

	rcu_read_lock();
	flow = idr_find(&tls->tx_idr, swid);
	rcu_read_unlock();

	if (!flow) {
		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
			      swid);
		return;
	}

	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}

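/* Setup-stream commands are waited on synchronously. The waiter and the
 * completion handler exchange @status atomically: whichever side observes
 * that the other has already run performs the final cleanup of the
 * context (see mlx5_fpga_tls_setup_completion below).
 */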
enum mlx5_fpga_setup_stream_status {
	MLX5_FPGA_CMD_PENDING,
	MLX5_FPGA_CMD_SEND_FAILED,
	MLX5_FPGA_CMD_RESPONSE_RECEIVED,
	MLX5_FPGA_CMD_ABANDONED,
};

struct mlx5_setup_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	atomic_t status;
	u32 syndrome;
	struct completion comp;
};

static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
			       struct mlx5_fpga_device *fdev,
			       struct mlx5_fpga_tls_command_context *cmd,
			       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_setup_stream_context *ctx =
		container_of(cmd, struct mlx5_setup_stream_context, cmd);
	int status = MLX5_FPGA_CMD_SEND_FAILED;
	void *tls_cmd = ctx + 1;

	/* If we failed to send the command, resp == NULL */
	if (resp) {
		ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
		status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
	}

	status = atomic_xchg_release(&ctx->status, status);
	if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
		complete(&ctx->comp);
		return;
	}

	mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
		      ctx->syndrome);

	if (!ctx->syndrome) {
		/* The process was killed while waiting for the context to be
		 * added, and the add completed successfully.
		 * We need to destroy the HW context, and we can't reuse
		 * the command context because we might not have received
		 * the tx completion yet.
		 */
		mlx5_fpga_tls_del_tx_flow(fdev->mdev,
					  MLX5_GET(tls_cmd, tls_cmd, swid),
					  GFP_ATOMIC);
	}

	mlx5_fpga_tls_put_command_ctx(cmd);
}

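/* Send a setup-stream command and wait killably for the response. If the
 * wait is interrupted, -EINTR is returned and the context is released by
 * the completion handler instead of here.
 */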
static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
					  struct mlx5_setup_stream_context *ctx)
{
	struct mlx5_fpga_dma_buf *buf;
	void *cmd = ctx + 1;
	int status, ret = 0;

	buf = &ctx->cmd.buf;
	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

	init_completion(&ctx->comp);
	atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
	ctx->syndrome = -1;

	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_setup_completion);
	wait_for_completion_killable(&ctx->comp);

	status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (unlikely(status == MLX5_FPGA_CMD_PENDING))
		/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
		return -EINTR;

	if (unlikely(ctx->syndrome))
		ret = -ENOMEM;

	mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
	return ret;
}

static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
					struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg;

	mlx5_fpga_tls_cmd_complete(fdev, buf);
}

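/* The device offloads TLS only when the FPGA exposes the Mellanox TLS
 * sandbox product at version 0.
 */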
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
		return false;

	return true;
}

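/* Query the SBU's extended TLS capabilities and translate them into
 * MLX5_ACCEL_TLS_* flags.
 */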
static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
				  u32 *p_caps)
{
	int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
	u32 caps = 0;
	void *buf;

	buf = kzalloc(cap_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
	if (err)
		goto out;

	if (MLX5_GET(tls_extended_cap, buf, tx))
		caps |= MLX5_ACCEL_TLS_TX;
	if (MLX5_GET(tls_extended_cap, buf, rx))
		caps |= MLX5_ACCEL_TLS_RX;
	if (MLX5_GET(tls_extended_cap, buf, tls_v12))
		caps |= MLX5_ACCEL_TLS_V12;
	if (MLX5_GET(tls_extended_cap, buf, tls_v13))
		caps |= MLX5_ACCEL_TLS_V13;
	if (MLX5_GET(tls_extended_cap, buf, lro))
		caps |= MLX5_ACCEL_TLS_LRO;
	if (MLX5_GET(tls_extended_cap, buf, ipv6))
		caps |= MLX5_ACCEL_TLS_IPV6;

	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
		caps |= MLX5_ACCEL_TLS_AES_GCM128;
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
		caps |= MLX5_ACCEL_TLS_AES_GCM256;

	*p_caps = caps;
	err = 0;
out:
	kfree(buf);
	return err;
}

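/* Probe-time setup: query the SBU capabilities, bail out if none of the
 * TX, TLS 1.2 and AES-GCM-128 capability bits are reported, then create
 * the command connection used for setup/teardown commands.
 */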
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_conn *conn;
	struct mlx5_fpga_tls *tls;
	int err = 0;

	if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
	if (err)
		goto error;

	if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
			   MLX5_ACCEL_TLS_AES_GCM128))) {
		err = -ENOTSUPP;
		goto error;
	}

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
			      err);
		goto error;
	}

	tls->conn = conn;
	spin_lock_init(&tls->pending_cmds_lock);
	INIT_LIST_HEAD(&tls->pending_cmds);

	idr_init(&tls->tx_idr);
	spin_lock_init(&tls->idr_spinlock);
	fdev->tls = tls;
	return 0;

error:
	kfree(tls);
	return err;
}

void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->tls)
		return;

	mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
	kfree(fdev->tls);
	fdev->tls = NULL;
}

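/* Fill the AES-GCM-128 key material into the command: the record sequence
 * number, the implicit IV (salt) and the key, which the hardware expects
 * to be written twice.
 */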
static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
					     struct tls_crypto_info *info,
					     __be64 *rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *crypto_info =
		(struct tls12_crypto_info_aes_gcm_128 *)info;

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
	       crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	/* in AES-GCM 128 we need to write the key twice */
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
	       TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}

static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
					  struct tls_crypto_info *crypto_info)
{
	__be64 rcd_sn;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
			return -EINVAL;
		mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
				  struct tls_crypto_info *crypto_info, u32 swid,
				  u32 tcp_sn)
{
	u32 caps = mlx5_fpga_tls_device_caps(mdev);
	struct mlx5_setup_stream_context *ctx;
	int ret = -ENOMEM;
	size_t cmd_size;
	void *cmd;

	cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
	ctx = kzalloc(cmd_size, GFP_KERNEL);
	if (!ctx)
		goto out;

	cmd = ctx + 1;
	ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
	if (ret)
		goto free_ctx;

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, swid);
	MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
	kfree(ctx);
out:
	return ret;
}

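/* Entry point for installing a TX offload context: allocate a swid for
 * @flow, mark the flow direction as TX and issue the setup-stream command.
 * -EINTR from an abandoned command is not treated as a failure here;
 * cleanup then happens asynchronously in the completion path.
 */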
int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
			      struct tls_crypto_info *crypto_info,
			      u32 start_offload_tcp_sn, u32 *p_swid)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	int ret = -ENOMEM;
	u32 swid;

	ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
	if (ret < 0)
		return ret;

	swid = ret;
	MLX5_SET(tls_flow, flow, direction_sx, 1);

	ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
				     start_offload_tcp_sn);
	if (ret && ret != -EINTR)
		goto free_swid;

	*p_swid = swid;
	return 0;
free_swid:
	mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);

	return ret;
}