/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
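/*
 * Illustrative note (added for clarity, not in the original source): the
 * free-slot tracker is just a bitmask, so allocation is
 * find_first_bit()/clear_bit() under alloc_lock and release is set_bit().
 * Assuming a hypothetical 4-entry queue: bitmask 0b1011 means slots 0, 1
 * and 3 are free; after alloc_ent() returns 0 the mask is 0b1010, and
 * free_ent(cmd, 0) restores it.
 */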
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}
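/*
 * Worked example (illustrative, not in the original source): the first 16
 * bytes of a message travel inline in the command layout (msg->first.data)
 * and only the remainder is carved into MLX5_CMD_DATA_BLOCK_SIZE (512-byte)
 * mailbox blocks. So msg->len == 16 needs 0 blocks, msg->len == 528 needs
 * DIV_ROUND_UP(512, 512) == 1 block, and msg->len == 529 needs 2 blocks.
 */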
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
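/*
 * Illustrative property (added for clarity, not in the original source):
 * writing back the bitwise complement of an XOR-8 checksum makes the signed
 * region XOR to 0xff, which is exactly what verify_block_sig() checks. For
 * bytes b0..bn-1 with s = b0 ^ ... ^ bn-1, appending ~s gives
 * b0 ^ ... ^ bn-1 ^ ~s == s ^ ~s == 0xff. calc_block_sig() applies this
 * twice: ctrl_sig over the non-data control fields, sig over the whole block.
 */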
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	default: return "unknown command opcode";
	}
}
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

struct mlx5_ifc_mbox_out_bits {
	u8	   status[0x8];
	u8	   reserved_at_8[0x18];

	u8	   syndrome[0x20];

	u8	   reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	   opcode[0x10];
	u8	   uid[0x10];

	u8	   reserved_at_20[0x10];
	u8	   op_mod[0x10];

	u8	   reserved_at_40[0x40];
};
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;
	u16 uid;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid    = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_core_err_rl(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode), opcode, op_mod,
			cmd_status_str(status), status, syndrome);
	else
		mlx5_core_dbg(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status,
		      syndrome);

	return cmd_status_to_err(status);
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++)  {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;
	bool poll_cmd = ent->polling;
	int alloc_ret;
	int cmd_mode;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = alloc_ent(cmd);
		if (alloc_ret < 0) {
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				free_cmd(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* Skip sending command to fw if internal error */
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}
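/*
 * Summary note (added for clarity, not in the original source): a descriptor
 * round trip is: fill the mlx5_cmd_layout slot, hand ownership to hardware
 * (lay->status_own = CMD_OWNER_HW), ring the doorbell, then either poll
 * status_own until the HW bit clears (polling mode) or wait for the
 * command-completion EQE to invoke mlx5_cmd_comp_handler().
 */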
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
/*  Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;
	ent->polling = force_polling;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
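/*
 * Illustrative note (added for clarity, not in the original source): taking
 * all max_reg_cmds slots of cmd->sem plus cmd->pages_sem drains every
 * outstanding command, so the polling/events mode flip above happens with
 * the command interface quiesced; new submitters simply block in
 * cmd_work_handler() until the semaphores are released again.
 */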
static int cmd_comp_notifier(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_core_dev *dev;
	struct mlx5_cmd *cmd;
	struct mlx5_eqe *eqe;

	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	eqe = data;

	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

	return NOTIFY_OK;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					free_ent(cmd, ent->idx);
					free_cmd(ent);
				}
				continue;
			}

			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			/* only real completion will free the entry slot */
			if (!forced)
				free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									ent->in->first.data,
									ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				if (!forced)
					free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	unsigned long flags;
	u64 vector;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static int status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}
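/*
 * Worked example (illustrative, not in the original source): with the cache
 * tiers declared below, a 300-byte inbox first tries the smallest tier whose
 * max_inbox_size (16 inline bytes plus one 512-byte data block) fits it; if
 * that free list is empty the loop falls through to the next tier, and only
 * when every fitting tier is exhausted does it allocate a fresh message.
 */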
static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
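/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * builds the mailbox with the MLX5_ST_SZ_DW()/MLX5_SET() helpers and issues
 * the command synchronously, e.g. a NOP:
 *
 *	u32 in[MLX5_ST_SZ_DW(nop_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(nop_out)] = {};
 *	int err;
 *
 *	MLX5_SET(nop_in, in, opcode, MLX5_CMD_OP_NOP);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * On success the opcode-specific output fields can be read back with
 * MLX5_GET(); on failure mlx5_cmd_check() has already translated the
 * firmware status into an errno.
 */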
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
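/*
 * Illustrative note (added for clarity, not in the original source):
 * num_inflight starts at 1 so that completing commands never see the
 * counter hit zero while the context is still live; only the extra
 * atomic_dec() in mlx5_cmd_cleanup_async_ctx() lets it reach zero, at
 * which point its wait_event() is satisfied once the last callback ran.
 */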
/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
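/*
 * Usage sketch (illustrative, not part of this file): an asynchronous
 * caller embeds struct mlx5_async_work in its own request. Assuming a
 * hypothetical wrapper struct and callback, and in/out built as in the
 * synchronous example above:
 *
 *	struct my_req {
 *		struct mlx5_async_work cb_work;
 *		u32 out[MLX5_ST_SZ_DW(nop_out)];
 *	};
 *
 *	static void my_done(int status, struct mlx5_async_work *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, cb_work);
 *		// inspect status / req->out here; callbacks may not sleep
 *	}
 *
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), req->out,
 *			       sizeof(req->out), my_done, &req->cb_work);
 */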
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
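/*
 * Illustrative note (added for clarity, not in the original source): each
 * tier holds messages whose inbox is the 16 inline bytes plus 1, 2, 16,
 * 256 or 512 data blocks, so common small commands are served from the
 * deep smallest tier while rare multi-kilobyte inboxes come from the
 * shallow large ones.
 */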
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = dev->device;

	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
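/*
 * Worked example (illustrative, not in the original source): if the first
 * dma_alloc_coherent() happens to return a buffer at, say, offset 0x800
 * into a page, the fallback allocates 2 * MLX5_ADAPTER_PAGE_SIZE - 1 bytes;
 * a region that large is guaranteed to contain a 4K-aligned address, which
 * PTR_ALIGN()/ALIGN() then pick for cmd_buf and the dma handle.
 */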
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = dev->device;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);