2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/pci.h>
38 #include "t4vf_common.h"
39 #include "t4vf_defs.h"
41 #include "../cxgb4/t4_regs.h"
42 #include "../cxgb4/t4_values.h"
43 #include "../cxgb4/t4fw_api.h"
46 * Wait for the device to become ready (signified by our "who am I" register
47 * returning a value other than all 1's). Return an error if it doesn't
50 int t4vf_wait_dev_ready(struct adapter
*adapter
)
52 const u32 whoami
= T4VF_PL_BASE_ADDR
+ PL_VF_WHOAMI
;
53 const u32 notready1
= 0xffffffff;
54 const u32 notready2
= 0xeeeeeeee;
57 val
= t4_read_reg(adapter
, whoami
);
58 if (val
!= notready1
&& val
!= notready2
)
61 val
= t4_read_reg(adapter
, whoami
);
62 if (val
!= notready1
&& val
!= notready2
)
69 * Get the reply to a mailbox command and store it in @rpl in big-endian order
70 * (since the firmware data structures are specified in a big-endian layout).
72 static void get_mbox_rpl(struct adapter
*adapter
, __be64
*rpl
, int size
,
75 for ( ; size
; size
-= 8, mbox_data
+= 8)
76 *rpl
++ = cpu_to_be64(t4_read_reg64(adapter
, mbox_data
));
80 * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
81 * @adapter: the adapter
82 * @cmd: the Firmware Mailbox Command or Reply
83 * @size: command length in bytes
84 * @access: the time (ms) needed to access the Firmware Mailbox
85 * @execute: the time (ms) the command spent being executed
87 static void t4vf_record_mbox(struct adapter
*adapter
, const __be64
*cmd
,
88 int size
, int access
, int execute
)
90 struct mbox_cmd_log
*log
= adapter
->mbox_log
;
91 struct mbox_cmd
*entry
;
94 entry
= mbox_cmd_log_entry(log
, log
->cursor
++);
95 if (log
->cursor
== log
->size
)
98 for (i
= 0; i
< size
/ 8; i
++)
99 entry
->cmd
[i
] = be64_to_cpu(cmd
[i
]);
100 while (i
< MBOX_LEN
/ 8)
102 entry
->timestamp
= jiffies
;
103 entry
->seqno
= log
->seqno
++;
104 entry
->access
= access
;
105 entry
->execute
= execute
;
109 * t4vf_wr_mbox_core - send a command to FW through the mailbox
110 * @adapter: the adapter
111 * @cmd: the command to write
112 * @size: command length in bytes
113 * @rpl: where to optionally store the reply
114 * @sleep_ok: if true we may sleep while awaiting command completion
116 * Sends the given command to FW through the mailbox and waits for the
117 * FW to execute the command. If @rpl is not %NULL it is used to store
118 * the FW's reply to the command. The command and its optional reply
119 * are of the same length. FW can take up to 500 ms to respond.
120 * @sleep_ok determines whether we may sleep while awaiting the response.
121 * If sleeping is allowed we use progressive backoff otherwise we spin.
123 * The return value is 0 on success or a negative errno on failure. A
124 * failure can happen either because we are not able to execute the
125 * command or FW executes it but signals an error. In the latter case
126 * the return value is the error code indicated by FW (negated).
128 int t4vf_wr_mbox_core(struct adapter
*adapter
, const void *cmd
, int size
,
129 void *rpl
, bool sleep_ok
)
131 static const int delay
[] = {
132 1, 1, 3, 5, 10, 10, 20, 50, 100
135 u16 access
= 0, execute
= 0;
137 int i
, ms
, delay_idx
, ret
;
139 u32 mbox_ctl
= T4VF_CIM_BASE_ADDR
+ CIM_VF_EXT_MAILBOX_CTRL
;
140 u32 cmd_op
= FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr
*)cmd
)->hi
));
141 __be64 cmd_rpl
[MBOX_LEN
/ 8];
142 struct mbox_list entry
;
144 /* In T6, mailbox size is changed to 128 bytes to avoid
145 * invalidating the entire prefetch buffer.
147 if (CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
)
148 mbox_data
= T4VF_MBDATA_BASE_ADDR
;
150 mbox_data
= T6VF_MBDATA_BASE_ADDR
;
153 * Commands must be multiples of 16 bytes in length and may not be
154 * larger than the size of the Mailbox Data register array.
156 if ((size
% 16) != 0 ||
157 size
> NUM_CIM_VF_MAILBOX_DATA_INSTANCES
* 4)
160 /* Queue ourselves onto the mailbox access list. When our entry is at
161 * the front of the list, we have rights to access the mailbox. So we
162 * wait [for a while] till we're at the front [or bail out with an
165 spin_lock(&adapter
->mbox_lock
);
166 list_add_tail(&entry
.list
, &adapter
->mlist
.list
);
167 spin_unlock(&adapter
->mbox_lock
);
172 for (i
= 0; ; i
+= ms
) {
173 /* If we've waited too long, return a busy indication. This
174 * really ought to be based on our initial position in the
175 * mailbox access list but this is a start. We very rearely
176 * contend on access to the mailbox ...
178 if (i
> FW_CMD_MAX_TIMEOUT
) {
179 spin_lock(&adapter
->mbox_lock
);
180 list_del(&entry
.list
);
181 spin_unlock(&adapter
->mbox_lock
);
183 t4vf_record_mbox(adapter
, cmd
, size
, access
, ret
);
187 /* If we're at the head, break out and start the mailbox
190 if (list_first_entry(&adapter
->mlist
.list
, struct mbox_list
,
194 /* Delay for a bit before checking again ... */
196 ms
= delay
[delay_idx
]; /* last element may repeat */
197 if (delay_idx
< ARRAY_SIZE(delay
) - 1)
206 * Loop trying to get ownership of the mailbox. Return an error
207 * if we can't gain ownership.
209 v
= MBOWNER_G(t4_read_reg(adapter
, mbox_ctl
));
210 for (i
= 0; v
== MBOX_OWNER_NONE
&& i
< 3; i
++)
211 v
= MBOWNER_G(t4_read_reg(adapter
, mbox_ctl
));
212 if (v
!= MBOX_OWNER_DRV
) {
213 spin_lock(&adapter
->mbox_lock
);
214 list_del(&entry
.list
);
215 spin_unlock(&adapter
->mbox_lock
);
216 ret
= (v
== MBOX_OWNER_FW
) ? -EBUSY
: -ETIMEDOUT
;
217 t4vf_record_mbox(adapter
, cmd
, size
, access
, ret
);
222 * Write the command array into the Mailbox Data register array and
223 * transfer ownership of the mailbox to the firmware.
225 * For the VFs, the Mailbox Data "registers" are actually backed by
226 * T4's "MA" interface rather than PL Registers (as is the case for
227 * the PFs). Because these are in different coherency domains, the
228 * write to the VF's PL-register-backed Mailbox Control can race in
229 * front of the writes to the MA-backed VF Mailbox Data "registers".
230 * So we need to do a read-back on at least one byte of the VF Mailbox
231 * Data registers before doing the write to the VF Mailbox Control
234 if (cmd_op
!= FW_VI_STATS_CMD
)
235 t4vf_record_mbox(adapter
, cmd
, size
, access
, 0);
236 for (i
= 0, p
= cmd
; i
< size
; i
+= 8)
237 t4_write_reg64(adapter
, mbox_data
+ i
, be64_to_cpu(*p
++));
238 t4_read_reg(adapter
, mbox_data
); /* flush write */
240 t4_write_reg(adapter
, mbox_ctl
,
241 MBMSGVALID_F
| MBOWNER_V(MBOX_OWNER_FW
));
242 t4_read_reg(adapter
, mbox_ctl
); /* flush write */
245 * Spin waiting for firmware to acknowledge processing our command.
250 for (i
= 0; i
< FW_CMD_MAX_TIMEOUT
; i
+= ms
) {
252 ms
= delay
[delay_idx
];
253 if (delay_idx
< ARRAY_SIZE(delay
) - 1)
260 * If we're the owner, see if this is the reply we wanted.
262 v
= t4_read_reg(adapter
, mbox_ctl
);
263 if (MBOWNER_G(v
) == MBOX_OWNER_DRV
) {
265 * If the Message Valid bit isn't on, revoke ownership
266 * of the mailbox and continue waiting for our reply.
268 if ((v
& MBMSGVALID_F
) == 0) {
269 t4_write_reg(adapter
, mbox_ctl
,
270 MBOWNER_V(MBOX_OWNER_NONE
));
275 * We now have our reply. Extract the command return
276 * value, copy the reply back to our caller's buffer
277 * (if specified) and revoke ownership of the mailbox.
278 * We return the (negated) firmware command return
279 * code (this depends on FW_SUCCESS == 0).
281 get_mbox_rpl(adapter
, cmd_rpl
, size
, mbox_data
);
283 /* return value in low-order little-endian word */
284 v
= be64_to_cpu(cmd_rpl
[0]);
287 /* request bit in high-order BE word */
288 WARN_ON((be32_to_cpu(*(const __be32
*)cmd
)
289 & FW_CMD_REQUEST_F
) == 0);
290 memcpy(rpl
, cmd_rpl
, size
);
291 WARN_ON((be32_to_cpu(*(__be32
*)rpl
)
292 & FW_CMD_REQUEST_F
) != 0);
294 t4_write_reg(adapter
, mbox_ctl
,
295 MBOWNER_V(MBOX_OWNER_NONE
));
297 if (cmd_op
!= FW_VI_STATS_CMD
)
298 t4vf_record_mbox(adapter
, cmd_rpl
, size
, access
,
300 spin_lock(&adapter
->mbox_lock
);
301 list_del(&entry
.list
);
302 spin_unlock(&adapter
->mbox_lock
);
303 return -FW_CMD_RETVAL_G(v
);
307 /* We timed out. Return the error ... */
309 t4vf_record_mbox(adapter
, cmd
, size
, access
, ret
);
310 spin_lock(&adapter
->mbox_lock
);
311 list_del(&entry
.list
);
312 spin_unlock(&adapter
->mbox_lock
);
/* Link capabilities we advertise: all supported speeds plus autonegotiation */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
		     FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
		     FW_PORT_CAP_ANEG)
322 * init_link_config - initialize a link's SW state
323 * @lc: structure holding the link state
324 * @caps: link capabilities
326 * Initializes the SW state maintained for each link, including the link's
327 * capabilities and default speed/flow-control/autonegotiation settings.
329 static void init_link_config(struct link_config
*lc
, unsigned int caps
)
331 lc
->supported
= caps
;
332 lc
->lp_advertising
= 0;
333 lc
->requested_speed
= 0;
335 lc
->requested_fc
= lc
->fc
= PAUSE_RX
| PAUSE_TX
;
336 if (lc
->supported
& FW_PORT_CAP_ANEG
) {
337 lc
->advertising
= lc
->supported
& ADVERT_MASK
;
338 lc
->autoneg
= AUTONEG_ENABLE
;
339 lc
->requested_fc
|= PAUSE_AUTONEG
;
342 lc
->autoneg
= AUTONEG_DISABLE
;
347 * t4vf_port_init - initialize port hardware/software state
348 * @adapter: the adapter
349 * @pidx: the adapter port index
351 int t4vf_port_init(struct adapter
*adapter
, int pidx
)
353 struct port_info
*pi
= adap2pinfo(adapter
, pidx
);
354 struct fw_vi_cmd vi_cmd
, vi_rpl
;
355 struct fw_port_cmd port_cmd
, port_rpl
;
359 * Execute a VI Read command to get our Virtual Interface information
360 * like MAC address, etc.
362 memset(&vi_cmd
, 0, sizeof(vi_cmd
));
363 vi_cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD
) |
366 vi_cmd
.alloc_to_len16
= cpu_to_be32(FW_LEN16(vi_cmd
));
367 vi_cmd
.type_viid
= cpu_to_be16(FW_VI_CMD_VIID_V(pi
->viid
));
368 v
= t4vf_wr_mbox(adapter
, &vi_cmd
, sizeof(vi_cmd
), &vi_rpl
);
372 BUG_ON(pi
->port_id
!= FW_VI_CMD_PORTID_G(vi_rpl
.portid_pkd
));
373 pi
->rss_size
= FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl
.rsssize_pkd
));
374 t4_os_set_hw_addr(adapter
, pidx
, vi_rpl
.mac
);
377 * If we don't have read access to our port information, we're done
378 * now. Otherwise, execute a PORT Read command to get it ...
380 if (!(adapter
->params
.vfres
.r_caps
& FW_CMD_CAP_PORT
))
383 memset(&port_cmd
, 0, sizeof(port_cmd
));
384 port_cmd
.op_to_portid
= cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD
) |
387 FW_PORT_CMD_PORTID_V(pi
->port_id
));
388 port_cmd
.action_to_len16
=
389 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO
) |
391 v
= t4vf_wr_mbox(adapter
, &port_cmd
, sizeof(port_cmd
), &port_rpl
);
395 v
= be32_to_cpu(port_rpl
.u
.info
.lstatus_to_modtype
);
396 pi
->mdio_addr
= (v
& FW_PORT_CMD_MDIOCAP_F
) ?
397 FW_PORT_CMD_MDIOADDR_G(v
) : -1;
398 pi
->port_type
= FW_PORT_CMD_PTYPE_G(v
);
399 pi
->mod_type
= FW_PORT_MOD_TYPE_NA
;
401 init_link_config(&pi
->link_cfg
, be16_to_cpu(port_rpl
.u
.info
.pcap
));
407 * t4vf_fw_reset - issue a reset to FW
408 * @adapter: the adapter
410 * Issues a reset command to FW. For a Physical Function this would
411 * result in the Firmware resetting all of its state. For a Virtual
412 * Function this just resets the state associated with the VF.
414 int t4vf_fw_reset(struct adapter
*adapter
)
416 struct fw_reset_cmd cmd
;
418 memset(&cmd
, 0, sizeof(cmd
));
419 cmd
.op_to_write
= cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD
) |
421 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
422 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
426 * t4vf_query_params - query FW or device parameters
427 * @adapter: the adapter
428 * @nparams: the number of parameters
429 * @params: the parameter names
430 * @vals: the parameter values
432 * Reads the values of firmware or device parameters. Up to 7 parameters
433 * can be queried at once.
435 static int t4vf_query_params(struct adapter
*adapter
, unsigned int nparams
,
436 const u32
*params
, u32
*vals
)
439 struct fw_params_cmd cmd
, rpl
;
440 struct fw_params_param
*p
;
446 memset(&cmd
, 0, sizeof(cmd
));
447 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD
) |
450 len16
= DIV_ROUND_UP(offsetof(struct fw_params_cmd
,
451 param
[nparams
].mnem
), 16);
452 cmd
.retval_len16
= cpu_to_be32(FW_CMD_LEN16_V(len16
));
453 for (i
= 0, p
= &cmd
.param
[0]; i
< nparams
; i
++, p
++)
454 p
->mnem
= htonl(*params
++);
456 ret
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
458 for (i
= 0, p
= &rpl
.param
[0]; i
< nparams
; i
++, p
++)
459 *vals
++ = be32_to_cpu(p
->val
);
464 * t4vf_set_params - sets FW or device parameters
465 * @adapter: the adapter
466 * @nparams: the number of parameters
467 * @params: the parameter names
468 * @vals: the parameter values
470 * Sets the values of firmware or device parameters. Up to 7 parameters
471 * can be specified at once.
473 int t4vf_set_params(struct adapter
*adapter
, unsigned int nparams
,
474 const u32
*params
, const u32
*vals
)
477 struct fw_params_cmd cmd
;
478 struct fw_params_param
*p
;
484 memset(&cmd
, 0, sizeof(cmd
));
485 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD
) |
488 len16
= DIV_ROUND_UP(offsetof(struct fw_params_cmd
,
489 param
[nparams
]), 16);
490 cmd
.retval_len16
= cpu_to_be32(FW_CMD_LEN16_V(len16
));
491 for (i
= 0, p
= &cmd
.param
[0]; i
< nparams
; i
++, p
++) {
492 p
->mnem
= cpu_to_be32(*params
++);
493 p
->val
= cpu_to_be32(*vals
++);
496 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
500 * t4vf_fl_pkt_align - return the fl packet alignment
501 * @adapter: the adapter
503 * T4 has a single field to specify the packing and padding boundary.
504 * T5 onwards has separate fields for this and hence the alignment for
505 * next packet offset is maximum of these two. And T6 changes the
506 * Ingress Padding Boundary Shift, so it's all a mess and it's best
507 * if we put this in low-level Common Code ...
510 int t4vf_fl_pkt_align(struct adapter
*adapter
)
512 u32 sge_control
, sge_control2
;
513 unsigned int ingpadboundary
, ingpackboundary
, fl_align
, ingpad_shift
;
515 sge_control
= adapter
->params
.sge
.sge_control
;
517 /* T4 uses a single control field to specify both the PCIe Padding and
518 * Packing Boundary. T5 introduced the ability to specify these
519 * separately. The actual Ingress Packet Data alignment boundary
520 * within Packed Buffer Mode is the maximum of these two
521 * specifications. (Note that it makes no real practical sense to
522 * have the Pading Boudary be larger than the Packing Boundary but you
523 * could set the chip up that way and, in fact, legacy T4 code would
524 * end doing this because it would initialize the Padding Boundary and
525 * leave the Packing Boundary initialized to 0 (16 bytes).)
526 * Padding Boundary values in T6 starts from 8B,
527 * where as it is 32B for T4 and T5.
529 if (CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
)
530 ingpad_shift
= INGPADBOUNDARY_SHIFT_X
;
532 ingpad_shift
= T6_INGPADBOUNDARY_SHIFT_X
;
534 ingpadboundary
= 1 << (INGPADBOUNDARY_G(sge_control
) + ingpad_shift
);
536 fl_align
= ingpadboundary
;
537 if (!is_t4(adapter
->params
.chip
)) {
538 /* T5 has a different interpretation of one of the PCIe Packing
541 sge_control2
= adapter
->params
.sge
.sge_control2
;
542 ingpackboundary
= INGPACKBOUNDARY_G(sge_control2
);
543 if (ingpackboundary
== INGPACKBOUNDARY_16B_X
)
544 ingpackboundary
= 16;
546 ingpackboundary
= 1 << (ingpackboundary
+
547 INGPACKBOUNDARY_SHIFT_X
);
549 fl_align
= max(ingpadboundary
, ingpackboundary
);
555 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
556 * @adapter: the adapter
558 * @qtype: the Ingress or Egress type for @qid
559 * @pbar2_qoffset: BAR2 Queue Offset
560 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
562 * Returns the BAR2 SGE Queue Registers information associated with the
563 * indicated Absolute Queue ID. These are passed back in return value
564 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
565 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
567 * This may return an error which indicates that BAR2 SGE Queue
568 * registers aren't available. If an error is not returned, then the
569 * following values are returned:
571 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
572 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
574 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
575 * require the "Inferred Queue ID" ability may be used. E.g. the
576 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
577 * then these "Inferred Queue ID" register may not be used.
579 int t4vf_bar2_sge_qregs(struct adapter
*adapter
,
581 enum t4_bar2_qtype qtype
,
583 unsigned int *pbar2_qid
)
585 unsigned int page_shift
, page_size
, qpp_shift
, qpp_mask
;
586 u64 bar2_page_offset
, bar2_qoffset
;
587 unsigned int bar2_qid
, bar2_qid_offset
, bar2_qinferred
;
589 /* T4 doesn't support BAR2 SGE Queue registers.
591 if (is_t4(adapter
->params
.chip
))
594 /* Get our SGE Page Size parameters.
596 page_shift
= adapter
->params
.sge
.sge_vf_hps
+ 10;
597 page_size
= 1 << page_shift
;
599 /* Get the right Queues per Page parameters for our Queue.
601 qpp_shift
= (qtype
== T4_BAR2_QTYPE_EGRESS
602 ? adapter
->params
.sge
.sge_vf_eq_qpp
603 : adapter
->params
.sge
.sge_vf_iq_qpp
);
604 qpp_mask
= (1 << qpp_shift
) - 1;
606 /* Calculate the basics of the BAR2 SGE Queue register area:
607 * o The BAR2 page the Queue registers will be in.
608 * o The BAR2 Queue ID.
609 * o The BAR2 Queue ID Offset into the BAR2 page.
611 bar2_page_offset
= ((u64
)(qid
>> qpp_shift
) << page_shift
);
612 bar2_qid
= qid
& qpp_mask
;
613 bar2_qid_offset
= bar2_qid
* SGE_UDB_SIZE
;
615 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
616 * hardware will infer the Absolute Queue ID simply from the writes to
617 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
618 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
619 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
620 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
621 * from the BAR2 Page and BAR2 Queue ID.
623 * One important censequence of this is that some BAR2 SGE registers
624 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
625 * there. But other registers synthesize the SGE Queue ID purely
626 * from the writes to the registers -- the Write Combined Doorbell
627 * Buffer is a good example. These BAR2 SGE Registers are only
628 * available for those BAR2 SGE Register areas where the SGE Absolute
629 * Queue ID can be inferred from simple writes.
631 bar2_qoffset
= bar2_page_offset
;
632 bar2_qinferred
= (bar2_qid_offset
< page_size
);
633 if (bar2_qinferred
) {
634 bar2_qoffset
+= bar2_qid_offset
;
638 *pbar2_qoffset
= bar2_qoffset
;
639 *pbar2_qid
= bar2_qid
;
644 * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
645 * @adapter: the adapter
647 * Retrieves various core SGE parameters in the form of hardware SGE
648 * register values. The caller is responsible for decoding these as
649 * needed. The SGE parameters are stored in @adapter->params.sge.
651 int t4vf_get_sge_params(struct adapter
*adapter
)
653 struct sge_params
*sge_params
= &adapter
->params
.sge
;
654 u32 params
[7], vals
[7];
657 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
658 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A
));
659 params
[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
660 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A
));
661 params
[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
662 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A
));
663 params
[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
664 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A
));
665 params
[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
666 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A
));
667 params
[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
668 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A
));
669 params
[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
670 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A
));
671 v
= t4vf_query_params(adapter
, 7, params
, vals
);
674 sge_params
->sge_control
= vals
[0];
675 sge_params
->sge_host_page_size
= vals
[1];
676 sge_params
->sge_fl_buffer_size
[0] = vals
[2];
677 sge_params
->sge_fl_buffer_size
[1] = vals
[3];
678 sge_params
->sge_timer_value_0_and_1
= vals
[4];
679 sge_params
->sge_timer_value_2_and_3
= vals
[5];
680 sge_params
->sge_timer_value_4_and_5
= vals
[6];
682 /* T4 uses a single control field to specify both the PCIe Padding and
683 * Packing Boundary. T5 introduced the ability to specify these
684 * separately with the Padding Boundary in SGE_CONTROL and and Packing
685 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
686 * SGE_CONTROL in order to determine how ingress packet data will be
687 * laid out in Packed Buffer Mode. Unfortunately, older versions of
688 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
689 * failure grabbing it we throw an error since we can't figure out the
692 if (!is_t4(adapter
->params
.chip
)) {
693 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
694 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A
));
695 v
= t4vf_query_params(adapter
, 1, params
, vals
);
696 if (v
!= FW_SUCCESS
) {
697 dev_err(adapter
->pdev_dev
,
698 "Unable to get SGE Control2; "
699 "probably old firmware.\n");
702 sge_params
->sge_control2
= vals
[0];
705 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
706 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A
));
707 params
[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
708 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A
));
709 v
= t4vf_query_params(adapter
, 2, params
, vals
);
712 sge_params
->sge_ingress_rx_threshold
= vals
[0];
713 sge_params
->sge_congestion_control
= vals
[1];
715 /* For T5 and later we want to use the new BAR2 Doorbells.
716 * Unfortunately, older firmware didn't allow the this register to be
719 if (!is_t4(adapter
->params
.chip
)) {
721 unsigned int pf
, s_hps
, s_qpp
;
723 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
724 FW_PARAMS_PARAM_XYZ_V(
725 SGE_EGRESS_QUEUES_PER_PAGE_VF_A
));
726 params
[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG
) |
727 FW_PARAMS_PARAM_XYZ_V(
728 SGE_INGRESS_QUEUES_PER_PAGE_VF_A
));
729 v
= t4vf_query_params(adapter
, 2, params
, vals
);
730 if (v
!= FW_SUCCESS
) {
731 dev_warn(adapter
->pdev_dev
,
732 "Unable to get VF SGE Queues/Page; "
733 "probably old firmware.\n");
736 sge_params
->sge_egress_queues_per_page
= vals
[0];
737 sge_params
->sge_ingress_queues_per_page
= vals
[1];
739 /* We need the Queues/Page for our VF. This is based on the
740 * PF from which we're instantiated and is indexed in the
741 * register we just read. Do it once here so other code in
742 * the driver can just use it.
744 whoami
= t4_read_reg(adapter
,
745 T4VF_PL_BASE_ADDR
+ PL_VF_WHOAMI_A
);
746 pf
= CHELSIO_CHIP_VERSION(adapter
->params
.chip
) <= CHELSIO_T5
?
747 SOURCEPF_G(whoami
) : T6_SOURCEPF_G(whoami
);
749 s_hps
= (HOSTPAGESIZEPF0_S
+
750 (HOSTPAGESIZEPF1_S
- HOSTPAGESIZEPF0_S
) * pf
);
751 sge_params
->sge_vf_hps
=
752 ((sge_params
->sge_host_page_size
>> s_hps
)
753 & HOSTPAGESIZEPF0_M
);
755 s_qpp
= (QUEUESPERPAGEPF0_S
+
756 (QUEUESPERPAGEPF1_S
- QUEUESPERPAGEPF0_S
) * pf
);
757 sge_params
->sge_vf_eq_qpp
=
758 ((sge_params
->sge_egress_queues_per_page
>> s_qpp
)
759 & QUEUESPERPAGEPF0_M
);
760 sge_params
->sge_vf_iq_qpp
=
761 ((sge_params
->sge_ingress_queues_per_page
>> s_qpp
)
762 & QUEUESPERPAGEPF0_M
);
769 * t4vf_get_vpd_params - retrieve device VPD paremeters
770 * @adapter: the adapter
772 * Retrives various device Vital Product Data parameters. The parameters
773 * are stored in @adapter->params.vpd.
775 int t4vf_get_vpd_params(struct adapter
*adapter
)
777 struct vpd_params
*vpd_params
= &adapter
->params
.vpd
;
778 u32 params
[7], vals
[7];
781 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
782 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK
));
783 v
= t4vf_query_params(adapter
, 1, params
, vals
);
786 vpd_params
->cclk
= vals
[0];
792 * t4vf_get_dev_params - retrieve device paremeters
793 * @adapter: the adapter
795 * Retrives various device parameters. The parameters are stored in
796 * @adapter->params.dev.
798 int t4vf_get_dev_params(struct adapter
*adapter
)
800 struct dev_params
*dev_params
= &adapter
->params
.dev
;
801 u32 params
[7], vals
[7];
804 params
[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
805 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV
));
806 params
[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV
) |
807 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV
));
808 v
= t4vf_query_params(adapter
, 2, params
, vals
);
811 dev_params
->fwrev
= vals
[0];
812 dev_params
->tprev
= vals
[1];
818 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
819 * @adapter: the adapter
821 * Retrieves global RSS mode and parameters with which we have to live
822 * and stores them in the @adapter's RSS parameters.
824 int t4vf_get_rss_glb_config(struct adapter
*adapter
)
826 struct rss_params
*rss
= &adapter
->params
.rss
;
827 struct fw_rss_glb_config_cmd cmd
, rpl
;
831 * Execute an RSS Global Configuration read command to retrieve
832 * our RSS configuration.
834 memset(&cmd
, 0, sizeof(cmd
));
835 cmd
.op_to_write
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD
) |
838 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
839 v
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
844 * Transate the big-endian RSS Global Configuration into our
845 * cpu-endian format based on the RSS mode. We also do first level
846 * filtering at this point to weed out modes which don't support
849 rss
->mode
= FW_RSS_GLB_CONFIG_CMD_MODE_G(
850 be32_to_cpu(rpl
.u
.manual
.mode_pkd
));
852 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
: {
853 u32 word
= be32_to_cpu(
854 rpl
.u
.basicvirtual
.synmapen_to_hashtoeplitz
);
856 rss
->u
.basicvirtual
.synmapen
=
857 ((word
& FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F
) != 0);
858 rss
->u
.basicvirtual
.syn4tupenipv6
=
859 ((word
& FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F
) != 0);
860 rss
->u
.basicvirtual
.syn2tupenipv6
=
861 ((word
& FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F
) != 0);
862 rss
->u
.basicvirtual
.syn4tupenipv4
=
863 ((word
& FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F
) != 0);
864 rss
->u
.basicvirtual
.syn2tupenipv4
=
865 ((word
& FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F
) != 0);
867 rss
->u
.basicvirtual
.ofdmapen
=
868 ((word
& FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F
) != 0);
870 rss
->u
.basicvirtual
.tnlmapen
=
871 ((word
& FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F
) != 0);
872 rss
->u
.basicvirtual
.tnlalllookup
=
873 ((word
& FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F
) != 0);
875 rss
->u
.basicvirtual
.hashtoeplitz
=
876 ((word
& FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F
) != 0);
878 /* we need at least Tunnel Map Enable to be set */
879 if (!rss
->u
.basicvirtual
.tnlmapen
)
885 /* all unknown/unsupported RSS modes result in an error */
893 * t4vf_get_vfres - retrieve VF resource limits
894 * @adapter: the adapter
896 * Retrieves configured resource limits and capabilities for a virtual
897 * function. The results are stored in @adapter->vfres.
899 int t4vf_get_vfres(struct adapter
*adapter
)
901 struct vf_resources
*vfres
= &adapter
->params
.vfres
;
902 struct fw_pfvf_cmd cmd
, rpl
;
907 * Execute PFVF Read command to get VF resource limits; bail out early
908 * with error on command failure.
910 memset(&cmd
, 0, sizeof(cmd
));
911 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD
) |
914 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
915 v
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
920 * Extract VF resource limits and return success.
922 word
= be32_to_cpu(rpl
.niqflint_niq
);
923 vfres
->niqflint
= FW_PFVF_CMD_NIQFLINT_G(word
);
924 vfres
->niq
= FW_PFVF_CMD_NIQ_G(word
);
926 word
= be32_to_cpu(rpl
.type_to_neq
);
927 vfres
->neq
= FW_PFVF_CMD_NEQ_G(word
);
928 vfres
->pmask
= FW_PFVF_CMD_PMASK_G(word
);
930 word
= be32_to_cpu(rpl
.tc_to_nexactf
);
931 vfres
->tc
= FW_PFVF_CMD_TC_G(word
);
932 vfres
->nvi
= FW_PFVF_CMD_NVI_G(word
);
933 vfres
->nexactf
= FW_PFVF_CMD_NEXACTF_G(word
);
935 word
= be32_to_cpu(rpl
.r_caps_to_nethctrl
);
936 vfres
->r_caps
= FW_PFVF_CMD_R_CAPS_G(word
);
937 vfres
->wx_caps
= FW_PFVF_CMD_WX_CAPS_G(word
);
938 vfres
->nethctrl
= FW_PFVF_CMD_NETHCTRL_G(word
);
944 * t4vf_read_rss_vi_config - read a VI's RSS configuration
945 * @adapter: the adapter
946 * @viid: Virtual Interface ID
947 * @config: pointer to host-native VI RSS Configuration buffer
949 * Reads the Virtual Interface's RSS configuration information and
950 * translates it into CPU-native format.
952 int t4vf_read_rss_vi_config(struct adapter
*adapter
, unsigned int viid
,
953 union rss_vi_config
*config
)
955 struct fw_rss_vi_config_cmd cmd
, rpl
;
958 memset(&cmd
, 0, sizeof(cmd
));
959 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD
) |
962 FW_RSS_VI_CONFIG_CMD_VIID(viid
));
963 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
964 v
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
968 switch (adapter
->params
.rss
.mode
) {
969 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
: {
970 u32 word
= be32_to_cpu(rpl
.u
.basicvirtual
.defaultq_to_udpen
);
972 config
->basicvirtual
.ip6fourtupen
=
973 ((word
& FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F
) != 0);
974 config
->basicvirtual
.ip6twotupen
=
975 ((word
& FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F
) != 0);
976 config
->basicvirtual
.ip4fourtupen
=
977 ((word
& FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F
) != 0);
978 config
->basicvirtual
.ip4twotupen
=
979 ((word
& FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F
) != 0);
980 config
->basicvirtual
.udpen
=
981 ((word
& FW_RSS_VI_CONFIG_CMD_UDPEN_F
) != 0);
982 config
->basicvirtual
.defaultq
=
983 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word
);
995 * t4vf_write_rss_vi_config - write a VI's RSS configuration
996 * @adapter: the adapter
997 * @viid: Virtual Interface ID
998 * @config: pointer to host-native VI RSS Configuration buffer
1000 * Write the Virtual Interface's RSS configuration information
1001 * (translating it into firmware-native format before writing).
1003 int t4vf_write_rss_vi_config(struct adapter
*adapter
, unsigned int viid
,
1004 union rss_vi_config
*config
)
1006 struct fw_rss_vi_config_cmd cmd
, rpl
;
1008 memset(&cmd
, 0, sizeof(cmd
));
1009 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD
) |
1012 FW_RSS_VI_CONFIG_CMD_VIID(viid
));
1013 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
1014 switch (adapter
->params
.rss
.mode
) {
1015 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
: {
1018 if (config
->basicvirtual
.ip6fourtupen
)
1019 word
|= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F
;
1020 if (config
->basicvirtual
.ip6twotupen
)
1021 word
|= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F
;
1022 if (config
->basicvirtual
.ip4fourtupen
)
1023 word
|= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F
;
1024 if (config
->basicvirtual
.ip4twotupen
)
1025 word
|= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F
;
1026 if (config
->basicvirtual
.udpen
)
1027 word
|= FW_RSS_VI_CONFIG_CMD_UDPEN_F
;
1028 word
|= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
1029 config
->basicvirtual
.defaultq
);
1030 cmd
.u
.basicvirtual
.defaultq_to_udpen
= cpu_to_be32(word
);
1038 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
1042 * t4vf_config_rss_range - configure a portion of the RSS mapping table
1043 * @adapter: the adapter
1044 * @viid: Virtual Interface of RSS Table Slice
1045 * @start: starting entry in the table to write
1046 * @n: how many table entries to write
1047 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
1048 * @nrspq: number of values in @rspq
1050 * Programs the selected part of the VI's RSS mapping table with the
1051 * provided values. If @nrspq < @n the supplied values are used repeatedly
1052 * until the full table range is populated.
1054 * The caller must ensure the values in @rspq are in the range 0..1023.
1056 int t4vf_config_rss_range(struct adapter
*adapter
, unsigned int viid
,
1057 int start
, int n
, const u16
*rspq
, int nrspq
)
1059 const u16
*rsp
= rspq
;
1060 const u16
*rsp_end
= rspq
+nrspq
;
1061 struct fw_rss_ind_tbl_cmd cmd
;
1064 * Initialize firmware command template to write the RSS table.
1066 memset(&cmd
, 0, sizeof(cmd
));
1067 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD
) |
1070 FW_RSS_IND_TBL_CMD_VIID_V(viid
));
1071 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
1074 * Each firmware RSS command can accommodate up to 32 RSS Ingress
1075 * Queue Identifiers. These Ingress Queue IDs are packed three to
1076 * a 32-bit word as 10-bit values with the upper remaining 2 bits
1080 __be32
*qp
= &cmd
.iq0_to_iq2
;
1081 int nq
= min(n
, 32);
1085 * Set up the firmware RSS command header to send the next
1086 * "nq" Ingress Queue IDs to the firmware.
1088 cmd
.niqid
= cpu_to_be16(nq
);
1089 cmd
.startidx
= cpu_to_be16(start
);
1092 * "nq" more done for the start of the next loop.
1098 * While there are still Ingress Queue IDs to stuff into the
1099 * current firmware RSS command, retrieve them from the
1100 * Ingress Queue ID array and insert them into the command.
1104 * Grab up to the next 3 Ingress Queue IDs (wrapping
1105 * around the Ingress Queue ID array if necessary) and
1106 * insert them into the firmware RSS command at the
1107 * current 3-tuple position within the commad.
1111 int nqbuf
= min(3, nq
);
1114 qbuf
[0] = qbuf
[1] = qbuf
[2] = 0;
1121 *qp
++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf
[0]) |
1122 FW_RSS_IND_TBL_CMD_IQ1_V(qbuf
[1]) |
1123 FW_RSS_IND_TBL_CMD_IQ2_V(qbuf
[2]));
1127 * Send this portion of the RRS table update to the firmware;
1128 * bail out on any errors.
1130 ret
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1138 * t4vf_alloc_vi - allocate a virtual interface on a port
1139 * @adapter: the adapter
1140 * @port_id: physical port associated with the VI
1142 * Allocate a new Virtual Interface and bind it to the indicated
1143 * physical port. Return the new Virtual Interface Identifier on
1144 * success, or a [negative] error number on failure.
1146 int t4vf_alloc_vi(struct adapter
*adapter
, int port_id
)
1148 struct fw_vi_cmd cmd
, rpl
;
1152 * Execute a VI command to allocate Virtual Interface and return its
1155 memset(&cmd
, 0, sizeof(cmd
));
1156 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD
) |
1160 cmd
.alloc_to_len16
= cpu_to_be32(FW_LEN16(cmd
) |
1162 cmd
.portid_pkd
= FW_VI_CMD_PORTID_V(port_id
);
1163 v
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
1167 return FW_VI_CMD_VIID_G(be16_to_cpu(rpl
.type_viid
));
1171 * t4vf_free_vi -- free a virtual interface
1172 * @adapter: the adapter
1173 * @viid: the virtual interface identifier
1175 * Free a previously allocated Virtual Interface. Return an error on
1178 int t4vf_free_vi(struct adapter
*adapter
, int viid
)
1180 struct fw_vi_cmd cmd
;
1183 * Execute a VI command to free the Virtual Interface.
1185 memset(&cmd
, 0, sizeof(cmd
));
1186 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD
) |
1189 cmd
.alloc_to_len16
= cpu_to_be32(FW_LEN16(cmd
) |
1191 cmd
.type_viid
= cpu_to_be16(FW_VI_CMD_VIID_V(viid
));
1192 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1196 * t4vf_enable_vi - enable/disable a virtual interface
1197 * @adapter: the adapter
1198 * @viid: the Virtual Interface ID
1199 * @rx_en: 1=enable Rx, 0=disable Rx
1200 * @tx_en: 1=enable Tx, 0=disable Tx
1202 * Enables/disables a virtual interface.
1204 int t4vf_enable_vi(struct adapter
*adapter
, unsigned int viid
,
1205 bool rx_en
, bool tx_en
)
1207 struct fw_vi_enable_cmd cmd
;
1209 memset(&cmd
, 0, sizeof(cmd
));
1210 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD
) |
1213 FW_VI_ENABLE_CMD_VIID_V(viid
));
1214 cmd
.ien_to_len16
= cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en
) |
1215 FW_VI_ENABLE_CMD_EEN_V(tx_en
) |
1217 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1221 * t4vf_identify_port - identify a VI's port by blinking its LED
1222 * @adapter: the adapter
1223 * @viid: the Virtual Interface ID
1224 * @nblinks: how many times to blink LED at 2.5 Hz
1226 * Identifies a VI's port by blinking its LED.
1228 int t4vf_identify_port(struct adapter
*adapter
, unsigned int viid
,
1229 unsigned int nblinks
)
1231 struct fw_vi_enable_cmd cmd
;
1233 memset(&cmd
, 0, sizeof(cmd
));
1234 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD
) |
1237 FW_VI_ENABLE_CMD_VIID_V(viid
));
1238 cmd
.ien_to_len16
= cpu_to_be32(FW_VI_ENABLE_CMD_LED_F
|
1240 cmd
.blinkdur
= cpu_to_be16(nblinks
);
1241 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1245 * t4vf_set_rxmode - set Rx properties of a virtual interface
1246 * @adapter: the adapter
1248 * @mtu: the new MTU or -1 for no change
1249 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
1250 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
1251 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
1252 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
1255 * Sets Rx properties of a virtual interface.
1257 int t4vf_set_rxmode(struct adapter
*adapter
, unsigned int viid
,
1258 int mtu
, int promisc
, int all_multi
, int bcast
, int vlanex
,
1261 struct fw_vi_rxmode_cmd cmd
;
1263 /* convert to FW values */
1265 mtu
= FW_VI_RXMODE_CMD_MTU_M
;
1267 promisc
= FW_VI_RXMODE_CMD_PROMISCEN_M
;
1269 all_multi
= FW_VI_RXMODE_CMD_ALLMULTIEN_M
;
1271 bcast
= FW_VI_RXMODE_CMD_BROADCASTEN_M
;
1273 vlanex
= FW_VI_RXMODE_CMD_VLANEXEN_M
;
1275 memset(&cmd
, 0, sizeof(cmd
));
1276 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD
) |
1279 FW_VI_RXMODE_CMD_VIID_V(viid
));
1280 cmd
.retval_len16
= cpu_to_be32(FW_LEN16(cmd
));
1281 cmd
.mtu_to_vlanexen
=
1282 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu
) |
1283 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc
) |
1284 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi
) |
1285 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast
) |
1286 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex
));
1287 return t4vf_wr_mbox_core(adapter
, &cmd
, sizeof(cmd
), NULL
, sleep_ok
);
1291 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
1292 * @adapter: the adapter
1293 * @viid: the Virtual Interface Identifier
1294 * @free: if true any existing filters for this VI id are first removed
1295 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1296 * @addr: the MAC address(es)
1297 * @idx: where to store the index of each allocated filter
1298 * @hash: pointer to hash address filter bitmap
1299 * @sleep_ok: call is allowed to sleep
1301 * Allocates an exact-match filter for each of the supplied addresses and
1302 * sets it to the corresponding address. If @idx is not %NULL it should
1303 * have at least @naddr entries, each of which will be set to the index of
1304 * the filter allocated for the corresponding MAC address. If a filter
1305 * could not be allocated for an address its index is set to 0xffff.
1306 * If @hash is not %NULL addresses that fail to allocate an exact filter
1307 * are hashed and update the hash filter bitmap pointed at by @hash.
1309 * Returns a negative error number or the number of filters allocated.
1311 int t4vf_alloc_mac_filt(struct adapter
*adapter
, unsigned int viid
, bool free
,
1312 unsigned int naddr
, const u8
**addr
, u16
*idx
,
1313 u64
*hash
, bool sleep_ok
)
1315 int offset
, ret
= 0;
1316 unsigned nfilters
= 0;
1317 unsigned int rem
= naddr
;
1318 struct fw_vi_mac_cmd cmd
, rpl
;
1319 unsigned int max_naddr
= adapter
->params
.arch
.mps_tcam_size
;
1321 if (naddr
> max_naddr
)
1324 for (offset
= 0; offset
< naddr
; /**/) {
1325 unsigned int fw_naddr
= (rem
< ARRAY_SIZE(cmd
.u
.exact
)
1327 : ARRAY_SIZE(cmd
.u
.exact
));
1328 size_t len16
= DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd
,
1329 u
.exact
[fw_naddr
]), 16);
1330 struct fw_vi_mac_exact
*p
;
1333 memset(&cmd
, 0, sizeof(cmd
));
1334 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
1337 (free
? FW_CMD_EXEC_F
: 0) |
1338 FW_VI_MAC_CMD_VIID_V(viid
));
1339 cmd
.freemacs_to_len16
=
1340 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free
) |
1341 FW_CMD_LEN16_V(len16
));
1343 for (i
= 0, p
= cmd
.u
.exact
; i
< fw_naddr
; i
++, p
++) {
1344 p
->valid_to_idx
= cpu_to_be16(
1345 FW_VI_MAC_CMD_VALID_F
|
1346 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC
));
1347 memcpy(p
->macaddr
, addr
[offset
+i
], sizeof(p
->macaddr
));
1351 ret
= t4vf_wr_mbox_core(adapter
, &cmd
, sizeof(cmd
), &rpl
,
1353 if (ret
&& ret
!= -ENOMEM
)
1356 for (i
= 0, p
= rpl
.u
.exact
; i
< fw_naddr
; i
++, p
++) {
1357 u16 index
= FW_VI_MAC_CMD_IDX_G(
1358 be16_to_cpu(p
->valid_to_idx
));
1365 if (index
< max_naddr
)
1368 *hash
|= (1ULL << hash_mac_addr(addr
[offset
+i
]));
1377 * If there were no errors or we merely ran out of room in our MAC
1378 * address arena, return the number of filters actually written.
1380 if (ret
== 0 || ret
== -ENOMEM
)
1386 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
1387 * @adapter: the adapter
1389 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1390 * @addr: the MAC address(es)
1391 * @sleep_ok: call is allowed to sleep
1393 * Frees the exact-match filter for each of the supplied addresses
1395 * Returns a negative error number or the number of filters freed.
1397 int t4vf_free_mac_filt(struct adapter
*adapter
, unsigned int viid
,
1398 unsigned int naddr
, const u8
**addr
, bool sleep_ok
)
1400 int offset
, ret
= 0;
1401 struct fw_vi_mac_cmd cmd
;
1402 unsigned int nfilters
= 0;
1403 unsigned int max_naddr
= adapter
->params
.arch
.mps_tcam_size
;
1404 unsigned int rem
= naddr
;
1406 if (naddr
> max_naddr
)
1409 for (offset
= 0; offset
< (int)naddr
; /**/) {
1410 unsigned int fw_naddr
= (rem
< ARRAY_SIZE(cmd
.u
.exact
) ?
1411 rem
: ARRAY_SIZE(cmd
.u
.exact
));
1412 size_t len16
= DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd
,
1413 u
.exact
[fw_naddr
]), 16);
1414 struct fw_vi_mac_exact
*p
;
1417 memset(&cmd
, 0, sizeof(cmd
));
1418 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
1422 FW_VI_MAC_CMD_VIID_V(viid
));
1423 cmd
.freemacs_to_len16
=
1424 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
1425 FW_CMD_LEN16_V(len16
));
1427 for (i
= 0, p
= cmd
.u
.exact
; i
< (int)fw_naddr
; i
++, p
++) {
1428 p
->valid_to_idx
= cpu_to_be16(
1429 FW_VI_MAC_CMD_VALID_F
|
1430 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE
));
1431 memcpy(p
->macaddr
, addr
[offset
+i
], sizeof(p
->macaddr
));
1434 ret
= t4vf_wr_mbox_core(adapter
, &cmd
, sizeof(cmd
), &cmd
,
1439 for (i
= 0, p
= cmd
.u
.exact
; i
< fw_naddr
; i
++, p
++) {
1440 u16 index
= FW_VI_MAC_CMD_IDX_G(
1441 be16_to_cpu(p
->valid_to_idx
));
1443 if (index
< max_naddr
)
1457 * t4vf_change_mac - modifies the exact-match filter for a MAC address
1458 * @adapter: the adapter
1459 * @viid: the Virtual Interface ID
1460 * @idx: index of existing filter for old value of MAC address, or -1
1461 * @addr: the new MAC address value
1462 * @persist: if idx < 0, the new MAC allocation should be persistent
1464 * Modifies an exact-match filter and sets it to the new MAC address.
1465 * Note that in general it is not possible to modify the value of a given
1466 * filter so the generic way to modify an address filter is to free the
1467 * one being used by the old address value and allocate a new filter for
1468 * the new address value. @idx can be -1 if the address is a new
1471 * Returns a negative error number or the index of the filter with the new
1474 int t4vf_change_mac(struct adapter
*adapter
, unsigned int viid
,
1475 int idx
, const u8
*addr
, bool persist
)
1478 struct fw_vi_mac_cmd cmd
, rpl
;
1479 struct fw_vi_mac_exact
*p
= &cmd
.u
.exact
[0];
1480 size_t len16
= DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd
,
1482 unsigned int max_mac_addr
= adapter
->params
.arch
.mps_tcam_size
;
1485 * If this is a new allocation, determine whether it should be
1486 * persistent (across a "freemacs" operation) or not.
1489 idx
= persist
? FW_VI_MAC_ADD_PERSIST_MAC
: FW_VI_MAC_ADD_MAC
;
1491 memset(&cmd
, 0, sizeof(cmd
));
1492 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
1495 FW_VI_MAC_CMD_VIID_V(viid
));
1496 cmd
.freemacs_to_len16
= cpu_to_be32(FW_CMD_LEN16_V(len16
));
1497 p
->valid_to_idx
= cpu_to_be16(FW_VI_MAC_CMD_VALID_F
|
1498 FW_VI_MAC_CMD_IDX_V(idx
));
1499 memcpy(p
->macaddr
, addr
, sizeof(p
->macaddr
));
1501 ret
= t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), &rpl
);
1503 p
= &rpl
.u
.exact
[0];
1504 ret
= FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p
->valid_to_idx
));
1505 if (ret
>= max_mac_addr
)
1512 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
1513 * @adapter: the adapter
1514 * @viid: the Virtual Interface Identifier
1515 * @ucast: whether the hash filter should also match unicast addresses
1516 * @vec: the value to be written to the hash filter
1517 * @sleep_ok: call is allowed to sleep
1519 * Sets the 64-bit inexact-match hash filter for a virtual interface.
1521 int t4vf_set_addr_hash(struct adapter
*adapter
, unsigned int viid
,
1522 bool ucast
, u64 vec
, bool sleep_ok
)
1524 struct fw_vi_mac_cmd cmd
;
1525 size_t len16
= DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd
,
1528 memset(&cmd
, 0, sizeof(cmd
));
1529 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD
) |
1532 FW_VI_ENABLE_CMD_VIID_V(viid
));
1533 cmd
.freemacs_to_len16
= cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F
|
1534 FW_VI_MAC_CMD_HASHUNIEN_V(ucast
) |
1535 FW_CMD_LEN16_V(len16
));
1536 cmd
.u
.hash
.hashvec
= cpu_to_be64(vec
);
1537 return t4vf_wr_mbox_core(adapter
, &cmd
, sizeof(cmd
), NULL
, sleep_ok
);
1541 * t4vf_get_port_stats - collect "port" statistics
1542 * @adapter: the adapter
1543 * @pidx: the port index
1544 * @s: the stats structure to fill
1546 * Collect statistics for the "port"'s Virtual Interface.
1548 int t4vf_get_port_stats(struct adapter
*adapter
, int pidx
,
1549 struct t4vf_port_stats
*s
)
1551 struct port_info
*pi
= adap2pinfo(adapter
, pidx
);
1552 struct fw_vi_stats_vf fwstats
;
1553 unsigned int rem
= VI_VF_NUM_STATS
;
1554 __be64
*fwsp
= (__be64
*)&fwstats
;
1557 * Grab the Virtual Interface statistics a chunk at a time via mailbox
1558 * commands. We could use a Work Request and get all of them at once
1559 * but that's an asynchronous interface which is awkward to use.
1562 unsigned int ix
= VI_VF_NUM_STATS
- rem
;
1563 unsigned int nstats
= min(6U, rem
);
1564 struct fw_vi_stats_cmd cmd
, rpl
;
1565 size_t len
= (offsetof(struct fw_vi_stats_cmd
, u
) +
1566 sizeof(struct fw_vi_stats_ctl
));
1567 size_t len16
= DIV_ROUND_UP(len
, 16);
1570 memset(&cmd
, 0, sizeof(cmd
));
1571 cmd
.op_to_viid
= cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD
) |
1572 FW_VI_STATS_CMD_VIID_V(pi
->viid
) |
1575 cmd
.retval_len16
= cpu_to_be32(FW_CMD_LEN16_V(len16
));
1576 cmd
.u
.ctl
.nstats_ix
=
1577 cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix
) |
1578 FW_VI_STATS_CMD_NSTATS_V(nstats
));
1579 ret
= t4vf_wr_mbox_ns(adapter
, &cmd
, len
, &rpl
);
1583 memcpy(fwsp
, &rpl
.u
.ctl
.stat0
, sizeof(__be64
) * nstats
);
1590 * Translate firmware statistics into host native statistics.
1592 s
->tx_bcast_bytes
= be64_to_cpu(fwstats
.tx_bcast_bytes
);
1593 s
->tx_bcast_frames
= be64_to_cpu(fwstats
.tx_bcast_frames
);
1594 s
->tx_mcast_bytes
= be64_to_cpu(fwstats
.tx_mcast_bytes
);
1595 s
->tx_mcast_frames
= be64_to_cpu(fwstats
.tx_mcast_frames
);
1596 s
->tx_ucast_bytes
= be64_to_cpu(fwstats
.tx_ucast_bytes
);
1597 s
->tx_ucast_frames
= be64_to_cpu(fwstats
.tx_ucast_frames
);
1598 s
->tx_drop_frames
= be64_to_cpu(fwstats
.tx_drop_frames
);
1599 s
->tx_offload_bytes
= be64_to_cpu(fwstats
.tx_offload_bytes
);
1600 s
->tx_offload_frames
= be64_to_cpu(fwstats
.tx_offload_frames
);
1602 s
->rx_bcast_bytes
= be64_to_cpu(fwstats
.rx_bcast_bytes
);
1603 s
->rx_bcast_frames
= be64_to_cpu(fwstats
.rx_bcast_frames
);
1604 s
->rx_mcast_bytes
= be64_to_cpu(fwstats
.rx_mcast_bytes
);
1605 s
->rx_mcast_frames
= be64_to_cpu(fwstats
.rx_mcast_frames
);
1606 s
->rx_ucast_bytes
= be64_to_cpu(fwstats
.rx_ucast_bytes
);
1607 s
->rx_ucast_frames
= be64_to_cpu(fwstats
.rx_ucast_frames
);
1609 s
->rx_err_frames
= be64_to_cpu(fwstats
.rx_err_frames
);
1615 * t4vf_iq_free - free an ingress queue and its free lists
1616 * @adapter: the adapter
1617 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
1618 * @iqid: ingress queue ID
1619 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
1620 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
1622 * Frees an ingress queue and its associated free lists, if any.
1624 int t4vf_iq_free(struct adapter
*adapter
, unsigned int iqtype
,
1625 unsigned int iqid
, unsigned int fl0id
, unsigned int fl1id
)
1627 struct fw_iq_cmd cmd
;
1629 memset(&cmd
, 0, sizeof(cmd
));
1630 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD
) |
1633 cmd
.alloc_to_len16
= cpu_to_be32(FW_IQ_CMD_FREE_F
|
1635 cmd
.type_to_iqandstindex
=
1636 cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype
));
1638 cmd
.iqid
= cpu_to_be16(iqid
);
1639 cmd
.fl0id
= cpu_to_be16(fl0id
);
1640 cmd
.fl1id
= cpu_to_be16(fl1id
);
1641 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1645 * t4vf_eth_eq_free - free an Ethernet egress queue
1646 * @adapter: the adapter
1647 * @eqid: egress queue ID
1649 * Frees an Ethernet egress queue.
1651 int t4vf_eth_eq_free(struct adapter
*adapter
, unsigned int eqid
)
1653 struct fw_eq_eth_cmd cmd
;
1655 memset(&cmd
, 0, sizeof(cmd
));
1656 cmd
.op_to_vfn
= cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD
) |
1659 cmd
.alloc_to_len16
= cpu_to_be32(FW_EQ_ETH_CMD_FREE_F
|
1661 cmd
.eqid_pkd
= cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid
));
1662 return t4vf_wr_mbox(adapter
, &cmd
, sizeof(cmd
), NULL
);
1666 * t4vf_handle_fw_rpl - process a firmware reply message
1667 * @adapter: the adapter
1668 * @rpl: start of the firmware message
1670 * Processes a firmware message, such as link state change messages.
1672 int t4vf_handle_fw_rpl(struct adapter
*adapter
, const __be64
*rpl
)
1674 const struct fw_cmd_hdr
*cmd_hdr
= (const struct fw_cmd_hdr
*)rpl
;
1675 u8 opcode
= FW_CMD_OP_G(be32_to_cpu(cmd_hdr
->hi
));
1680 * Link/module state change message.
1682 const struct fw_port_cmd
*port_cmd
=
1683 (const struct fw_port_cmd
*)rpl
;
1685 int action
, port_id
, link_ok
, speed
, fc
, pidx
;
1688 * Extract various fields from port status change message.
1690 action
= FW_PORT_CMD_ACTION_G(
1691 be32_to_cpu(port_cmd
->action_to_len16
));
1692 if (action
!= FW_PORT_ACTION_GET_PORT_INFO
) {
1693 dev_err(adapter
->pdev_dev
,
1694 "Unknown firmware PORT reply action %x\n",
1699 port_id
= FW_PORT_CMD_PORTID_G(
1700 be32_to_cpu(port_cmd
->op_to_portid
));
1702 stat
= be32_to_cpu(port_cmd
->u
.info
.lstatus_to_modtype
);
1703 link_ok
= (stat
& FW_PORT_CMD_LSTATUS_F
) != 0;
1706 if (stat
& FW_PORT_CMD_RXPAUSE_F
)
1708 if (stat
& FW_PORT_CMD_TXPAUSE_F
)
1710 if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M
))
1712 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G
))
1714 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G
))
1716 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G
))
1718 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G
))
1720 else if (stat
& FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G
))
1724 * Scan all of our "ports" (Virtual Interfaces) looking for
1725 * those bound to the physical port which has changed. If
1726 * our recorded state doesn't match the current state,
1727 * signal that change to the OS code.
1729 for_each_port(adapter
, pidx
) {
1730 struct port_info
*pi
= adap2pinfo(adapter
, pidx
);
1731 struct link_config
*lc
;
1733 if (pi
->port_id
!= port_id
)
1738 mod
= FW_PORT_CMD_MODTYPE_G(stat
);
1739 if (mod
!= pi
->mod_type
) {
1741 t4vf_os_portmod_changed(adapter
, pidx
);
1744 if (link_ok
!= lc
->link_ok
|| speed
!= lc
->speed
||
1746 /* something changed */
1747 lc
->link_ok
= link_ok
;
1751 be16_to_cpu(port_cmd
->u
.info
.pcap
);
1752 lc
->lp_advertising
=
1753 be16_to_cpu(port_cmd
->u
.info
.lpacap
);
1754 t4vf_os_link_changed(adapter
, pidx
, link_ok
);
1761 dev_err(adapter
->pdev_dev
, "Unknown firmware reply %X\n",
1769 int t4vf_prep_adapter(struct adapter
*adapter
)
1772 unsigned int chipid
;
1774 /* Wait for the device to become ready before proceeding ...
1776 err
= t4vf_wait_dev_ready(adapter
);
1780 /* Default port and clock for debugging in case we can't reach
1783 adapter
->params
.nports
= 1;
1784 adapter
->params
.vfres
.pmask
= 1;
1785 adapter
->params
.vpd
.cclk
= 50000;
1787 adapter
->params
.chip
= 0;
1788 switch (CHELSIO_PCI_ID_VER(adapter
->pdev
->device
)) {
1790 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T4
, 0);
1791 adapter
->params
.arch
.sge_fl_db
= DBPRIO_F
;
1792 adapter
->params
.arch
.mps_tcam_size
=
1793 NUM_MPS_CLS_SRAM_L_INSTANCES
;
1797 chipid
= REV_G(t4_read_reg(adapter
, PL_VF_REV_A
));
1798 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T5
, chipid
);
1799 adapter
->params
.arch
.sge_fl_db
= DBPRIO_F
| DBTYPE_F
;
1800 adapter
->params
.arch
.mps_tcam_size
=
1801 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;
1805 chipid
= REV_G(t4_read_reg(adapter
, PL_VF_REV_A
));
1806 adapter
->params
.chip
|= CHELSIO_CHIP_CODE(CHELSIO_T6
, chipid
);
1807 adapter
->params
.arch
.sge_fl_db
= 0;
1808 adapter
->params
.arch
.mps_tcam_size
=
1809 NUM_MPS_T5_CLS_SRAM_L_INSTANCES
;