/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1664		1664
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_MAX_FRAME_LEGACY_RX 8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

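/* Illustrative compile-time check (not part of the original header;
 * assumes static_assert() from <linux/build_bug.h> is visible here):
 * masking the (16K - 1) limit with ~(4K - 1) clears the low 12 bits, so
 * the aligned per-descriptor payload is 0x3FFF & ~0xFFF = 0x3000 = 12K.
 */
static_assert(ICE_MAX_DATA_PER_TXD_ALIGNED == 12 * 1024);
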
#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

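/* Worked example (illustrative only, values are config-dependent): with
 * 64-byte cache lines, NET_SKB_PAD = 64 and an assumed 320-byte (aligned)
 * skb_shared_info, 64 + 1536 = 1600 does not exceed
 * SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728, so the padded 2K buffer
 * still fits and ICE_2K_TOO_SMALL_WITH_PADDING evaluates to false.
 */
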
/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half page based on given buffer length and
 * then subtract the skb_shared_info followed by subtraction of the
 * actual buffer length; this in turn results in the actual space that
 * is left for padding usage
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

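/* Worked example (hypothetical ring state, for illustration): with
 * count = 512, next_to_use = 10 and next_to_clean = 5, the clean index
 * trails the use index, so ICE_DESC_UNUSED() = 512 + 5 - 10 - 1 = 506;
 * if next_to_clean instead sits ahead at 20, it yields 0 + 20 - 10 - 1 = 9.
 * One slot is always held back so a full ring is distinguishable from an
 * empty one.
 */
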
#define ICE_RX_DESC_UNUSED(R) \
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)
#define ICE_XDP_EXIT		BIT(3)
#define ICE_SKB_CONSUMED	ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

/**
 * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
 * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
 * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
 * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
 * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
 * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
 * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
 * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
 */
enum ice_tx_buf_type {
	ICE_TX_BUF_EMPTY	= 0U,
	ICE_TX_BUF_DUMMY,
	ICE_TX_BUF_FRAG,
	ICE_TX_BUF_SKB,
	ICE_TX_BUF_XDP_TX,
	ICE_TX_BUF_XDP_XMIT,
	ICE_TX_BUF_XSK_TX,
};

struct ice_tx_buf {
	union {
		struct ice_tx_desc *next_to_watch;
		u32 rs_idx;
	};
	union {
		void *raw_buf;		/* used for XDP_TX and FDir rules */
		struct sk_buff *skb;	/* used for .ndo_start_xmit() */
		struct xdp_frame *xdpf;	/* used for .ndo_xdp_xmit() */
		struct xdp_buff *xdp;	/* used for XDP_TX ZC */
	};
	unsigned int bytecount;
	union {
		unsigned int gso_segs;
		unsigned int nr_frags;	/* used for mbuf XDP */
	};
	u32 tx_flags:12;
	u32 type:4;			/* &ice_tx_buf_type */
	u32 vid:16;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int act;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2 usecs */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)

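/* Worked example (illustrative): hardware counts ITR in 2-usec units, so
 * bit zero of a usec value carries no information; ITR_REG_ALIGN(37) =
 * 37 & 0x1FFE = 36, i.e. odd requests round down to the 2-usec grain.
 */
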
#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)

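/* Hedged usage sketch (not part of this header): a vector that wants
 * descriptor write-backs without an interrupt could program the register
 * roughly as below; hw, reg_idx and the 0-usec latency are illustrative
 * assumptions, not code from the driver.
 *
 *	wr32(hw, GLINT_DYN_CTL(reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
 */
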
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 next_to_alloc;
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	struct xdp_buff xdp;
	/* CL3 - 3rd cacheline starts here */
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	dma_addr_t dma;			/* physical address of ring */
	u64 cached_phctime;
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	u16 xdp_tx_active;
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)

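/* Hedged usage sketch: draining every Tx ring linked into a vector's ring
 * container; the q_vector variable and its tx member are assumptions made
 * for illustration.
 *
 *	struct ice_tx_ring *ring;
 *
 *	ice_for_each_tx_ring(ring, q_vector->tx)
 *		ice_clean_tx_ring(ring);
 */
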
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))

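/* Worked example (illustrative): on 4K pages, a 3072-byte rx_buf_len
 * exceeds half a page, so ice_rx_pg_order() returns 1 and
 * ice_rx_pg_size() yields an order-1 (8K) allocation; a 2048-byte buffer
 * packs two per page and stays at order 0.
 */
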
union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */