/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK 256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value which is divided by 2 let's use the actual values and
 * avoid an excessive amount of translation.
 */

#define IAVF_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define IAVF_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define IAVF_ITR_100K		10	/* all values below must be even */
#define IAVF_ITR_50K		20
#define IAVF_ITR_20K		50
#define IAVF_ITR_18K		60
#define IAVF_ITR_8K		122
#define IAVF_MAX_ITR		8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting)	((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF		(IAVF_ITR_20K | IAVF_ITR_DYNAMIC)

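/* Worked example (illustrative only): a user setting of 50 usecs with the
 * dynamic flag set is stored as (50 | IAVF_ITR_DYNAMIC) == 0x8032, so
 *	ITR_TO_REG(0x8032)     == 0x32 == 50	(flag bit stripped)
 *	ITR_IS_DYNAMIC(0x8032) == true
 *	ITR_REG_ALIGN(51)      == 52	(rounds up to the even 2 usec grid)
 * Because the register counts in 2 usec units, a 50 usec setting
 * corresponds to a programmed interval value of 25.
 */
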
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)

#define IAVF_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set)	((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K		125	/* 8000 ints/sec */
#define IAVF_INTRL_62K		16	/* 62500 ints/sec */
#define IAVF_INTRL_83K		12	/* 83333 ints/sec */

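/* Worked example (illustrative only): the 8K ints/sec limit of 125 usecs
 * encodes as INTRL_USEC_TO_REG(125) == (125 >> 2) | INTRL_ENA == 0x1F | 0x40
 * == 0x5F, and decodes as INTRL_REG_TO_USEC(0x5F) == 0x1F << 2 == 124.
 * The round trip drops the low two bits because the register only has
 * 4 usec resolution.
 */
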
#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
	IAVF_IDX_ITR0 = 0,
	IAVF_IDX_ITR1 = 1,
	IAVF_IDX_ITR2 = 2,
	IAVF_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR IAVF_IDX_ITR0
#define IAVF_TX_ITR IAVF_IDX_ITR1
#define IAVF_PE_ITR IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256	256
#define IAVF_RXBUFFER_1536	1536	/* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048	2048
#define IAVF_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER	9728	/* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

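/* Worked example (illustrative only, assuming 4K pages and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320): for
 * rx_buf_len == IAVF_RXBUFFER_1536, page_size == ALIGN(1536, 2048) == 2048
 * and pad_size == (2048 - 320) - 1536 == 192 bytes of headroom.
 */
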
static inline int iavf_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IAVF_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IAVF_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 **/
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}

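/* Example use (illustrative only, assuming the IAVF_RX_DESC_STATUS_DD_SHIFT
 * definition from iavf_type.h): testing the descriptor-done bit looks like
 *
 *	if (iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *		... the descriptor has been written back by hardware ...
 */
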
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = IAVF_RX_DESC((r), (i));	\
	} while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		IAVF_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define IAVF_MAX_BUFFER_TXD	8
#define IAVF_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE		4096
#define IAVF_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
	(IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))

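/* Worked value (illustrative only): IAVF_MAX_DATA_PER_TXD is 0x3FFF, and
 * masking off the low 12 bits leaves 0x3000 == 12K. That aligned limit is
 * where the "12K of data per descriptor" figure used below comes from.
 */
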
/**
 * iavf_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 **/
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

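/* Worked example (illustrative only): for size == 32768,
 * (32768 * 85) >> 20 == 2785280 >> 20 == 2, plus 1 gives 3 descriptors,
 * matching DIV_ROUND_UP(32K, 12K). For size == 12289, just past a 12K
 * multiple, the formula still returns 1; that underestimate is safe
 * because a single descriptor can hold up to 16K - 1 bytes.
 */
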
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING	4

#define IAVF_TX_FLAGS_HW_VLAN			BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN			BIT(2)
#define IAVF_TX_FLAGS_TSO			BIT(3)
#define IAVF_TX_FLAGS_IPV4			BIT(4)
#define IAVF_TX_FLAGS_IPV6			BIT(5)
#define IAVF_TX_FLAGS_FCCRC			BIT(6)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
#define IAVF_TX_FLAGS_VLAN_SHIFT		16

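/* Illustrative use (a sketch, assuming a tx_flags word laid out per the
 * masks above): the VLAN tag occupies the upper 16 bits, so
 *
 *	u16 vlan_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
 *		       IAVF_TX_FLAGS_VLAN_SHIFT;
 *
 * recovers the stored tag, and the _PRIO_ mask/shift pair extracts the
 * 3-bit priority field in the same way.
 */
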
struct iavf_tx_buffer {
	struct iavf_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct iavf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct iavf_queue_stats {
	u64 packets;
	u64 bytes;
};

struct iavf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
	u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum iavf_ring_state_t {
	__IAVF_TX_FDIR_INIT_DONE,
	__IAVF_TX_XPS_INIT_DONE,
	__IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT		0
#define IAVF_RX_DTYPE_HEADER_SPLIT	1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS	2
#define IAVF_RX_SPLIT_L2		0x1
#define IAVF_RX_SPLIT_IP		0x2
#define IAVF_RX_SPLIT_TCP_UDP		0x4
#define IAVF_RX_SPLIT_SCTP		0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
	struct iavf_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct iavf_tx_buffer *tx_bi;
		struct iavf_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct iavf_tx_queue_stats tx_stats;
		struct iavf_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct iavf_vsi *vsi;		/* Backreference to associated VSI */
	struct iavf_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When iavf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * iavf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
	return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
	ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS	0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS	0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY	0x8000
#define IAVF_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

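/* Example use (illustrative only, assuming a q_vector whose Tx container
 * member is named "tx" as in struct iavf_q_vector):
 *
 *	struct iavf_ring *ring;
 *
 *	iavf_for_each_ring(ring, q_vector->tx)
 *		... process each Tx ring chained into this vector ...
 */
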
static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))

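/* Illustrative values, assuming 4K pages: a 2K rx_buf_len fits two buffers
 * per page, so iavf_rx_pg_order() is 0 and iavf_rx_pg_size() is 4K; a 3K
 * rx_buf_len exceeds half a page, so the order becomes 1 and an 8K
 * (order-1) page is used instead.
 */
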
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += iavf_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < IAVF_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __iavf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != IAVF_MAX_BUFFER_TXD;
}

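/* How the helpers above combine on the transmit path (an illustrative
 * sketch only, with error handling elided; "reserve" stands for whatever
 * extra descriptors, e.g. a context descriptor, the real routine accounts
 * for):
 *
 *	count = iavf_xmit_descriptor_count(skb);
 *	if (iavf_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = iavf_txd_use_count(skb->len);
 *	}
 *	if (iavf_maybe_stop_tx(tx_ring, count + reserve))
 *		return NETDEV_TX_BUSY;
 */
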
/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

#endif /* _IAVF_TXRX_H_ */