// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

static void gve_rx_free_buffer(struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}

static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 slots = rx->mask + 1;
	int i;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);
		gve_unassign_qpl(priv, rx->data.qpl->id);
		rx->data.qpl = NULL;

		for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
			page_ref_sub(rx->qpl_copy_pool[i].page,
				     rx->qpl_copy_pool[i].pagecnt_bias - 1);
			put_page(rx->qpl_copy_pool[i].page);
		}
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;
}

static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	size_t bytes;

	gve_rx_remove_from_block(priv, idx);

	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
	rx->desc.desc_ring = NULL;

	dma_free_coherent(dev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_rx_unfill_pages(priv, rx);

	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(dev, bytes, rx->data.data_ring,
			  rx->data.data_bus);
	rx->data.data_ring = NULL;

	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

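/* Seed a freshly mapped page with a large refcount bias. Per-packet
 * reference bookkeeping is then done against page_info->pagecnt_bias
 * rather than touching the page refcount for every buffer handed up
 * (see gve_dec_pagecnt_bias() and gve_rx_can_recycle_buffer()).
 */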
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	*slot_addr = cpu_to_be64(addr);
	/* The page already has 1 ref */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}

static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	struct page *page;
	dma_addr_t dma;
	int err;

	err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
			     GFP_ATOMIC);
	if (err)
		return err;

	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
	return 0;
}

static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	if (!rx->data.raw_addressing) {
		rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
		if (!rx->data.qpl) {
			kvfree(rx->data.page_info);
			rx->data.page_info = NULL;
			return -ENOMEM;
		}
	}
	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
					  &rx->data.data_ring[i]);
		if (err)
			goto alloc_err_rda;
	}

	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref. */
			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * function with gve_setup_rx_buffer.
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	gve_unassign_qpl(priv, rx->data.qpl->id);
	rx->data.qpl = NULL;

	return err;

alloc_err_rda:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}

static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->total_size = 0;
	ctx->frag_cnt = 0;
	ctx->drop_pkt = false;
}

static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots, npages;
	int filled_pages;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	slots = priv->rx_data_slot_cnt;
	rx->mask = slots - 1;
	rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;

	rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
	rx->qpl_copy_pool_head = 0;
	rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
				     sizeof(rx->qpl_copy_pool[0]),
				     GFP_KERNEL);

	if (!rx->qpl_copy_pool) {
		err = -ENOMEM;
		goto abort_with_slots;
	}

	filled_pages = gve_prefill_rx_pages(rx);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_copy_pool;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	npages = bytes / PAGE_SIZE;
	if (npages * PAGE_SIZE != bytes) {
		err = -EIO;
		goto abort_with_q_resources;
	}

	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->cnt = 0;
	rx->db_threshold = priv->rx_desc_cnt / 2;
	rx->desc.seqno = 1;

	/* Allocating half-page buffers allows page-flipping which is faster
	 * than copying or allocating new pages.
	 */
	rx->packet_buffer_size = PAGE_SIZE / 2;
	gve_rx_ctx_clear(&rx->ctx);
	gve_rx_add_to_block(priv, idx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx);
abort_with_copy_pool:
	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Unallocate if there was an error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_rx_free_ring(priv, j);
	}
	return err;
}

void gve_rx_free_rings_gqi(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

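/* Append one receive buffer to the packet being assembled in @ctx as an skb
 * fragment. The first call grabs an skb from napi_get_frags(); once
 * MAX_SKB_FRAGS fragments are in use, further buffers go into a second skb
 * chained via frag_list. Returns the head skb, or NULL on failure.
 */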
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					u16 packet_buffer_size, u16 len,
					struct gve_rx_ctx *ctx)
{
	u32 offset = page_info->page_offset + page_info->pad;
	struct sk_buff *skb = ctx->skb_tail;
	int num_frags = 0;

	if (!skb) {
		skb = napi_get_frags(napi);
		if (unlikely(!skb))
			return NULL;

		ctx->skb_head = skb;
		ctx->skb_tail = skb;
	} else {
		num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
		if (num_frags == MAX_SKB_FRAGS) {
			skb = napi_alloc_skb(napi, 0);
			if (!skb)
				return NULL;

			// We will never chain more than two SKBs: 2 * 16 * 2k > 64k
			// which is why we do not need to chain by using skb->next
			skb_shinfo(ctx->skb_tail)->frag_list = skb;

			ctx->skb_tail = skb;
			num_frags = 0;
		}
	}

	if (skb != ctx->skb_head) {
		ctx->skb_head->len += len;
		ctx->skb_head->data_len += len;
		ctx->skb_head->truesize += packet_buffer_size;
	}
	skb_add_rx_frag(skb, num_frags, page_info->page,
			offset, len, packet_buffer_size);

	return ctx->skb_head;
}

static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);

	/* "flip" to other packet buffer on this page */
	page_info->page_offset ^= PAGE_SIZE / 2;
	*(slot_addr) ^= offset;
}

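/* Compare the page refcount against the driver-held bias to decide whether a
 * buffer page can be reused: 1 if the stack has released every reference,
 * 0 if an skb still holds the page, -1 (with a WARN) if the count fell below
 * the bias, which should never happen.
 */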
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}

static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
		      struct gve_rx_slot_page_info *page_info, u16 len,
		      struct napi_struct *napi,
		      union gve_rx_data_slot *data_slot,
		      u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);

	if (!skb)
		return NULL;

	/* Optimistically stop the kernel from freeing the page.
	 * We will check again in refill to determine if we need to alloc a
	 * new page.
	 */
	gve_dec_pagecnt_bias(page_info);

	return skb;
}

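/* QPL slow path: the registered buffer cannot be handed to the stack, so copy
 * the payload into a page from the ring's qpl_copy_pool and build the skb
 * from that copy. If the least recently used pool page is still held by the
 * stack, fall back to allocating a one-off page instead.
 */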
static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
					   struct gve_rx_slot_page_info *page_info,
					   u16 len, struct napi_struct *napi)
{
	u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
	void *src = page_info->page_address + page_info->page_offset;
	struct gve_rx_slot_page_info *copy_page_info;
	struct gve_rx_ctx *ctx = &rx->ctx;
	bool alloc_page = false;
	struct sk_buff *skb;
	void *dst;

	copy_page_info = &rx->qpl_copy_pool[pool_idx];
	if (!copy_page_info->can_flip) {
		int recycle = gve_rx_can_recycle_buffer(copy_page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(rx->gve);
			return NULL;
		}
		alloc_page = !recycle;
	}

	if (alloc_page) {
		struct gve_rx_slot_page_info alloc_page_info;
		struct page *page;

		/* The least recently used page turned out to be
		 * still in use by the kernel. Ignoring it and moving
		 * on alleviates head-of-line blocking.
		 */
		rx->qpl_copy_pool_head++;

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return NULL;

		alloc_page_info.page = page;
		alloc_page_info.page_offset = 0;
		alloc_page_info.page_address = page_address(page);
		alloc_page_info.pad = page_info->pad;

		memcpy(alloc_page_info.page_address, src, page_info->pad + len);
		skb = gve_rx_add_frags(napi, &alloc_page_info,
				       rx->packet_buffer_size,
				       len, ctx);

		u64_stats_update_begin(&rx->statss);
		rx->rx_frag_copy_cnt++;
		rx->rx_frag_alloc_cnt++;
		u64_stats_update_end(&rx->statss);

		return skb;
	}

	dst = copy_page_info->page_address + copy_page_info->page_offset;
	memcpy(dst, src, page_info->pad + len);
	copy_page_info->pad = page_info->pad;

	skb = gve_rx_add_frags(napi, copy_page_info,
			       rx->packet_buffer_size, len, ctx);
	if (unlikely(!skb))
		return NULL;

	gve_dec_pagecnt_bias(copy_page_info);
	copy_page_info->page_offset += rx->packet_buffer_size;
	copy_page_info->page_offset &= (PAGE_SIZE - 1);

	if (copy_page_info->can_flip) {
		/* We have used both halves of this copy page, it
		 * is time for it to go to the back of the queue.
		 */
		copy_page_info->can_flip = false;
		rx->qpl_copy_pool_head++;
		prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
	} else {
		copy_page_info->can_flip = true;
	}

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_copy_cnt++;
	u64_stats_update_end(&rx->statss);

	return skb;
}

static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
	   struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
	   u16 len, struct napi_struct *napi,
	   union gve_rx_data_slot *data_slot)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb;

	/* if raw_addressing mode is not enabled gvnic can only receive into
	 * registered segments. If the buffer can't be recycled, our only
	 * choice is to copy the data out of it so that we can return it to the
	 * device.
	 */
	if (page_info->can_flip) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* No point in recycling if we didn't get the skb */
		if (skb) {
			/* Make sure that the page isn't freed. */
			gve_dec_pagecnt_bias(page_info);
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		}
	} else {
		skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
	}
	return skb;
}

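/* Build an skb for one received buffer. Small single-fragment packets (up to
 * rx_copybreak) are copied into a fresh skb; larger ones are attached as page
 * fragments via the raw-addressing or QPL path, with the buffer recycled once
 * the stack no longer references the page.
 */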
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
				  u16 len, union gve_rx_data_slot *data_slot,
				  bool is_only_frag)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb = NULL;

	if (len <= priv->rx_copybreak && is_only_frag) {
		/* Just copy small packets */
		skb = gve_rx_copy(netdev, napi, page_info, len);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		}
	} else {
		int recycle = gve_rx_can_recycle_buffer(page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(priv);
			return NULL;
		}
		page_info->can_flip = recycle;
		if (page_info->can_flip) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_flip_cnt++;
			u64_stats_update_end(&rx->statss);
		}

		if (rx->data.raw_addressing) {
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    page_info, len, napi,
						    data_slot,
						    rx->packet_buffer_size, ctx);
		} else {
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 page_info, len, napi, data_slot);
		}
	}
	return skb;
}

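/* XDP_REDIRECT target is an AF_XDP socket: copy the frame into a buffer from
 * the ring's xsk_pool and redirect that copy, freeing it on error.
 */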
static int gve_xsk_pool_redirect(struct net_device *dev,
				 struct gve_rx_ring *rx,
				 void *data, int len,
				 struct bpf_prog *xdp_prog)
{
	struct xdp_buff *xdp;
	int err;

	if (rx->xsk_pool->frame_len < len)
		return -E2BIG;
	xdp = xsk_buff_alloc(rx->xsk_pool);
	if (!xdp) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp->data_end = xdp->data + len;
	memcpy(xdp->data, data, len);
	err = xdp_do_redirect(dev, xdp, xdp_prog);
	if (err)
		xsk_buff_free(xdp);
	return err;
}

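/* Redirect a frame out of the driver. The packet is copied into memory from
 * the ring's page_frag cache (so the original receive buffer can still be
 * recycled by the ring), or handed to gve_xsk_pool_redirect() when an XSK
 * pool is attached.
 */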
static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
			    struct xdp_buff *orig, struct bpf_prog *xdp_prog)
{
	int total_len, len = orig->data_end - orig->data;
	int headroom = XDP_PACKET_HEADROOM;
	struct xdp_buff new;
	void *frame;
	int err;

	if (rx->xsk_pool)
		return gve_xsk_pool_redirect(dev, rx, orig->data,
					     len, xdp_prog);

	total_len = headroom + SKB_DATA_ALIGN(len) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
	if (!frame) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
	xdp_prepare_buff(&new, frame, headroom, len, false);
	memcpy(new.data, orig->data, len);

	err = xdp_do_redirect(dev, &new, xdp_prog);
	if (err)
		page_frag_free(frame);

	return err;
}

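/* Handle a non-XDP_PASS verdict from the attached program: XDP_TX sends the
 * frame on this ring's paired XDP TX queue under its xdp_lock, XDP_REDIRECT
 * goes through gve_xdp_redirect(), and anything else is dropped. Per-action
 * and error counters are updated under rx->statss.
 */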
static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct xdp_buff *xdp, struct bpf_prog *xprog,
			 int xdp_act)
{
	struct gve_tx_ring *tx;
	int tx_qid;
	int err;

	switch (xdp_act) {
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		break;
	case XDP_TX:
		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
		tx = &priv->tx[tx_qid];
		spin_lock(&tx->xdp_lock);
		err = gve_xdp_xmit_one(priv, tx, xdp->data,
				       xdp->data_end - xdp->data, NULL);
		spin_unlock(&tx->xdp_lock);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_tx_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	case XDP_REDIRECT:
		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_redirect_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	}
	u64_stats_update_begin(&rx->statss);
	if ((u32)xdp_act < GVE_XDP_ACTIONS)
		rx->xdp_actions[xdp_act]++;
	u64_stats_update_end(&rx->statss);
}

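/* Process a single RX descriptor. Fragments of a multi-descriptor packet are
 * accumulated in rx->ctx until the descriptor without GVE_RXF_PKT_CONT
 * arrives; descriptor errors or oversized fragments mark the packet for
 * dropping. Single-fragment packets may first be run through an attached XDP
 * program.
 */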
#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   struct gve_rx_desc *desc, u32 idx,
		   struct gve_rx_cnts *cnts)
{
	bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
	struct gve_rx_slot_page_info *page_info;
	u16 frag_size = be16_to_cpu(desc->len);
	struct gve_rx_ctx *ctx = &rx->ctx;
	union gve_rx_data_slot *data_slot;
	struct gve_priv *priv = rx->gve;
	struct sk_buff *skb = NULL;
	struct bpf_prog *xprog;
	struct xdp_buff xdp;
	dma_addr_t page_bus;
	void *va;

	u16 len = frag_size;
	struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	bool is_first_frag = ctx->frag_cnt == 0;

	bool is_only_frag = is_first_frag && is_last_frag;

	if (unlikely(ctx->drop_pkt))
		goto finish_frag;

	if (desc->flags_seq & GVE_RXF_ERR) {
		ctx->drop_pkt = true;
		cnts->desc_err_pkt_cnt++;
		napi_free_frags(napi);
		goto finish_frag;
	}

	if (unlikely(frag_size > rx->packet_buffer_size)) {
		netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
			    frag_size, rx->packet_buffer_size);
		ctx->drop_pkt = true;
		napi_free_frags(napi);
		gve_schedule_reset(rx->gve);
		goto finish_frag;
	}

	/* Prefetch two packet buffers ahead, we will need it soon. */
	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
	va = page_info->page_address + page_info->page_offset;
	prefetch(page_info->page); /* Kernel page struct. */
	prefetch(va);              /* Packet header. */
	prefetch(va + 64);         /* Next cacheline too. */

	page_info = &rx->data.page_info[idx];
	data_slot = &rx->data.data_ring[idx];
	page_bus = (rx->data.raw_addressing) ?
		be64_to_cpu(data_slot->addr) - page_info->page_offset :
		rx->data.qpl->page_buses[idx];
	dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
				PAGE_SIZE, DMA_FROM_DEVICE);
	page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
	len -= page_info->pad;
	frag_size -= page_info->pad;

	xprog = READ_ONCE(priv->xdp_prog);
	if (xprog && is_only_frag) {
		void *old_data;
		int xdp_act;

		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
		xdp_prepare_buff(&xdp, page_info->page_address +
				 page_info->page_offset, GVE_RX_PAD,
				 len, false);
		old_data = xdp.data;
		xdp_act = bpf_prog_run_xdp(xprog, &xdp);
		if (xdp_act != XDP_PASS) {
			gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
			ctx->total_size += frag_size;
			goto finish_ok_pkt;
		}

		page_info->pad += xdp.data - old_data;
		len = xdp.data_end - xdp.data;

		u64_stats_update_begin(&rx->statss);
		rx->xdp_actions[XDP_PASS]++;
		u64_stats_update_end(&rx->statss);
	}

	skb = gve_rx_skb(priv, rx, page_info, napi, len,
			 data_slot, is_only_frag);
	if (!skb) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_skb_alloc_fail++;
		u64_stats_update_end(&rx->statss);

		napi_free_frags(napi);
		ctx->drop_pkt = true;
		goto finish_frag;
	}
	ctx->total_size += frag_size;

	if (is_first_frag) {
		if (likely(feat & NETIF_F_RXCSUM)) {
			/* NIC passes up the partial sum */
			if (desc->csum)
				skb->ip_summed = CHECKSUM_COMPLETE;
			else
				skb->ip_summed = CHECKSUM_NONE;
			skb->csum = csum_unfold(desc->csum);
		}

		/* parse flags & pass relevant info up */
		if (likely(feat & NETIF_F_RXHASH) &&
		    gve_needs_rss(desc->flags_seq))
			skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
				     gve_rss_type(desc->flags_seq));
	}

	if (is_last_frag) {
		skb_record_rx_queue(skb, rx->q_num);
		if (skb_is_nonlinear(skb))
			napi_gro_frags(napi);
		else
			napi_gro_receive(napi, skb);
		goto finish_ok_pkt;
	}

	goto finish_frag;

finish_ok_pkt:
	cnts->ok_pkt_bytes += ctx->total_size;
	cnts->ok_pkt_cnt++;
finish_frag:
	ctx->frag_cnt++;
	if (is_last_frag) {
		cnts->total_pkt_cnt++;
		cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
		gve_rx_ctx_clear(ctx);
	}
}

bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

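/* Restock the data ring for raw addressing mode. Prefer flipping to the
 * unused half of a page; otherwise reuse the page once the stack has dropped
 * all references, or free it and allocate a replacement. Returns false on a
 * reference-count underflow.
 */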
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		struct gve_rx_slot_page_info *page_info;
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free because it was
			 * free when we processed the descriptor. Flip to it.
			 */
			union gve_rx_data_slot *data_slot =
				&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* It is possible that the networking stack has already
			 * finished processing all outstanding packets in the buffer
			 * and it can be reused.
			 * Flipping is unnecessary here - if the networking stack still
			 * owns half the page it is impossible to tell which half. Either
			 * the whole page is free or it needs to be replaced.
			 */
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (recycle < 0) {
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
				return false;
			}
			if (!recycle) {
				/* We can't reuse the buffer - alloc a new one */
				union gve_rx_data_slot *data_slot =
					&rx->data.data_ring[idx];
				struct device *dev = &priv->pdev->dev;

				gve_rx_free_buffer(dev, page_info, data_slot);
				page_info->page = NULL;
				if (gve_rx_alloc_buffer(priv, dev, page_info,
							data_slot)) {
					u64_stats_update_begin(&rx->statss);
					rx->rx_buf_alloc_fail++;
					u64_stats_update_end(&rx->statss);
					break;
				}
			}
		}
		fill_cnt++;
	}
	rx->fill_cnt = fill_cnt;
	return true;
}

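/* Main RX cleanup loop, called from NAPI poll. Consumes descriptors in
 * sequence-number order up to @budget (finishing any in-flight multi-fragment
 * packet), updates stats, flushes pending XDP TX/redirect work, restocks
 * buffers and rings the doorbell.
 */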
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
	u64 xdp_txs = rx->xdp_actions[XDP_TX];
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_cnts cnts = {0};
	struct gve_rx_desc *next_desc;
	u32 idx = rx->cnt & rx->mask;
	u32 work_done = 0;

	struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];

	// Exceed budget only if (and till) the inflight packet is consumed.
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       (work_done < budget || ctx->frag_cnt)) {
		next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
		prefetch(next_desc);

		gve_rx(rx, feat, desc, idx, &cnts);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		work_done++;
	}

	// The device will only send whole packets.
	if (unlikely(ctx->frag_cnt)) {
		struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		napi_free_frags(napi);
		gve_rx_ctx_clear(&rx->ctx);
		netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
			    GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
		gve_schedule_reset(rx->gve);
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += cnts.ok_pkt_cnt;
		rx->rbytes += cnts.ok_pkt_bytes;
		rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
		rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
		u64_stats_update_end(&rx->statss);
	}

	if (xdp_txs != rx->xdp_actions[XDP_TX])
		gve_xdp_tx_flush(priv, rx->q_num);

	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
		xdp_do_flush();

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode buffs are refilled as the desc are processed */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode buffs are only refilled if the avail
		 * falls below a threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we were not able to completely refill buffers, we'll want
		 * to schedule this queue for work again to refill buffers.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return cnts.total_pkt_cnt;
}

int gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	int work_done = 0;

	feat = block->napi.dev->features;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	if (budget > 0)
		work_done = gve_clean_rx_done(rx, budget, feat);

	return work_done;
}