/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

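/* Fill one or more TX BDs for an XDP buffer (head plus any frags) and
 * advance the TX producer.  Returns the software tx_buf for the head BD
 * so the caller can record per-packet state on it.
 */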
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	int num_frags = 0;
	u32 flags;
	u16 prod;
	int i;

	if (xdp && xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		num_frags = sinfo->nr_frags;
	}

	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->nr_frags = num_frags;
	if (xdp)
		tx_buf->page = virt_to_head_page(xdp->data);

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) |
		((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
		bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* now let us fill up the frags into the next buffers */
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		struct bnxt_sw_tx_bd *frag_tx_buf;
		dma_addr_t frag_mapping;
		int frag_len;

		prod = NEXT_TX(prod);
		WRITE_ONCE(txr->tx_prod, prod);

		/* fill the next buffer with this fragment */
		frag_tx_buf = &txr->tx_buf_ring[prod];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		frag_len = skb_frag_size(frag);
		flags = frag_len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

		frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
			       skb_frag_off(frag);
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

		len = frag_len;
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
						 TX_BD_FLAGS_PACKET_END);
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	return tx_buf;
}

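/* Queue an XDP_TX packet: fill the TX BD(s) and remember the RX producer
 * so the RX ring can be replenished once the transmit completes.
 */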
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

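/* Queue a frame handed to us by ndo_xdp_xmit().  The buffer was mapped
 * with dma_map_single(), so save the mapping and frame for unmapping and
 * freeing at completion time.
 */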
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}

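/* TX completion handler for XDP rings: unmap and free XDP_REDIRECT frames,
 * recycle the frag pages of completed XDP_TX packets, and ring the RX
 * doorbell once XDP_TX completions have made their RX buffers reusable.
 */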
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	int nr_pkts = bnapi->tx_pkts;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i, j, frags;

	if (!budget)
		return;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			tx_buf->action = 0;
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;

			frags = tx_buf->nr_frags;
			for (j = 0; j < frags; j++) {
				tx_cons = NEXT_TX(tx_cons);
				tx_buf = &txr->tx_buf_ring[tx_cons];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			}
		} else {
			bnxt_sched_reset_txr(bp, txr, i);
			return;
		}
		tx_cons = NEXT_TX(tx_cons);
	}

	bnapi->tx_pkts = 0;
	WRITE_ONCE(txr->tx_cons, tx_cons);
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

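/* Return true if an XDP program is currently attached to this RX ring. */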
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

	return !!xdp_prog;
}

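/* Initialize an xdp_buff for a received buffer: sync the DMA mapping for
 * CPU access and point the buff at the packet data with the ring's
 * configured headroom (bp->rx_offset).
 */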
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 *data_ptr, unsigned int len,
			struct xdp_buff *xdp)
{
	u32 buflen = BNXT_RX_PAGE_SIZE;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 offset;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}

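/* Return all frag pages of a multi-buffer xdp_buff to the page pool, e.g.
 * when the packet is dropped or cannot be transmitted.
 */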
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp || !xdp_buff_has_frags(xdp))
		return;
	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
		 unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 tx_needed = 1;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring;
	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If not all TX descriptors are available, earlier XDP_TX packets are
	 * still in flight and their BDs still reference RX buffers, so we
	 * must not update the RX producer yet.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		*event = 0;

		if (unlikely(xdp_buff_has_frags(&xdp))) {
			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);

			tx_needed += sinfo->nr_frags;
			*event = BNXT_AGG_EVENT;
		}

		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);

		*event |= BNXT_TX_EVENT;
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_xdp_buff_frags_free(rxr, &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}

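/* ndo_xdp_xmit handler: transmit a batch of XDP frames redirected to this
 * device, picking an XDP TX ring based on the current CPU.
 */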
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int nxmit = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!bnxt_tx_avail(bp, txr))
			break;

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping))
			break;

		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);

	return nxmit;
}

/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
		xdp_features_set_redirect_target(dev, true);
	} else {
		int rx, tx;

		xdp_features_clear_redirect_target(dev);
		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

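/* ndo_bpf handler: dispatch netdev BPF commands; only XDP program setup is
 * supported.
 */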
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

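/* Finish an skb built from a multi-buffer xdp_buff: set the checksum state
 * from the RX completion and attach the frag pages to the skb.
 */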
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
		   struct page_pool *pool, struct xdp_buff *xdp,
		   struct rx_cmp_ext *rxcmp1)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!skb)
		return NULL;
	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (bp->dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}
	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}