// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs to this stage.  This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
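/* Illustrative usage sketch (names are examples, not part of this file):
 * user space creates a BPF_MAP_TYPE_CPUMAP map with 4-byte keys
 * (destination CPU index) and 4-byte values (per-CPU queue size), and the
 * XDP program picks an entry and returns the bpf_redirect_map() result:
 *
 *	__u32 cpu_idx = 2;	// example: fixed destination CPU
 *
 *	return bpf_redirect_map(&cpu_map, cpu_idx, 0);
 *
 * Frames redirected this way are bulk-enqueued below and handed to a
 * kthread pinned on the destination CPU, which builds SKBs and injects
 * them into the normal network stack.
 */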
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_core */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * at most be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */
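/* Data path through this file, for orientation:
 *
 *	XDP_REDIRECT in driver NAPI poll (RCU read side)
 *	  -> cpu_map_enqueue() / bq_enqueue()	per-CPU xdp_bulk_queue
 *	  -> __cpu_map_flush() at end of poll	bq_flush_to_queue()
 *	  -> rcpu->queue			MPSC ptr_ring
 *	  -> cpu_map_kthread_run()		kthread pinned on remote CPU
 *	  -> cpu_map_build_skb() + netif_receive_skb_core()
 */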

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */
	u32 qsize;  /* Queue size placeholder for map lookup */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;
	struct work_struct kthread_stop_wq;

	atomic_t refcnt; /* Control when this struct can be freed */
	struct rcu_head rcu;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static int bq_flush_to_queue(struct xdp_bulk_queue *bq);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;
	u64 cost;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);

	/* Notice this returns -EPERM if map size is larger than memlock limit */
	ret = bpf_map_charge_init(&cmap->map.memory, cost);
	if (ret) {
		err = ret;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_charge;

	return &cmap->map;
free_charge:
	bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
					 struct xdp_frame *xdpf,
					 struct sk_buff *skb)
{
	unsigned int hard_start_headroom;
	unsigned int frame_size;
	void *pkt_data_start;

	/* Part of headroom was reserved to xdpf */
	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;

	/* build_skb needs to place skb_shared_info after the SKB end, and
	 * also wants to know the memory "truesize".  Thus, we need to
	 * know the memory frame size backing the xdp_buff.
	 *
	 * XDP was designed to have PAGE_SIZE frames, but this
	 * assumption is no longer true with ixgbe and i40e.  It
	 * would be preferred to set frame_size to 2048 or 4096
	 * depending on the driver:
	 *   frame_size = 2048;
	 *   frame_len  = frame_size - sizeof(*xdp_frame);
	 *
	 * Instead, with the info available, skb_shared_info is placed
	 * after the packet length.  This unfortunately fakes the truesize.
	 * Another disadvantage of this approach is that the skb_shared_info
	 * is not at a fixed memory location with mixed-length packets,
	 * which is bad for cache-line hotness.
	 */
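	/* Resulting layout handed to build_skb_around() (sketch, not to scale):
	 *
	 *	pkt_data_start                               xdpf->data
	 *	| xdp_frame + headroom (hard_start_headroom) | packet (xdpf->len) | pad | skb_shared_info |
	 *	|<--------------------------------- frame_size --------------------------------------------->|
	 */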
	frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	pkt_data_start = xdpf->data - hard_start_headroom;
	skb = build_skb_around(skb, pkt_data_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hard_start_headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty.  See __cpu_map_entry_replace() and the work-queue
	 * invoked cpu_map_kthread_stop().  Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given a stop order, rcpu has already been
	 * disconnected from the map, thus no new packets can enter.
	 * Remaining in-flight per-CPU stored packets are flushed to this
	 * queue.  Wait honoring the kthread_stop signal until the queue
	 * is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		unsigned int drops = 0, sched = 0;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned.  Lockless access to the ptr_ring
		 * consume side is valid as no resize of the queue is allowed.
		 */
		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);

		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU.  Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
		if (unlikely(m == 0)) {
			for (i = 0; i < n; i++)
				skbs[i] = NULL; /* effect: xdp_return_frame */
			drops = n;
		}

		local_bh_disable();
		for (i = 0; i < n; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];
			int ret;

			skb = cpu_map_build_skb(rcpu, xdpf, skb);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;
		}
		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
						       int map_id)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;
	int numa, err, i;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
					 sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map_id;
	rcpu->qsize  = qsize;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu, map_id);
	if (IS_ERR(rcpu->kthread))
		goto free_ptr_ring;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed.  Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}

/* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq).  A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the memory
 * resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * the percpu bulkq to the queue.  Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() there to make sure
 * the queue is empty.  Instead a work_queue is started for stopping the
 * kthread, cpu_map_kthread_stop, which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
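/* Teardown sequence, for reference:
 *
 *	__cpu_map_entry_replace()  xchg()es the old entry out of cpu_map[]
 *	  call_rcu()		-> __cpu_map_entry_free(): frees bulkq and
 *				   drops the cpu_map[] refcnt
 *	  schedule_work()	-> cpu_map_kthread_stop(): rcu_barrier(),
 *				   then kthread_stop()
 *	cpu_map_kthread_run()	   drains the remaining ptr_ring entries,
 *				   exits and drops the kthread refcnt; the
 *				   final put_cpu_map_entry() frees the rest.
 */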
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;
	/* Value is the queue size */
	u32 qsize = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
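/* From user space, the above corresponds to a plain bpf_map_update_elem()
 * syscall; an illustrative libbpf snippet (names are examples only):
 *
 *	__u32 cpu = 2, qsize = 192;	// 192-slot ptr_ring for CPU 2
 *
 *	bpf_map_update_elem(map_fd, &cpu, &qsize, 0);
 *
 * Writing qsize == 0 behaves like a delete, and changing qsize tears down
 * and re-creates the kthread and ptr_ring for that CPU entry.
 */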

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events.  Wait for outstanding critical sections in
	 * these programs to complete.  The rcu critical section only guarantees
	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
	 * It does __not__ ensure pending flush operations (if any) are
	 * complete.
	 */

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->qsize : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

const struct bpf_map_ops cpu_map_ops = {
	.map_alloc = cpu_map_alloc,
	.map_free = cpu_map_free,
	.map_delete_elem = cpu_map_delete_elem,
	.map_update_elem = cpu_map_update_elem,
	.map_lookup_elem = cpu_map_lookup_elem,
	.map_get_next_key = cpu_map_get_next_key,
	.map_check_btf = map_check_no_btf,
};

static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return 0;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
	return 0;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}
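/* Note: drivers do not call __cpu_map_flush() directly.  They are expected
 * to call xdp_do_flush_map() (net/core/filter.c) when their napi->poll
 * budget is done, which in turn flushes the per-CPU cpu_map_flush_list
 * populated by bq_enqueue() above.
 */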

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);