/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * There are three possible paths into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Because the datapath does read-only
 * lookups into the netdev_map array from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always sees a valid copy. However,
 * the datapath also performs a "flush" operation, outside the RCU critical
 * section, that pushes any pending packets out to the driver. Each
 * bpf_dtab_netdev tracks these pending operations using an atomic per-cpu
 * bitmap. The bpf_dtab_netdev object will not be destroyed until all bits
 * are cleared, indicating that the outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation keeps the
 * netdev_map consistent in this case as well. From the devmap's point of
 * view, BPF programs calling into these operations behave the same as
 * multiple user space threads making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and remove them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex matches the net_device being removed.
 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 * removed; because of a concurrent update or delete operation it is possible
 * that the initially referenced dev is no longer in the map. As the notifier
 * hook walks the map we know that no new dev references can be added by the
 * user because core infrastructure ensures dev_get_by_index() calls will
 * fail at this point.
 */

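/* For reference, a minimal sketch of what the BPF program side of a devmap
 * redirect can look like. This is illustrative only and not part of this
 * file; the map name and program name are made up, and the SEC() /
 * bpf_redirect_map() usage follows the usual bpf_helpers.h conventions:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type        = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u32),
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_devmap_redirect(struct xdp_md *ctx)
 *	{
 *		// Redirect every packet out the device stored in slot 0.
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */
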
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}

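/* For example (illustrative numbers, assuming 64-bit longs): with
 * max_entries == 100, BITS_TO_LONGS(100) == 2, so dev_map_bitmap_size()
 * returns 2 * sizeof(unsigned long) == 16 bytes of flush bitmap per CPU.
 */
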
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed wait for flush
	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		free_percpu(dev->bulkq);
		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
	}
}

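/* For orientation, a rough sketch (not taken from any particular driver) of
 * where xdp_do_flush_map() sits in a driver's NAPI poll; all example_*()
 * names are hypothetical:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done;
 *
 *		// RX processing runs the XDP program; XDP_REDIRECT targets
 *		// are queued into the per-cpu bulk queues managed above.
 *		done = example_clean_rx_irq(napi, budget);
 *
 *		// Flush all pending devmap bulk queues before leaving poll().
 *		xdp_do_flush_map();
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */
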
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because bq is stored per-CPU and must be flushed at the
	 * end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the
	 * dev and ctx are stored in another per-cpu map. Additionally, the
	 * driver tear down ensures all soft irqs are complete before the
	 * net device is removed once its reference count reaches zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

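/* From user space a devmap entry is written with a 4-byte key (the map slot)
 * and a 4-byte value (the target ifindex), matching the key_size/value_size
 * sanity checks in dev_map_alloc(). A minimal sketch using the libbpf
 * bpf_map_update_elem() wrapper; devmap_fd and the device name are
 * illustrative only:
 *
 *	__u32 key = 0;                          // slot used by bpf_redirect_map()
 *	__u32 ifindex = if_nametoindex("eth1"); // egress device
 *
 *	if (bpf_map_update_elem(devmap_fd, &key, &ifindex, BPF_ANY))
 *		return -errno;
 */
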
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);