/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/bug.h>
19 #include <linux/kdev_t.h>
20 #include <linux/list.h>
21 #include <linux/lockdep.h>
22 #include <linux/netdevice.h>
23 #include <linux/printk.h>
24 #include <linux/proc_ns.h>
25 #include <linux/rhashtable.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/rwsem.h>
/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
36 struct bpf_offload_dev
{
37 const struct bpf_prog_offload_ops
*ops
;
38 struct list_head netdevs
;
42 struct bpf_offload_netdev
{
44 struct net_device
*netdev
;
45 struct bpf_offload_dev
*offdev
; /* NULL when bound-only */
46 struct list_head progs
;
47 struct list_head maps
;
48 struct list_head offdev_netdevs
;
51 static const struct rhashtable_params offdevs_params
= {
53 .key_len
= sizeof(struct net_device
*),
54 .key_offset
= offsetof(struct bpf_offload_netdev
, netdev
),
55 .head_offset
= offsetof(struct bpf_offload_netdev
, l
),
56 .automatic_shrinking
= true,
59 static struct rhashtable offdevs
;
61 static int bpf_dev_offload_check(struct net_device
*netdev
)
65 if (!netdev
->netdev_ops
->ndo_bpf
)
70 static struct bpf_offload_netdev
*
71 bpf_offload_find_netdev(struct net_device
*netdev
)
73 lockdep_assert_held(&bpf_devs_lock
);
75 return rhashtable_lookup_fast(&offdevs
, &netdev
, offdevs_params
);
78 static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev
*offdev
,
79 struct net_device
*netdev
)
81 struct bpf_offload_netdev
*ondev
;
84 ondev
= kzalloc(sizeof(*ondev
), GFP_KERNEL
);
88 ondev
->netdev
= netdev
;
89 ondev
->offdev
= offdev
;
90 INIT_LIST_HEAD(&ondev
->progs
);
91 INIT_LIST_HEAD(&ondev
->maps
);
93 err
= rhashtable_insert_fast(&offdevs
, &ondev
->l
, offdevs_params
);
95 netdev_warn(netdev
, "failed to register for BPF offload\n");
100 list_add(&ondev
->offdev_netdevs
, &offdev
->netdevs
);
108 static void __bpf_prog_offload_destroy(struct bpf_prog
*prog
)
110 struct bpf_prog_offload
*offload
= prog
->aux
->offload
;
112 if (offload
->dev_state
)
113 offload
->offdev
->ops
->destroy(prog
);
115 list_del_init(&offload
->offloads
);
117 prog
->aux
->offload
= NULL
;
120 static int bpf_map_offload_ndo(struct bpf_offloaded_map
*offmap
,
121 enum bpf_netdev_command cmd
)
123 struct netdev_bpf data
= {};
124 struct net_device
*netdev
;
129 data
.offmap
= offmap
;
130 /* Caller must make sure netdev is valid */
131 netdev
= offmap
->netdev
;
133 return netdev
->netdev_ops
->ndo_bpf(netdev
, &data
);
136 static void __bpf_map_offload_destroy(struct bpf_offloaded_map
*offmap
)
138 WARN_ON(bpf_map_offload_ndo(offmap
, BPF_OFFLOAD_MAP_FREE
));
139 /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
140 bpf_map_free_id(&offmap
->map
);
141 list_del_init(&offmap
->offloads
);
142 offmap
->netdev
= NULL
;
145 static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev
*offdev
,
146 struct net_device
*netdev
)
148 struct bpf_offload_netdev
*ondev
, *altdev
= NULL
;
149 struct bpf_offloaded_map
*offmap
, *mtmp
;
150 struct bpf_prog_offload
*offload
, *ptmp
;
154 ondev
= rhashtable_lookup_fast(&offdevs
, &netdev
, offdevs_params
);
158 WARN_ON(rhashtable_remove_fast(&offdevs
, &ondev
->l
, offdevs_params
));
160 /* Try to move the objects to another netdev of the device */
162 list_del(&ondev
->offdev_netdevs
);
163 altdev
= list_first_entry_or_null(&offdev
->netdevs
,
164 struct bpf_offload_netdev
,
169 list_for_each_entry(offload
, &ondev
->progs
, offloads
)
170 offload
->netdev
= altdev
->netdev
;
171 list_splice_init(&ondev
->progs
, &altdev
->progs
);
173 list_for_each_entry(offmap
, &ondev
->maps
, offloads
)
174 offmap
->netdev
= altdev
->netdev
;
175 list_splice_init(&ondev
->maps
, &altdev
->maps
);
177 list_for_each_entry_safe(offload
, ptmp
, &ondev
->progs
, offloads
)
178 __bpf_prog_offload_destroy(offload
->prog
);
179 list_for_each_entry_safe(offmap
, mtmp
, &ondev
->maps
, offloads
)
180 __bpf_map_offload_destroy(offmap
);
183 WARN_ON(!list_empty(&ondev
->progs
));
184 WARN_ON(!list_empty(&ondev
->maps
));
188 static int __bpf_prog_dev_bound_init(struct bpf_prog
*prog
, struct net_device
*netdev
)
190 struct bpf_offload_netdev
*ondev
;
191 struct bpf_prog_offload
*offload
;
194 offload
= kzalloc(sizeof(*offload
), GFP_USER
);
198 offload
->prog
= prog
;
199 offload
->netdev
= netdev
;
201 ondev
= bpf_offload_find_netdev(offload
->netdev
);
203 if (bpf_prog_is_offloaded(prog
->aux
)) {
208 /* When only binding to the device, explicitly
209 * create an entry in the hashtable.
211 err
= __bpf_offload_dev_netdev_register(NULL
, offload
->netdev
);
214 ondev
= bpf_offload_find_netdev(offload
->netdev
);
216 offload
->offdev
= ondev
->offdev
;
217 prog
->aux
->offload
= offload
;
218 list_add_tail(&offload
->offloads
, &ondev
->progs
);
226 int bpf_prog_dev_bound_init(struct bpf_prog
*prog
, union bpf_attr
*attr
)
228 struct net_device
*netdev
;
231 if (attr
->prog_type
!= BPF_PROG_TYPE_SCHED_CLS
&&
232 attr
->prog_type
!= BPF_PROG_TYPE_XDP
)
235 if (attr
->prog_flags
& ~BPF_F_XDP_DEV_BOUND_ONLY
)
238 if (attr
->prog_type
== BPF_PROG_TYPE_SCHED_CLS
&&
239 attr
->prog_flags
& BPF_F_XDP_DEV_BOUND_ONLY
)
242 netdev
= dev_get_by_index(current
->nsproxy
->net_ns
, attr
->prog_ifindex
);
246 err
= bpf_dev_offload_check(netdev
);
250 prog
->aux
->offload_requested
= !(attr
->prog_flags
& BPF_F_XDP_DEV_BOUND_ONLY
);
252 down_write(&bpf_devs_lock
);
253 err
= __bpf_prog_dev_bound_init(prog
, netdev
);
254 up_write(&bpf_devs_lock
);
261 int bpf_prog_dev_bound_inherit(struct bpf_prog
*new_prog
, struct bpf_prog
*old_prog
)
265 if (!bpf_prog_is_dev_bound(old_prog
->aux
))
268 if (bpf_prog_is_offloaded(old_prog
->aux
))
271 new_prog
->aux
->dev_bound
= old_prog
->aux
->dev_bound
;
272 new_prog
->aux
->offload_requested
= old_prog
->aux
->offload_requested
;
274 down_write(&bpf_devs_lock
);
275 if (!old_prog
->aux
->offload
) {
280 err
= __bpf_prog_dev_bound_init(new_prog
, old_prog
->aux
->offload
->netdev
);
283 up_write(&bpf_devs_lock
);
287 int bpf_prog_offload_verifier_prep(struct bpf_prog
*prog
)
289 struct bpf_prog_offload
*offload
;
292 down_read(&bpf_devs_lock
);
293 offload
= prog
->aux
->offload
;
295 ret
= offload
->offdev
->ops
->prepare(prog
);
296 offload
->dev_state
= !ret
;
298 up_read(&bpf_devs_lock
);
303 int bpf_prog_offload_verify_insn(struct bpf_verifier_env
*env
,
304 int insn_idx
, int prev_insn_idx
)
306 struct bpf_prog_offload
*offload
;
309 down_read(&bpf_devs_lock
);
310 offload
= env
->prog
->aux
->offload
;
312 ret
= offload
->offdev
->ops
->insn_hook(env
, insn_idx
,
314 up_read(&bpf_devs_lock
);
319 int bpf_prog_offload_finalize(struct bpf_verifier_env
*env
)
321 struct bpf_prog_offload
*offload
;
324 down_read(&bpf_devs_lock
);
325 offload
= env
->prog
->aux
->offload
;
327 if (offload
->offdev
->ops
->finalize
)
328 ret
= offload
->offdev
->ops
->finalize(env
);
332 up_read(&bpf_devs_lock
);
338 bpf_prog_offload_replace_insn(struct bpf_verifier_env
*env
, u32 off
,
339 struct bpf_insn
*insn
)
341 const struct bpf_prog_offload_ops
*ops
;
342 struct bpf_prog_offload
*offload
;
343 int ret
= -EOPNOTSUPP
;
345 down_read(&bpf_devs_lock
);
346 offload
= env
->prog
->aux
->offload
;
348 ops
= offload
->offdev
->ops
;
349 if (!offload
->opt_failed
&& ops
->replace_insn
)
350 ret
= ops
->replace_insn(env
, off
, insn
);
351 offload
->opt_failed
|= ret
;
353 up_read(&bpf_devs_lock
);
357 bpf_prog_offload_remove_insns(struct bpf_verifier_env
*env
, u32 off
, u32 cnt
)
359 struct bpf_prog_offload
*offload
;
360 int ret
= -EOPNOTSUPP
;
362 down_read(&bpf_devs_lock
);
363 offload
= env
->prog
->aux
->offload
;
365 if (!offload
->opt_failed
&& offload
->offdev
->ops
->remove_insns
)
366 ret
= offload
->offdev
->ops
->remove_insns(env
, off
, cnt
);
367 offload
->opt_failed
|= ret
;
369 up_read(&bpf_devs_lock
);
372 void bpf_prog_dev_bound_destroy(struct bpf_prog
*prog
)
374 struct bpf_offload_netdev
*ondev
;
375 struct net_device
*netdev
;
378 down_write(&bpf_devs_lock
);
379 if (prog
->aux
->offload
) {
380 list_del_init(&prog
->aux
->offload
->offloads
);
382 netdev
= prog
->aux
->offload
->netdev
;
383 __bpf_prog_offload_destroy(prog
);
385 ondev
= bpf_offload_find_netdev(netdev
);
386 if (!ondev
->offdev
&& list_empty(&ondev
->progs
))
387 __bpf_offload_dev_netdev_unregister(NULL
, netdev
);
389 up_write(&bpf_devs_lock
);
393 static int bpf_prog_offload_translate(struct bpf_prog
*prog
)
395 struct bpf_prog_offload
*offload
;
398 down_read(&bpf_devs_lock
);
399 offload
= prog
->aux
->offload
;
401 ret
= offload
->offdev
->ops
->translate(prog
);
402 up_read(&bpf_devs_lock
);
/* Stand-in bpf_func for offloaded progs: running one on the host is a bug. */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
414 int bpf_prog_offload_compile(struct bpf_prog
*prog
)
416 prog
->bpf_func
= bpf_prog_warn_on_exec
;
418 return bpf_prog_offload_translate(prog
);
/* Arguments threaded through ns_get_path_cb() for prog info filling. */
struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};
426 static struct ns_common
*bpf_prog_offload_info_fill_ns(void *private_data
)
428 struct ns_get_path_bpf_prog_args
*args
= private_data
;
429 struct bpf_prog_aux
*aux
= args
->prog
->aux
;
430 struct ns_common
*ns
;
434 down_read(&bpf_devs_lock
);
437 args
->info
->ifindex
= aux
->offload
->netdev
->ifindex
;
438 net
= dev_net(aux
->offload
->netdev
);
442 args
->info
->ifindex
= 0;
446 up_read(&bpf_devs_lock
);
452 int bpf_prog_offload_info_fill(struct bpf_prog_info
*info
,
453 struct bpf_prog
*prog
)
455 struct ns_get_path_bpf_prog_args args
= {
459 struct bpf_prog_aux
*aux
= prog
->aux
;
460 struct inode
*ns_inode
;
466 res
= ns_get_path_cb(&ns_path
, bpf_prog_offload_info_fill_ns
, &args
);
473 down_read(&bpf_devs_lock
);
476 up_read(&bpf_devs_lock
);
480 ulen
= info
->jited_prog_len
;
481 info
->jited_prog_len
= aux
->offload
->jited_len
;
482 if (info
->jited_prog_len
&& ulen
) {
483 uinsns
= u64_to_user_ptr(info
->jited_prog_insns
);
484 ulen
= min_t(u32
, info
->jited_prog_len
, ulen
);
485 if (copy_to_user(uinsns
, aux
->offload
->jited_image
, ulen
)) {
486 up_read(&bpf_devs_lock
);
491 up_read(&bpf_devs_lock
);
493 ns_inode
= ns_path
.dentry
->d_inode
;
494 info
->netns_dev
= new_encode_dev(ns_inode
->i_sb
->s_dev
);
495 info
->netns_ino
= ns_inode
->i_ino
;
501 const struct bpf_prog_ops bpf_offload_prog_ops
= {
504 struct bpf_map
*bpf_map_offload_map_alloc(union bpf_attr
*attr
)
506 struct net
*net
= current
->nsproxy
->net_ns
;
507 struct bpf_offload_netdev
*ondev
;
508 struct bpf_offloaded_map
*offmap
;
511 if (!capable(CAP_SYS_ADMIN
))
512 return ERR_PTR(-EPERM
);
513 if (attr
->map_type
!= BPF_MAP_TYPE_ARRAY
&&
514 attr
->map_type
!= BPF_MAP_TYPE_HASH
)
515 return ERR_PTR(-EINVAL
);
517 offmap
= bpf_map_area_alloc(sizeof(*offmap
), NUMA_NO_NODE
);
519 return ERR_PTR(-ENOMEM
);
521 bpf_map_init_from_attr(&offmap
->map
, attr
);
524 down_write(&bpf_devs_lock
);
525 offmap
->netdev
= __dev_get_by_index(net
, attr
->map_ifindex
);
526 err
= bpf_dev_offload_check(offmap
->netdev
);
530 ondev
= bpf_offload_find_netdev(offmap
->netdev
);
536 err
= bpf_map_offload_ndo(offmap
, BPF_OFFLOAD_MAP_ALLOC
);
540 list_add_tail(&offmap
->offloads
, &ondev
->maps
);
541 up_write(&bpf_devs_lock
);
547 up_write(&bpf_devs_lock
);
549 bpf_map_area_free(offmap
);
553 void bpf_map_offload_map_free(struct bpf_map
*map
)
555 struct bpf_offloaded_map
*offmap
= map_to_offmap(map
);
558 down_write(&bpf_devs_lock
);
560 __bpf_map_offload_destroy(offmap
);
561 up_write(&bpf_devs_lock
);
564 bpf_map_area_free(offmap
);
567 u64
bpf_map_offload_map_mem_usage(const struct bpf_map
*map
)
569 /* The memory dynamically allocated in netdev dev_ops is not counted */
570 return sizeof(struct bpf_offloaded_map
);
573 int bpf_map_offload_lookup_elem(struct bpf_map
*map
, void *key
, void *value
)
575 struct bpf_offloaded_map
*offmap
= map_to_offmap(map
);
578 down_read(&bpf_devs_lock
);
580 ret
= offmap
->dev_ops
->map_lookup_elem(offmap
, key
, value
);
581 up_read(&bpf_devs_lock
);
586 int bpf_map_offload_update_elem(struct bpf_map
*map
,
587 void *key
, void *value
, u64 flags
)
589 struct bpf_offloaded_map
*offmap
= map_to_offmap(map
);
592 if (unlikely(flags
> BPF_EXIST
))
595 down_read(&bpf_devs_lock
);
597 ret
= offmap
->dev_ops
->map_update_elem(offmap
, key
, value
,
599 up_read(&bpf_devs_lock
);
604 int bpf_map_offload_delete_elem(struct bpf_map
*map
, void *key
)
606 struct bpf_offloaded_map
*offmap
= map_to_offmap(map
);
609 down_read(&bpf_devs_lock
);
611 ret
= offmap
->dev_ops
->map_delete_elem(offmap
, key
);
612 up_read(&bpf_devs_lock
);
617 int bpf_map_offload_get_next_key(struct bpf_map
*map
, void *key
, void *next_key
)
619 struct bpf_offloaded_map
*offmap
= map_to_offmap(map
);
622 down_read(&bpf_devs_lock
);
624 ret
= offmap
->dev_ops
->map_get_next_key(offmap
, key
, next_key
);
625 up_read(&bpf_devs_lock
);
/* Arguments threaded through ns_get_path_cb() for map info filling. */
struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};
635 static struct ns_common
*bpf_map_offload_info_fill_ns(void *private_data
)
637 struct ns_get_path_bpf_map_args
*args
= private_data
;
638 struct ns_common
*ns
;
642 down_read(&bpf_devs_lock
);
644 if (args
->offmap
->netdev
) {
645 args
->info
->ifindex
= args
->offmap
->netdev
->ifindex
;
646 net
= dev_net(args
->offmap
->netdev
);
650 args
->info
->ifindex
= 0;
654 up_read(&bpf_devs_lock
);
660 int bpf_map_offload_info_fill(struct bpf_map_info
*info
, struct bpf_map
*map
)
662 struct ns_get_path_bpf_map_args args
= {
663 .offmap
= map_to_offmap(map
),
666 struct inode
*ns_inode
;
670 res
= ns_get_path_cb(&ns_path
, bpf_map_offload_info_fill_ns
, &args
);
677 ns_inode
= ns_path
.dentry
->d_inode
;
678 info
->netns_dev
= new_encode_dev(ns_inode
->i_sb
->s_dev
);
679 info
->netns_ino
= ns_inode
->i_ino
;
685 static bool __bpf_offload_dev_match(struct bpf_prog
*prog
,
686 struct net_device
*netdev
)
688 struct bpf_offload_netdev
*ondev1
, *ondev2
;
689 struct bpf_prog_offload
*offload
;
691 if (!bpf_prog_is_dev_bound(prog
->aux
))
694 offload
= prog
->aux
->offload
;
697 if (offload
->netdev
== netdev
)
700 ondev1
= bpf_offload_find_netdev(offload
->netdev
);
701 ondev2
= bpf_offload_find_netdev(netdev
);
703 return ondev1
&& ondev2
&& ondev1
->offdev
== ondev2
->offdev
;
706 bool bpf_offload_dev_match(struct bpf_prog
*prog
, struct net_device
*netdev
)
710 down_read(&bpf_devs_lock
);
711 ret
= __bpf_offload_dev_match(prog
, netdev
);
712 up_read(&bpf_devs_lock
);
716 EXPORT_SYMBOL_GPL(bpf_offload_dev_match
);
718 bool bpf_prog_dev_bound_match(const struct bpf_prog
*lhs
, const struct bpf_prog
*rhs
)
722 if (bpf_prog_is_offloaded(lhs
->aux
) != bpf_prog_is_offloaded(rhs
->aux
))
725 down_read(&bpf_devs_lock
);
726 ret
= lhs
->aux
->offload
&& rhs
->aux
->offload
&&
727 lhs
->aux
->offload
->netdev
&&
728 lhs
->aux
->offload
->netdev
== rhs
->aux
->offload
->netdev
;
729 up_read(&bpf_devs_lock
);
734 bool bpf_offload_prog_map_match(struct bpf_prog
*prog
, struct bpf_map
*map
)
736 struct bpf_offloaded_map
*offmap
;
739 if (!bpf_map_is_offloaded(map
))
740 return bpf_map_offload_neutral(map
);
741 offmap
= map_to_offmap(map
);
743 down_read(&bpf_devs_lock
);
744 ret
= __bpf_offload_dev_match(prog
, offmap
->netdev
);
745 up_read(&bpf_devs_lock
);
750 int bpf_offload_dev_netdev_register(struct bpf_offload_dev
*offdev
,
751 struct net_device
*netdev
)
755 down_write(&bpf_devs_lock
);
756 err
= __bpf_offload_dev_netdev_register(offdev
, netdev
);
757 up_write(&bpf_devs_lock
);
760 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register
);
762 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev
*offdev
,
763 struct net_device
*netdev
)
765 down_write(&bpf_devs_lock
);
766 __bpf_offload_dev_netdev_unregister(offdev
, netdev
);
767 up_write(&bpf_devs_lock
);
769 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister
);
771 struct bpf_offload_dev
*
772 bpf_offload_dev_create(const struct bpf_prog_offload_ops
*ops
, void *priv
)
774 struct bpf_offload_dev
*offdev
;
776 offdev
= kzalloc(sizeof(*offdev
), GFP_KERNEL
);
778 return ERR_PTR(-ENOMEM
);
782 INIT_LIST_HEAD(&offdev
->netdevs
);
786 EXPORT_SYMBOL_GPL(bpf_offload_dev_create
);
788 void bpf_offload_dev_destroy(struct bpf_offload_dev
*offdev
)
790 WARN_ON(!list_empty(&offdev
->netdevs
));
793 EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy
);
795 void *bpf_offload_dev_priv(struct bpf_offload_dev
*offdev
)
799 EXPORT_SYMBOL_GPL(bpf_offload_dev_priv
);
801 void bpf_dev_bound_netdev_unregister(struct net_device
*dev
)
803 struct bpf_offload_netdev
*ondev
;
807 down_write(&bpf_devs_lock
);
808 ondev
= bpf_offload_find_netdev(dev
);
809 if (ondev
&& !ondev
->offdev
)
810 __bpf_offload_dev_netdev_unregister(NULL
, ondev
->netdev
);
811 up_write(&bpf_devs_lock
);
814 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log
*log
,
815 struct bpf_prog_aux
*prog_aux
)
817 if (!bpf_prog_is_dev_bound(prog_aux
)) {
818 bpf_log(log
, "metadata kfuncs require device-bound program\n");
822 if (bpf_prog_is_offloaded(prog_aux
)) {
823 bpf_log(log
, "metadata kfuncs can't be offloaded\n");
830 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog
*prog
, u32 func_id
)
832 const struct xdp_metadata_ops
*ops
;
835 /* We don't hold bpf_devs_lock while resolving several
836 * kfuncs and can race with the unregister_netdevice().
837 * We rely on bpf_dev_bound_match() check at attach
838 * to render this program unusable.
840 down_read(&bpf_devs_lock
);
841 if (!prog
->aux
->offload
)
844 ops
= prog
->aux
->offload
->netdev
->xdp_metadata_ops
;
848 if (func_id
== bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP
))
849 p
= ops
->xmo_rx_timestamp
;
850 else if (func_id
== bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH
))
851 p
= ops
->xmo_rx_hash
;
853 up_read(&bpf_devs_lock
);
858 static int __init
bpf_offload_init(void)
860 return rhashtable_init(&offdevs
, &offdevs_params
);
863 core_initcall(bpf_offload_init
);