+++ /dev/null
-From 8ac11a7b2fe3c1dd3cebb0269e03b7cf34533a15 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 28 Feb 2024 22:45:20 -0800
-Subject: libbpf: Convert st_ops->data to shadow type.
-
-From: Kui-Feng Lee <thinker.li@gmail.com>
-
-[ Upstream commit 69e4a9d2b3f5adf5af4feeab0a9f505da971265a ]
-
-Convert st_ops->data to the shadow type of the struct_ops map. The shadow
-type of a struct_ops type is a variant of the original struct type
-providing a way to access/change the values in the maps of the struct_ops
-type.
-
-bpf_map__initial_value() will return st_ops->data for struct_ops types. The
-skeleton is going to use it as the pointer to the shadow type of the
-original struct type.
-
-One of the main differences between the original struct type and the shadow
-type is that all function pointers of the shadow type are converted to
-pointers to struct bpf_program. Users can replace these bpf_program
-pointers with other BPF programs. The st_ops->progs[] will be updated
-before updating the value of a map to reflect the changes made by users.
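For illustration, a minimal sketch of how a skeleton user might rewire one
struct_ops member through the shadow type between open and load. The
skeleton, map, and program names are hypothetical, and this assumes the
matching skeleton-generation support from the same series:

    struct sched_skel *skel = sched_skel__open();

    /* in the shadow type, function-pointer members appear as
     * struct bpf_program *
     */
    skel->struct_ops.my_ops->dequeue = skel->progs.dequeue_v2;

    /* st_ops->progs[] is refreshed from the shadow data when the map
     * value is built during load
     */
    sched_skel__load(skel);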
-
-Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240229064523.2091270-3-thinker.li@gmail.com
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 40 ++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 38 insertions(+), 2 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index bd5b32c9c5406..76835fa67c6d1 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -1011,6 +1011,19 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
- return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
- }
-
-+static bool is_valid_st_ops_program(struct bpf_object *obj,
-+ const struct bpf_program *prog)
-+{
-+ int i;
-+
-+ for (i = 0; i < obj->nr_programs; i++) {
-+ if (&obj->programs[i] == prog)
-+ return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
-+ }
-+
-+ return false;
-+}
-+
- /* Init the map's fields that depend on kern_btf */
- static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
-@@ -1099,9 +1112,16 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- if (btf_is_ptr(mtype)) {
- struct bpf_program *prog;
-
-- prog = st_ops->progs[i];
-+ /* Update the value from the shadow type */
-+ prog = *(void **)mdata;
-+ st_ops->progs[i] = prog;
- if (!prog)
- continue;
-+ if (!is_valid_st_ops_program(obj, prog)) {
-+ pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
-+ map->name, mname);
-+ return -ENOTSUP;
-+ }
-
- kern_mtype = skip_mods_and_typedefs(kern_btf,
- kern_mtype->type,
-@@ -8902,7 +8922,9 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
- return NULL;
- }
-
--/* Collect the reloc from ELF and populate the st_ops->progs[] */
-+/* Collect the reloc from ELF, populate the st_ops->progs[], and update
-+ * st_ops->data for shadow type.
-+ */
- static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- Elf64_Shdr *shdr, Elf_Data *data)
- {
-@@ -9015,6 +9037,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- }
-
- st_ops->progs[member_idx] = prog;
-+
-+ /* st_ops->data will be exposed to users, being returned by
-+ * bpf_map__initial_value() as a pointer to the shadow
-+ * type. All function pointers in the original struct type
-+ * should be converted to a pointer to struct bpf_program
-+ * in the shadow type.
-+ */
-+ *((struct bpf_program **)(st_ops->data + moff)) = prog;
- }
-
- return 0;
-@@ -9373,6 +9403,12 @@ int bpf_map__set_initial_value(struct bpf_map *map,
-
- const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
- {
-+ if (bpf_map__is_struct_ops(map)) {
-+ if (psize)
-+ *psize = map->def.value_size;
-+ return map->st_ops->data;
-+ }
-+
- if (!map->mmaped)
- return NULL;
- *psize = map->def.value_size;
---
-2.43.0
-
+++ /dev/null
-From 06cc14343dc0e027e13fa9264640da1a9f1f72ba Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 22 Mar 2023 20:24:01 -0700
-Subject: libbpf: Create a bpf_link in bpf_map__attach_struct_ops().
-
-From: Kui-Feng Lee <kuifeng@meta.com>
-
-[ Upstream commit 8d1608d70927747da9c1a8770edf7b6ee68f8ebc ]
-
-bpf_map__attach_struct_ops() was creating a dummy bpf_link as a
-placeholder, but now it is constructing an authentic one by calling
-bpf_link_create() if the map has the BPF_F_LINK flag.
-
-You can flag a struct_ops map with BPF_F_LINK by calling
-bpf_map__set_map_flags().
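As a rough usage sketch (hypothetical skeleton and map names, error
handling elided), opting a struct_ops map into link-based attachment
looks like:

    /* must be done before the object is loaded */
    bpf_map__set_map_flags(skel->maps.my_ops,
                           bpf_map__map_flags(skel->maps.my_ops) | BPF_F_LINK);

    /* load the object here, e.g. via the skeleton's __load() */

    /* with BPF_F_LINK set, this now creates a real kernel bpf_link */
    struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.my_ops);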
-
-Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
-Acked-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/r/20230323032405.3735486-5-kuifeng@meta.com
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 95 +++++++++++++++++++++++++++++++-----------
- 1 file changed, 71 insertions(+), 24 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 878f05a424218..25e01addcdb57 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -117,6 +117,7 @@ static const char * const attach_type_name[] = {
- [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
- [BPF_PERF_EVENT] = "perf_event",
- [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
-+ [BPF_STRUCT_OPS] = "struct_ops",
- };
-
- static const char * const link_type_name[] = {
-@@ -7670,6 +7671,37 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
- return 0;
- }
-
-+static void bpf_map_prepare_vdata(const struct bpf_map *map)
-+{
-+ struct bpf_struct_ops *st_ops;
-+ __u32 i;
-+
-+ st_ops = map->st_ops;
-+ for (i = 0; i < btf_vlen(st_ops->type); i++) {
-+ struct bpf_program *prog = st_ops->progs[i];
-+ void *kern_data;
-+ int prog_fd;
-+
-+ if (!prog)
-+ continue;
-+
-+ prog_fd = bpf_program__fd(prog);
-+ kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
-+ *(unsigned long *)kern_data = prog_fd;
-+ }
-+}
-+
-+static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
-+{
-+ int i;
-+
-+ for (i = 0; i < obj->nr_maps; i++)
-+ if (bpf_map__is_struct_ops(&obj->maps[i]))
-+ bpf_map_prepare_vdata(&obj->maps[i]);
-+
-+ return 0;
-+}
-+
- static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
- {
- int err, i;
-@@ -7695,6 +7727,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
- err = err ? : bpf_object_init_prog_arrays(obj);
-+ err = err ? : bpf_object_prepare_struct_ops(obj);
-
- if (obj->gen_loader) {
- /* reset FDs */
-@@ -11420,22 +11453,30 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
- return link;
- }
-
-+struct bpf_link_struct_ops {
-+ struct bpf_link link;
-+ int map_fd;
-+};
-+
- static int bpf_link__detach_struct_ops(struct bpf_link *link)
- {
-+ struct bpf_link_struct_ops *st_link;
- __u32 zero = 0;
-
-- if (bpf_map_delete_elem(link->fd, &zero))
-- return -errno;
-+ st_link = container_of(link, struct bpf_link_struct_ops, link);
-
-- return 0;
-+ if (st_link->map_fd < 0)
-+ /* w/o a real link */
-+ return bpf_map_delete_elem(link->fd, &zero);
-+
-+ return close(link->fd);
- }
-
- struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
- {
-- struct bpf_struct_ops *st_ops;
-- struct bpf_link *link;
-- __u32 i, zero = 0;
-- int err;
-+ struct bpf_link_struct_ops *link;
-+ __u32 zero = 0;
-+ int err, fd;
-
- if (!bpf_map__is_struct_ops(map) || map->fd == -1)
- return libbpf_err_ptr(-EINVAL);
-@@ -11444,31 +11485,37 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
- if (!link)
- return libbpf_err_ptr(-EINVAL);
-
-- st_ops = map->st_ops;
-- for (i = 0; i < btf_vlen(st_ops->type); i++) {
-- struct bpf_program *prog = st_ops->progs[i];
-- void *kern_data;
-- int prog_fd;
-+ /* kern_vdata should be prepared during the loading phase. */
-+ err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
-+ /* It can be EBUSY if the map has been used to create or
-+ * update a link before. We don't allow updating the value of
-+ * a struct_ops once it is set. That ensures that the value
-+ * never changed. So, it is safe to skip EBUSY.
-+ */
-+ if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
-+ free(link);
-+ return libbpf_err_ptr(err);
-+ }
-
-- if (!prog)
-- continue;
-+ link->link.detach = bpf_link__detach_struct_ops;
-
-- prog_fd = bpf_program__fd(prog);
-- kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
-- *(unsigned long *)kern_data = prog_fd;
-+ if (!(map->def.map_flags & BPF_F_LINK)) {
-+ /* w/o a real link */
-+ link->link.fd = map->fd;
-+ link->map_fd = -1;
-+ return &link->link;
- }
-
-- err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
-- if (err) {
-- err = -errno;
-+ fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
-+ if (fd < 0) {
- free(link);
-- return libbpf_err_ptr(err);
-+ return libbpf_err_ptr(fd);
- }
-
-- link->detach = bpf_link__detach_struct_ops;
-- link->fd = map->fd;
-+ link->link.fd = fd;
-+ link->map_fd = map->fd;
-
-- return link;
-+ return &link->link;
- }
-
- typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
---
-2.43.0
-
+++ /dev/null
-From 0a7be403c467ac132a333e3084c3988b9b77d4c8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 24 Jul 2024 12:14:58 -0500
-Subject: libbpf: Don't take direct pointers into BTF data from st_ops
-
-From: David Vernet <void@manifault.com>
-
-[ Upstream commit 04a94133f1b3cccb19e056c26f056c50b4e5b3b1 ]
-
-In struct bpf_struct_ops, we take a pointer to a BTF type name and a
-struct btf_type. This was presumably done for convenience, but can
-actually result in subtle and confusing bugs given that BTF data can be
-invalidated before a program is loaded. For example, in sched_ext, we
-may sometimes resize a data section after a skeleton has been opened,
-but before the struct_ops scheduler map has been loaded. This may cause
-the BTF data to be realloc'd, which can then cause a UAF when loading
-the program because the struct_ops map has pointers directly into the
-BTF data.
-
-We're already storing the BTF type_id in struct bpf_struct_ops. Because
-type_id is stable, we can therefore just update the places where we were
-looking at those pointers to instead do the lookups we need from the
-type_id.
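A sketch of the failure mode being avoided (sched_ext-style flow; the
skeleton and map names are hypothetical):

    struct scx_skel *skel = scx_skel__open();

    /* resizing a datasec map after open may realloc the object's BTF data */
    bpf_map__set_value_size(skel->maps.data_foo, 8192);

    /* if the struct_ops map had cached `const struct btf_type *` or name
     * pointers at open time, load would now dereference freed memory;
     * keeping only type_id and resolving via btf__type_by_id() at the use
     * sites stays valid across such reallocations
     */
    scx_skel__load(skel);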
-
-Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
-Signed-off-by: David Vernet <void@manifault.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240724171459.281234-1-void@manifault.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 23 +++++++++++++----------
- 1 file changed, 13 insertions(+), 10 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 7934919b153cb..5c82a223c6f61 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -441,8 +441,6 @@ struct bpf_program {
- };
-
- struct bpf_struct_ops {
-- const char *tname;
-- const struct btf_type *type;
- struct bpf_program **progs;
- __u32 *kern_func_off;
- /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
-@@ -1044,11 +1042,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
- continue;
-
- for (j = 0; j < obj->nr_maps; ++j) {
-+ const struct btf_type *type;
-+
- map = &obj->maps[j];
- if (!bpf_map__is_struct_ops(map))
- continue;
-
-- vlen = btf_vlen(map->st_ops->type);
-+ type = btf__type_by_id(obj->btf, map->st_ops->type_id);
-+ vlen = btf_vlen(type);
- for (k = 0; k < vlen; ++k) {
- slot_prog = map->st_ops->progs[k];
- if (prog != slot_prog)
-@@ -1082,8 +1083,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- int err;
-
- st_ops = map->st_ops;
-- type = st_ops->type;
-- tname = st_ops->tname;
-+ type = btf__type_by_id(btf, st_ops->type_id);
-+ tname = btf__name_by_offset(btf, type->name_off);
- err = find_struct_ops_kern_types(obj, tname, &mod_btf,
- &kern_type, &kern_type_id,
- &kern_vtype, &kern_vtype_id,
-@@ -1313,8 +1314,6 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
- memcpy(st_ops->data,
- obj->efile.st_ops_data->d_buf + vsi->offset,
- type->size);
-- st_ops->tname = tname;
-- st_ops->type = type;
- st_ops->type_id = type_id;
-
- pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
-@@ -7780,11 +7779,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
-
- static void bpf_map_prepare_vdata(const struct bpf_map *map)
- {
-+ const struct btf_type *type;
- struct bpf_struct_ops *st_ops;
- __u32 i;
-
- st_ops = map->st_ops;
-- for (i = 0; i < btf_vlen(st_ops->type); i++) {
-+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
-+ for (i = 0; i < btf_vlen(type); i++) {
- struct bpf_program *prog = st_ops->progs[i];
- void *kern_data;
- int prog_fd;
-@@ -8971,6 +8972,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
- static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- Elf64_Shdr *shdr, Elf_Data *data)
- {
-+ const struct btf_type *type;
- const struct btf_member *member;
- struct bpf_struct_ops *st_ops;
- struct bpf_program *prog;
-@@ -9030,13 +9032,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- }
- insn_idx = sym->st_value / BPF_INSN_SZ;
-
-- member = find_member_by_offset(st_ops->type, moff * 8);
-+ type = btf__type_by_id(btf, st_ops->type_id);
-+ member = find_member_by_offset(type, moff * 8);
- if (!member) {
- pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
- map->name, moff);
- return -EINVAL;
- }
-- member_idx = member - btf_members(st_ops->type);
-+ member_idx = member - btf_members(type);
- name = btf__name_by_offset(btf, member->name_off);
-
- if (!resolve_func_ptr(btf, member->type, NULL)) {
---
-2.43.0
-
+++ /dev/null
-From 637be1d85b6354976dec1a81af51919bfcc4ece6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 25 May 2023 15:13:11 -0700
-Subject: libbpf: Ensure FD >= 3 during bpf_map__reuse_fd()
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 4aadd2920b81b3d7e5c8ac63c7d5d673f3c8aaeb ]
-
-Improve bpf_map__reuse_fd() logic and ensure that dup'ed map FD is
-"good" (>= 3) and has O_CLOEXEC flags. Use fcntl(F_DUPFD_CLOEXEC) for
-that, similarly to ensure_good_fd() helper we already use in low-level
-APIs that work with bpf() syscall.
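The idiom boils down to the following standalone sketch (the helper name
is made up here, not the exact libbpf function):

    #include <errno.h>
    #include <fcntl.h>

    /* dup-like copy of fd: the new FD is guaranteed to be >= 3 and has
     * O_CLOEXEC set, and the original fd is left open
     */
    static int dup_good_fd(int fd)
    {
            int new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

            return new_fd < 0 ? -errno : new_fd;
    }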
-
-Suggested-by: Lennart Poettering <lennart@poettering.net>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
-Link: https://lore.kernel.org/bpf/20230525221311.2136408-2-andrii@kernel.org
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 13 ++++++-------
- 1 file changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 25e01addcdb57..b18dab0c80787 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -4342,18 +4342,17 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
- if (!new_name)
- return libbpf_err(-errno);
-
-- new_fd = open("/", O_RDONLY | O_CLOEXEC);
-+ /*
-+ * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
-+ * This is similar to what we do in ensure_good_fd(), but without
-+ * closing original FD.
-+ */
-+ new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
- if (new_fd < 0) {
- err = -errno;
- goto err_free_new_name;
- }
-
-- new_fd = dup3(fd, new_fd, O_CLOEXEC);
-- if (new_fd < 0) {
-- err = -errno;
-- goto err_close_new_fd;
-- }
--
- err = zclose(map->fd);
- if (err) {
- err = -errno;
---
-2.43.0
-
+++ /dev/null
-From c9f115564561af63db662791e9a35fcf1dfefd2a Mon Sep 17 00:00:00 2001
-From: Martin KaFai Lau <martin.lau@kernel.org>
-Date: Wed, 24 Jan 2024 14:44:18 -0800
-Subject: libbpf: Ensure undefined bpf_attr field stays 0
-
-From: Martin KaFai Lau <martin.lau@kernel.org>
-
-commit c9f115564561af63db662791e9a35fcf1dfefd2a upstream.
-
-The commit 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.")
-sets a newly added field (value_type_btf_obj_fd) to -1 in libbpf when
-the caller of libbpf's bpf_map_create() did not define this field by
-passing a NULL "opts" or passing in an "opts" that does not cover this
-new field. OPTS_HAS(opts, field) is used to decide if the field is
-defined or not:
-
- ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
-
-Once OPTS_HAS decided the field is not defined, that field should
-be set to 0. For this particular new field (value_type_btf_obj_fd),
-its corresponding map_flags "BPF_F_VTYPE_BTF_OBJ_FD" is not set.
-Thus, the kernel does not treat it as an fd field.
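Roughly, the opts accessors behave like this (a simplified restatement,
not the exact libbpf macros), which is why the fallback for an undefined
field must be 0 rather than -1 when the matching map_flags bit is unset:

    #define OPTS_HAS(opts, field) \
            ((opts) && (opts)->sz >= offsetofend(typeof(*(opts)), field))
    #define OPTS_GET(opts, field, fallback) \
            (OPTS_HAS(opts, field) ? (opts)->field : (fallback))

    /* before: callers unaware of the field silently passed -1 to the kernel */
    attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1);
    /* after: an undefined field stays 0, matching an unset BPF_F_VTYPE_BTF_OBJ_FD */
    attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);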
-
-Fixes: 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.")
-Reported-by: Andrii Nakryiko <andrii@kernel.org>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240124224418.2905133-1-martin.lau@linux.dev
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- tools/lib/bpf/bpf.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/tools/lib/bpf/bpf.c
-+++ b/tools/lib/bpf/bpf.c
-@@ -192,7 +192,7 @@ int bpf_map_create(enum bpf_map_type map
- attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
- attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
- attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
-- attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1);
-+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
-
- attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
- attr.map_flags = OPTS_GET(opts, map_flags, 0);
+++ /dev/null
-From e54112b9721da7a24f43e15b2837c0db046090ab Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 19 Jan 2024 14:50:03 -0800
-Subject: libbpf: Find correct module BTFs for struct_ops maps and progs.
-
-From: Kui-Feng Lee <thinker.li@gmail.com>
-
-[ Upstream commit 9e926acda0c2e21bca431a1818665ddcd6939755 ]
-
-Locate the module BTFs for struct_ops maps and progs and pass them to the
-kernel. This ensures that the kernel correctly resolves type IDs from the
-appropriate module BTFs.
-
-For the map of a struct_ops object, the FD of the module BTF is set to
-bpf_map to keep a reference to the module BTF. The FD is passed to the
-kernel as value_type_btf_obj_fd when the struct_ops object is loaded.
-
-For a bpf_struct_ops prog, attach_btf_obj_fd of bpf_prog is the FD of a
-module BTF in the kernel.
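In effect, creating a struct_ops map whose value type lives in a module
BTF ends up passing something like the following to the kernel (a
simplified sketch; mod_btf_fd, kern_vtype_id and value_sz stand in for
values libbpf computes internally):

    LIBBPF_OPTS(bpf_map_create_opts, opts,
            .btf_vmlinux_value_type_id = kern_vtype_id,
            .value_type_btf_obj_fd = mod_btf_fd,      /* FD of the module BTF */
            .map_flags = BPF_F_VTYPE_BTF_OBJ_FD,      /* marks the FD as valid */
    );

    int map_fd = bpf_map_create(BPF_MAP_TYPE_STRUCT_OPS, "my_ops",
                                sizeof(__u32), value_sz, 1, &opts);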
-
-Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
-Acked-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/r/20240119225005.668602-13-thinker.li@gmail.com
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/bpf.c | 4 +++-
- tools/lib/bpf/bpf.h | 4 +++-
- tools/lib/bpf/libbpf.c | 41 ++++++++++++++++++++++++++---------
- tools/lib/bpf/libbpf_probes.c | 1 +
- 4 files changed, 38 insertions(+), 12 deletions(-)
-
-diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
-index 1d49a03528365..7d5419e8cb6ae 100644
---- a/tools/lib/bpf/bpf.c
-+++ b/tools/lib/bpf/bpf.c
-@@ -169,7 +169,8 @@ int bpf_map_create(enum bpf_map_type map_type,
- __u32 max_entries,
- const struct bpf_map_create_opts *opts)
- {
-- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
-+ const size_t attr_sz = offsetofend(union bpf_attr,
-+ value_type_btf_obj_fd);
- union bpf_attr attr;
- int fd;
-
-@@ -191,6 +192,7 @@ int bpf_map_create(enum bpf_map_type map_type,
- attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
- attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
- attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
-+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1);
-
- attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
- attr.map_flags = OPTS_GET(opts, map_flags, 0);
-diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
-index 874fe362375de..41134fec3c06d 100644
---- a/tools/lib/bpf/bpf.h
-+++ b/tools/lib/bpf/bpf.h
-@@ -51,8 +51,10 @@ struct bpf_map_create_opts {
-
- __u32 numa_node;
- __u32 map_ifindex;
-+ __s32 value_type_btf_obj_fd;
-+ size_t:0;
- };
--#define bpf_map_create_opts__last_field map_ifindex
-+#define bpf_map_create_opts__last_field value_type_btf_obj_fd
-
- LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
- const char *map_name,
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 58136673fd312..bd5b32c9c5406 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -501,6 +501,7 @@ struct bpf_map {
- struct bpf_map_def def;
- __u32 numa_node;
- __u32 btf_var_idx;
-+ int mod_btf_fd;
- __u32 btf_key_type_id;
- __u32 btf_value_type_id;
- __u32 btf_vmlinux_value_type_id;
-@@ -935,22 +936,29 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
- return NULL;
- }
-
-+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
-+ __u16 kind, struct btf **res_btf,
-+ struct module_btf **res_mod_btf);
-+
- #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
- static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
- const char *name, __u32 kind);
-
- static int
--find_struct_ops_kern_types(const struct btf *btf, const char *tname,
-+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
-+ struct module_btf **mod_btf,
- const struct btf_type **type, __u32 *type_id,
- const struct btf_type **vtype, __u32 *vtype_id,
- const struct btf_member **data_member)
- {
- const struct btf_type *kern_type, *kern_vtype;
- const struct btf_member *kern_data_member;
-+ struct btf *btf;
- __s32 kern_vtype_id, kern_type_id;
- __u32 i;
-
-- kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
-+ kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
-+ &btf, mod_btf);
- if (kern_type_id < 0) {
- pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
- tname);
-@@ -1004,14 +1012,16 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
- }
-
- /* Init the map's fields that depend on kern_btf */
--static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
-- const struct btf *btf,
-- const struct btf *kern_btf)
-+static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
- const struct btf_member *member, *kern_member, *kern_data_member;
- const struct btf_type *type, *kern_type, *kern_vtype;
- __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
-+ struct bpf_object *obj = map->obj;
-+ const struct btf *btf = obj->btf;
- struct bpf_struct_ops *st_ops;
-+ const struct btf *kern_btf;
-+ struct module_btf *mod_btf;
- void *data, *kern_data;
- const char *tname;
- int err;
-@@ -1019,16 +1029,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
-- err = find_struct_ops_kern_types(kern_btf, tname,
-+ err = find_struct_ops_kern_types(obj, tname, &mod_btf,
- &kern_type, &kern_type_id,
- &kern_vtype, &kern_vtype_id,
- &kern_data_member);
- if (err)
- return err;
-
-+ kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
-+
- pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
- map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
-
-+ map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
- map->def.value_size = kern_vtype->size;
- map->btf_vmlinux_value_type_id = kern_vtype_id;
-
-@@ -1104,6 +1117,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- return -ENOTSUP;
- }
-
-+ if (mod_btf)
-+ prog->attach_btf_obj_fd = mod_btf->fd;
- prog->attach_btf_id = kern_type_id;
- prog->expected_attach_type = kern_member_idx;
-
-@@ -1146,8 +1161,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
- if (!bpf_map__is_struct_ops(map))
- continue;
-
-- err = bpf_map__init_kern_struct_ops(map, obj->btf,
-- obj->btf_vmlinux);
-+ err = bpf_map__init_kern_struct_ops(map);
- if (err)
- return err;
- }
-@@ -5004,8 +5018,13 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- create_attr.numa_node = map->numa_node;
- create_attr.map_extra = map->map_extra;
-
-- if (bpf_map__is_struct_ops(map))
-+ if (bpf_map__is_struct_ops(map)) {
- create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
-+ if (map->mod_btf_fd >= 0) {
-+ create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
-+ create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
-+ }
-+ }
-
- if (obj->btf && btf__fd(obj->btf) >= 0) {
- create_attr.btf_fd = btf__fd(obj->btf);
-@@ -9180,7 +9199,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
- *btf_obj_fd = 0;
- *btf_type_id = 1;
- } else {
-- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
-+ err = find_kernel_btf_id(prog->obj, attach_name,
-+ attach_type, btf_obj_fd,
-+ btf_type_id);
- }
- if (err) {
- pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
-diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
-index d504d96adc839..9b6da1daf2999 100644
---- a/tools/lib/bpf/libbpf_probes.c
-+++ b/tools/lib/bpf/libbpf_probes.c
-@@ -239,6 +239,7 @@ static int probe_map_create(enum bpf_map_type map_type)
- case BPF_MAP_TYPE_STRUCT_OPS:
- /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
- opts.btf_vmlinux_value_type_id = 1;
-+ opts.value_type_btf_obj_fd = -1;
- exp_err = -524; /* -ENOTSUPP */
- break;
- case BPF_MAP_TYPE_BLOOM_FILTER:
---
-2.43.0
-
+++ /dev/null
-From 97bd14ba8c58a2a2d4a1ca930e357725254da724 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 6 Mar 2024 12:45:22 +0200
-Subject: libbpf: Sync progs autoload with maps autocreate for struct_ops maps
-
-From: Eduard Zingerman <eddyz87@gmail.com>
-
-[ Upstream commit fe9d049c3da06373a1a35914b7f695509e4cb1fe ]
-
-Automatically select which struct_ops programs to load depending on
-which struct_ops maps are selected for automatic creation.
-E.g. for the BPF code below:
-
- SEC("struct_ops/test_1") int BPF_PROG(foo) { ... }
- SEC("struct_ops/test_2") int BPF_PROG(bar) { ... }
-
- SEC(".struct_ops.link")
- struct test_ops___v1 A = {
- .foo = (void *)foo
- };
-
- SEC(".struct_ops.link")
- struct test_ops___v2 B = {
- .foo = (void *)foo,
- .bar = (void *)bar,
- };
-
-And the following libbpf API calls:
-
- bpf_map__set_autocreate(skel->maps.A, true);
- bpf_map__set_autocreate(skel->maps.B, false);
-
-The autoload would be enabled for program 'foo' and disabled for
-program 'bar'.
-
-During load, for each struct_ops program P, referenced from some
-struct_ops map M:
-- set P.autoload = true if M.autocreate is true for some M;
-- set P.autoload = false if M.autocreate is false for all M;
-- don't change P.autoload, if P is not referenced from any map.
-
-Do this after bpf_object__init_kern_struct_ops_maps()
-to make sure that shadow vars assignment is done.
-
-Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240306104529.6453-9-eddyz87@gmail.com
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 43 ++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 76835fa67c6d1..7934919b153cb 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -1024,6 +1024,48 @@ static bool is_valid_st_ops_program(struct bpf_object *obj,
- return false;
- }
-
-+/* For each struct_ops program P, referenced from some struct_ops map M,
-+ * enable P.autoload if there are Ms for which M.autocreate is true,
-+ * disable P.autoload if for all Ms M.autocreate is false.
-+ * Don't change P.autoload for programs that are not referenced from any maps.
-+ */
-+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
-+{
-+ struct bpf_program *prog, *slot_prog;
-+ struct bpf_map *map;
-+ int i, j, k, vlen;
-+
-+ for (i = 0; i < obj->nr_programs; ++i) {
-+ int should_load = false;
-+ int use_cnt = 0;
-+
-+ prog = &obj->programs[i];
-+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
-+ continue;
-+
-+ for (j = 0; j < obj->nr_maps; ++j) {
-+ map = &obj->maps[j];
-+ if (!bpf_map__is_struct_ops(map))
-+ continue;
-+
-+ vlen = btf_vlen(map->st_ops->type);
-+ for (k = 0; k < vlen; ++k) {
-+ slot_prog = map->st_ops->progs[k];
-+ if (prog != slot_prog)
-+ continue;
-+
-+ use_cnt++;
-+ if (map->autocreate)
-+ should_load = true;
-+ }
-+ }
-+ if (use_cnt)
-+ prog->autoload = should_load;
-+ }
-+
-+ return 0;
-+}
-+
- /* Init the map's fields that depend on kern_btf */
- static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
-@@ -7788,6 +7830,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- err = err ? : bpf_object__sanitize_and_load_btf(obj);
- err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-+ err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
- err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
---
-2.43.0
-
+++ /dev/null
-From d97f968c80c35205982d2bbc24bc39e34e8e73ae Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 3 Jan 2024 17:38:42 -0800
-Subject: libbpf: use stable map placeholder FDs
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit dac645b950ea4fc0896fe46a645365cb8d9ab92b ]
-
-Move map creation to later during BPF object loading by pre-creating
-stable placeholder FDs (utilizing memfd_create()). Use dup2()
-syscall to then atomically make those placeholder FDs point to real
-kernel BPF map objects.
-
-This change allows to delay BPF map creation to after all the BPF
-program relocations. That, in turn, allows to delay BTF finalization and
-loading into kernel to after all the relocations as well. We'll take
-advantage of the latter in subsequent patches to allow libbpf to adjust
-BTF in a way that helps with BPF global function usage.
-
-Clean up a few places where we close map->fd, which now shouldn't
-happen, because map->fd should be a valid FD regardless of whether map
-was created or not. Surprisingly and nicely it simplifies a bunch of
-error handling code. If this change doesn't backfire, I'm tempted to
-pre-create such stable FDs for other entities (progs, maybe even BTF).
-We previously did some manipulations to make gen_loader work with fake
-map FDs, with stable map FDs this hack is not necessary for maps (we
-still have it for BTF, but I left it as is for now).
-
-Acked-by: Jiri Olsa <jolsa@kernel.org>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/r/20240104013847.3875810-5-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 101 ++++++++++++++++++++------------
- tools/lib/bpf/libbpf_internal.h | 14 +++++
- 2 files changed, 77 insertions(+), 38 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index b18dab0c80787..58136673fd312 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -1496,6 +1496,16 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
- return -ENOENT;
- }
-
-+static int create_placeholder_fd(void)
-+{
-+ int fd;
-+
-+ fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
-+ if (fd < 0)
-+ return -errno;
-+ return fd;
-+}
-+
- static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
- {
- struct bpf_map *map;
-@@ -1508,7 +1518,21 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
-
- map = &obj->maps[obj->nr_maps++];
- map->obj = obj;
-- map->fd = -1;
-+ /* Preallocate map FD without actually creating BPF map just yet.
-+ * These map FD "placeholders" will be reused later without changing
-+ * FD value when map is actually created in the kernel.
-+ *
-+ * This is useful to be able to perform BPF program relocations
-+ * without having to create BPF maps before that step. This allows us
-+ * to finalize and load BTF very late in BPF object's loading phase,
-+ * right before BPF maps have to be created and BPF programs have to
-+ * be loaded. By having these map FD placeholders we can perform all
-+ * the sanitizations, relocations, and any other adjustments before we
-+ * start creating actual BPF kernel objects (BTF, maps, progs).
-+ */
-+ map->fd = create_placeholder_fd();
-+ if (map->fd < 0)
-+ return ERR_PTR(map->fd);
- map->inner_map_fd = -1;
- map->autocreate = true;
-
-@@ -2537,7 +2561,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
- map->inner_map = calloc(1, sizeof(*map->inner_map));
- if (!map->inner_map)
- return -ENOMEM;
-- map->inner_map->fd = -1;
-+ map->inner_map->fd = create_placeholder_fd();
-+ if (map->inner_map->fd < 0)
-+ return map->inner_map->fd;
- map->inner_map->sec_idx = sec_idx;
- map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
- if (!map->inner_map->name)
-@@ -4353,14 +4379,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
- goto err_free_new_name;
- }
-
-- err = zclose(map->fd);
-- if (err) {
-- err = -errno;
-- goto err_close_new_fd;
-- }
-+ err = reuse_fd(map->fd, new_fd);
-+ if (err)
-+ goto err_free_new_name;
-+
- free(map->name);
-
-- map->fd = new_fd;
- map->name = new_name;
- map->def.type = info.type;
- map->def.key_size = info.key_size;
-@@ -4374,8 +4398,6 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
-
- return 0;
-
--err_close_new_fd:
-- close(new_fd);
- err_free_new_name:
- free(new_name);
- return libbpf_err(err);
-@@ -4973,7 +4995,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- LIBBPF_OPTS(bpf_map_create_opts, create_attr);
- struct bpf_map_def *def = &map->def;
- const char *map_name = NULL;
-- int err = 0;
-+ int err = 0, map_fd;
-
- if (kernel_supports(obj, FEAT_PROG_NAME))
- map_name = map->name;
-@@ -5035,17 +5057,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- bpf_gen__map_create(obj->gen_loader, def->type, map_name,
- def->key_size, def->value_size, def->max_entries,
- &create_attr, is_inner ? -1 : map - obj->maps);
-- /* Pretend to have valid FD to pass various fd >= 0 checks.
-- * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
-+ /* We keep pretenting we have valid FD to pass various fd >= 0
-+ * checks by just keeping original placeholder FDs in place.
-+ * See bpf_object__add_map() comment.
-+ * This placeholder fd will not be used with any syscall and
-+ * will be reset to -1 eventually.
- */
-- map->fd = 0;
-+ map_fd = map->fd;
- } else {
-- map->fd = bpf_map_create(def->type, map_name,
-- def->key_size, def->value_size,
-- def->max_entries, &create_attr);
-+ map_fd = bpf_map_create(def->type, map_name,
-+ def->key_size, def->value_size,
-+ def->max_entries, &create_attr);
- }
-- if (map->fd < 0 && (create_attr.btf_key_type_id ||
-- create_attr.btf_value_type_id)) {
-+ if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
- char *cp, errmsg[STRERR_BUFSIZE];
-
- err = -errno;
-@@ -5057,13 +5081,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- create_attr.btf_value_type_id = 0;
- map->btf_key_type_id = 0;
- map->btf_value_type_id = 0;
-- map->fd = bpf_map_create(def->type, map_name,
-- def->key_size, def->value_size,
-- def->max_entries, &create_attr);
-+ map_fd = bpf_map_create(def->type, map_name,
-+ def->key_size, def->value_size,
-+ def->max_entries, &create_attr);
- }
-
-- err = map->fd < 0 ? -errno : 0;
--
- if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
- if (obj->gen_loader)
- map->inner_map->fd = -1;
-@@ -5071,7 +5093,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- zfree(&map->inner_map);
- }
-
-- return err;
-+ if (map_fd < 0)
-+ return map_fd;
-+
-+ /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
-+ if (map->fd == map_fd)
-+ return 0;
-+
-+ /* Keep placeholder FD value but now point it to the BPF map object.
-+ * This way everything that relied on this map's FD (e.g., relocated
-+ * ldimm64 instructions) will stay valid and won't need adjustments.
-+ * map->fd stays valid but now point to what map_fd points to.
-+ */
-+ return reuse_fd(map->fd, map_fd);
- }
-
- static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
-@@ -5155,10 +5189,8 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
- continue;
-
- err = init_prog_array_slots(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- return err;
-- }
- }
- return 0;
- }
-@@ -5249,25 +5281,20 @@ bpf_object__create_maps(struct bpf_object *obj)
-
- if (bpf_map__is_internal(map)) {
- err = bpf_object__populate_internal_map(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- goto err_out;
-- }
- }
-
- if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
- err = init_map_in_map_slots(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- goto err_out;
-- }
- }
- }
-
- if (map->pin_path && !map->pinned) {
- err = bpf_map__pin(map, NULL);
- if (err) {
-- zclose(map->fd);
- if (!retried && err == -EEXIST) {
- retried = true;
- goto retry;
-@@ -7722,8 +7749,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- err = err ? : bpf_object__sanitize_and_load_btf(obj);
- err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-- err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-+ err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
- err = err ? : bpf_object_init_prog_arrays(obj);
- err = err ? : bpf_object_prepare_struct_ops(obj);
-@@ -7732,8 +7759,6 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- /* reset FDs */
- if (obj->btf)
- btf__set_fd(obj->btf, -1);
-- for (i = 0; i < obj->nr_maps; i++)
-- obj->maps[i].fd = -1;
- if (!err)
- err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
- }
-diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
-index 8669f6e0f6e2f..4f081203d02f9 100644
---- a/tools/lib/bpf/libbpf_internal.h
-+++ b/tools/lib/bpf/libbpf_internal.h
-@@ -565,6 +565,20 @@ static inline int ensure_good_fd(int fd)
- return fd;
- }
-
-+/* Point *fixed_fd* to the same file that *tmp_fd* points to.
-+ * Regardless of success, *tmp_fd* is closed.
-+ * Whatever *fixed_fd* pointed to is closed silently.
-+ */
-+static inline int reuse_fd(int fixed_fd, int tmp_fd)
-+{
-+ int err;
-+
-+ err = dup2(tmp_fd, fixed_fd);
-+ err = err < 0 ? -errno : 0;
-+ close(tmp_fd); /* clean up temporary FD */
-+ return err;
-+}
-+
- /* The following two functions are exposed to bpftool */
- int bpf_core_add_cands(struct bpf_core_cand *local_cand,
- size_t local_essent_len,
---
-2.43.0
-
selftests-bpf-fix-compiling-tcp_rtt.c-with-musl-libc.patch
selftests-bpf-fix-compiling-core_reloc.c-with-musl-l.patch
selftests-bpf-fix-errors-compiling-cg_storage_multi..patch
-libbpf-create-a-bpf_link-in-bpf_map__attach_struct_o.patch
-libbpf-ensure-fd-3-during-bpf_map__reuse_fd.patch
-libbpf-use-stable-map-placeholder-fds.patch
-libbpf-find-correct-module-btfs-for-struct_ops-maps-.patch
-libbpf-convert-st_ops-data-to-shadow-type.patch
-libbpf-sync-progs-autoload-with-maps-autocreate-for-.patch
-libbpf-don-t-take-direct-pointers-into-btf-data-from.patch
selftests-bpf-fix-error-compiling-test_lru_map.c.patch
selftests-bpf-fix-c-compile-error-from-missing-_bool.patch
selftests-bpf-replace-extract_build_id-with-read_bui.patch
usb-yurex-fix-inconsistent-locking-bug-in-yurex_read.patch
perf-arm-cmn-fail-dtc-counter-allocation-correctly.patch
iio-magnetometer-ak8975-fix-unexpected-device-error.patch
-libbpf-ensure-undefined-bpf_attr-field-stays-0.patch
powerpc-allow-config_ppc64_big_endian_elf_abi_v2-with-ld.lld-15.patch
pci-pm-mark-devices-disconnected-if-upstream-pcie-link-is-down-on-resume.patch
x86-tdx-fix-in-kernel-mmio-check.patch
+++ /dev/null
-From baf8312a8fc1a0eaec7c8d7bdee15c6c69d0756d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 28 Feb 2024 22:45:20 -0800
-Subject: libbpf: Convert st_ops->data to shadow type.
-
-From: Kui-Feng Lee <thinker.li@gmail.com>
-
-[ Upstream commit 69e4a9d2b3f5adf5af4feeab0a9f505da971265a ]
-
-Convert st_ops->data to the shadow type of the struct_ops map. The shadow
-type of a struct_ops type is a variant of the original struct type
-providing a way to access/change the values in the maps of the struct_ops
-type.
-
-bpf_map__initial_value() will return st_ops->data for struct_ops types. The
-skeleton is going to use it as the pointer to the shadow type of the
-original struct type.
-
-One of the main differences between the original struct type and the shadow
-type is that all function pointers of the shadow type are converted to
-pointers to struct bpf_program. Users can replace these bpf_program
-pointers with other BPF programs. The st_ops->progs[] will be updated
-before updating the value of a map to reflect the changes made by users.
-
-Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240229064523.2091270-3-thinker.li@gmail.com
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 40 ++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 38 insertions(+), 2 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index a2b765e419c16..c91917868b557 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -994,6 +994,19 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
- return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
- }
-
-+static bool is_valid_st_ops_program(struct bpf_object *obj,
-+ const struct bpf_program *prog)
-+{
-+ int i;
-+
-+ for (i = 0; i < obj->nr_programs; i++) {
-+ if (&obj->programs[i] == prog)
-+ return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
-+ }
-+
-+ return false;
-+}
-+
- /* Init the map's fields that depend on kern_btf */
- static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
-@@ -1082,9 +1095,16 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- if (btf_is_ptr(mtype)) {
- struct bpf_program *prog;
-
-- prog = st_ops->progs[i];
-+ /* Update the value from the shadow type */
-+ prog = *(void **)mdata;
-+ st_ops->progs[i] = prog;
- if (!prog)
- continue;
-+ if (!is_valid_st_ops_program(obj, prog)) {
-+ pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
-+ map->name, mname);
-+ return -ENOTSUP;
-+ }
-
- kern_mtype = skip_mods_and_typedefs(kern_btf,
- kern_mtype->type,
-@@ -9165,7 +9185,9 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
- return NULL;
- }
-
--/* Collect the reloc from ELF and populate the st_ops->progs[] */
-+/* Collect the reloc from ELF, populate the st_ops->progs[], and update
-+ * st_ops->data for shadow type.
-+ */
- static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- Elf64_Shdr *shdr, Elf_Data *data)
- {
-@@ -9279,6 +9301,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- }
-
- st_ops->progs[member_idx] = prog;
-+
-+ /* st_ops->data will be exposed to users, being returned by
-+ * bpf_map__initial_value() as a pointer to the shadow
-+ * type. All function pointers in the original struct type
-+ * should be converted to a pointer to struct bpf_program
-+ * in the shadow type.
-+ */
-+ *((struct bpf_program **)(st_ops->data + moff)) = prog;
- }
-
- return 0;
-@@ -9730,6 +9760,12 @@ int bpf_map__set_initial_value(struct bpf_map *map,
-
- void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
- {
-+ if (bpf_map__is_struct_ops(map)) {
-+ if (psize)
-+ *psize = map->def.value_size;
-+ return map->st_ops->data;
-+ }
-+
- if (!map->mmaped)
- return NULL;
- *psize = map->def.value_size;
---
-2.43.0
-
+++ /dev/null
-From 235ace355a9655ba562dffe8271fdd211f09b991 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 24 Jul 2024 12:14:58 -0500
-Subject: libbpf: Don't take direct pointers into BTF data from st_ops
-
-From: David Vernet <void@manifault.com>
-
-[ Upstream commit 04a94133f1b3cccb19e056c26f056c50b4e5b3b1 ]
-
-In struct bpf_struct_ops, we take a pointer to a BTF type name and a
-struct btf_type. This was presumably done for convenience, but can
-actually result in subtle and confusing bugs given that BTF data can be
-invalidated before a program is loaded. For example, in sched_ext, we
-may sometimes resize a data section after a skeleton has been opened,
-but before the struct_ops scheduler map has been loaded. This may cause
-the BTF data to be realloc'd, which can then cause a UAF when loading
-the program because the struct_ops map has pointers directly into the
-BTF data.
-
-We're already storing the BTF type_id in struct bpf_struct_ops. Because
-type_id is stable, we can therefore just update the places where we were
-looking at those pointers to instead do the lookups we need from the
-type_id.
-
-Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
-Signed-off-by: David Vernet <void@manifault.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240724171459.281234-1-void@manifault.com
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 23 +++++++++++++----------
- 1 file changed, 13 insertions(+), 10 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index aeed9bc44247b..bf1c4c69fd92d 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -457,8 +457,6 @@ struct bpf_program {
- };
-
- struct bpf_struct_ops {
-- const char *tname;
-- const struct btf_type *type;
- struct bpf_program **progs;
- __u32 *kern_func_off;
- /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
-@@ -1027,11 +1025,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
- continue;
-
- for (j = 0; j < obj->nr_maps; ++j) {
-+ const struct btf_type *type;
-+
- map = &obj->maps[j];
- if (!bpf_map__is_struct_ops(map))
- continue;
-
-- vlen = btf_vlen(map->st_ops->type);
-+ type = btf__type_by_id(obj->btf, map->st_ops->type_id);
-+ vlen = btf_vlen(type);
- for (k = 0; k < vlen; ++k) {
- slot_prog = map->st_ops->progs[k];
- if (prog != slot_prog)
-@@ -1065,8 +1066,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- int err;
-
- st_ops = map->st_ops;
-- type = st_ops->type;
-- tname = st_ops->tname;
-+ type = btf__type_by_id(btf, st_ops->type_id);
-+ tname = btf__name_by_offset(btf, type->name_off);
- err = find_struct_ops_kern_types(obj, tname, &mod_btf,
- &kern_type, &kern_type_id,
- &kern_vtype, &kern_vtype_id,
-@@ -1298,8 +1299,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
- memcpy(st_ops->data,
- data->d_buf + vsi->offset,
- type->size);
-- st_ops->tname = tname;
-- st_ops->type = type;
- st_ops->type_id = type_id;
-
- pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
-@@ -7995,11 +7994,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
-
- static void bpf_map_prepare_vdata(const struct bpf_map *map)
- {
-+ const struct btf_type *type;
- struct bpf_struct_ops *st_ops;
- __u32 i;
-
- st_ops = map->st_ops;
-- for (i = 0; i < btf_vlen(st_ops->type); i++) {
-+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
-+ for (i = 0; i < btf_vlen(type); i++) {
- struct bpf_program *prog = st_ops->progs[i];
- void *kern_data;
- int prog_fd;
-@@ -9234,6 +9235,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
- static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- Elf64_Shdr *shdr, Elf_Data *data)
- {
-+ const struct btf_type *type;
- const struct btf_member *member;
- struct bpf_struct_ops *st_ops;
- struct bpf_program *prog;
-@@ -9293,13 +9295,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
- }
- insn_idx = sym->st_value / BPF_INSN_SZ;
-
-- member = find_member_by_offset(st_ops->type, moff * 8);
-+ type = btf__type_by_id(btf, st_ops->type_id);
-+ member = find_member_by_offset(type, moff * 8);
- if (!member) {
- pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
- map->name, moff);
- return -EINVAL;
- }
-- member_idx = member - btf_members(st_ops->type);
-+ member_idx = member - btf_members(type);
- name = btf__name_by_offset(btf, member->name_off);
-
- if (!resolve_func_ptr(btf, member->type, NULL)) {
---
-2.43.0
-
+++ /dev/null
-From c9f115564561af63db662791e9a35fcf1dfefd2a Mon Sep 17 00:00:00 2001
-From: Martin KaFai Lau <martin.lau@kernel.org>
-Date: Wed, 24 Jan 2024 14:44:18 -0800
-Subject: libbpf: Ensure undefined bpf_attr field stays 0
-
-From: Martin KaFai Lau <martin.lau@kernel.org>
-
-commit c9f115564561af63db662791e9a35fcf1dfefd2a upstream.
-
-The commit 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.")
-sets a newly added field (value_type_btf_obj_fd) to -1 in libbpf when
-the caller of libbpf's bpf_map_create() did not define this field by
-passing a NULL "opts" or passing in an "opts" that does not cover this
-new field. OPTS_HAS(opts, field) is used to decide if the field is
-defined or not:
-
- ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
-
-Once OPTS_HAS decided the field is not defined, that field should
-be set to 0. For this particular new field (value_type_btf_obj_fd),
-its corresponding map_flags "BPF_F_VTYPE_BTF_OBJ_FD" is not set.
-Thus, the kernel does not treat it as an fd field.
-
-Fixes: 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.")
-Reported-by: Andrii Nakryiko <andrii@kernel.org>
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240124224418.2905133-1-martin.lau@linux.dev
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- tools/lib/bpf/bpf.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/tools/lib/bpf/bpf.c
-+++ b/tools/lib/bpf/bpf.c
-@@ -192,7 +192,7 @@ int bpf_map_create(enum bpf_map_type map
- attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
- attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
- attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
-- attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1);
-+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
-
- attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
- attr.map_flags = OPTS_GET(opts, map_flags, 0);
+++ /dev/null
-From 3964fbc9879994d2c5a6b862fbebc59abf820ea4 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 19 Jan 2024 14:50:03 -0800
-Subject: libbpf: Find correct module BTFs for struct_ops maps and progs.
-
-From: Kui-Feng Lee <thinker.li@gmail.com>
-
-[ Upstream commit 9e926acda0c2e21bca431a1818665ddcd6939755 ]
-
-Locate the module BTFs for struct_ops maps and progs and pass them to the
-kernel. This ensures that the kernel correctly resolves type IDs from the
-appropriate module BTFs.
-
-For the map of a struct_ops object, the FD of the module BTF is stored in
-the bpf_map to keep a reference to the module BTF. The FD is passed to the
-kernel as value_type_btf_obj_fd when the struct_ops object is loaded.
-
-For a bpf_struct_ops prog, attach_btf_obj_fd of bpf_prog is the FD of a
-module BTF in the kernel.
-
-Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
-Acked-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/r/20240119225005.668602-13-thinker.li@gmail.com
-Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/bpf.c | 4 +++-
- tools/lib/bpf/bpf.h | 4 +++-
- tools/lib/bpf/libbpf.c | 41 ++++++++++++++++++++++++++---------
- tools/lib/bpf/libbpf_probes.c | 1 +
- 4 files changed, 38 insertions(+), 12 deletions(-)
-
-diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
-index b0f1913763a33..ee18aea4a7b58 100644
---- a/tools/lib/bpf/bpf.c
-+++ b/tools/lib/bpf/bpf.c
-@@ -169,7 +169,8 @@ int bpf_map_create(enum bpf_map_type map_type,
- __u32 max_entries,
- const struct bpf_map_create_opts *opts)
- {
-- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
-+ const size_t attr_sz = offsetofend(union bpf_attr,
-+ value_type_btf_obj_fd);
- union bpf_attr attr;
- int fd;
-
-@@ -191,6 +192,7 @@ int bpf_map_create(enum bpf_map_type map_type,
- attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
- attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
- attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
-+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1);
-
- attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
- attr.map_flags = OPTS_GET(opts, map_flags, 0);
-diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
-index 107fef7488682..db0ff8ade19a4 100644
---- a/tools/lib/bpf/bpf.h
-+++ b/tools/lib/bpf/bpf.h
-@@ -51,8 +51,10 @@ struct bpf_map_create_opts {
-
- __u32 numa_node;
- __u32 map_ifindex;
-+ __s32 value_type_btf_obj_fd;
-+ size_t:0;
- };
--#define bpf_map_create_opts__last_field map_ifindex
-+#define bpf_map_create_opts__last_field value_type_btf_obj_fd
-
- LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
- const char *map_name,
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index 54f3380010f55..a2b765e419c16 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -518,6 +518,7 @@ struct bpf_map {
- struct bpf_map_def def;
- __u32 numa_node;
- __u32 btf_var_idx;
-+ int mod_btf_fd;
- __u32 btf_key_type_id;
- __u32 btf_value_type_id;
- __u32 btf_vmlinux_value_type_id;
-@@ -918,22 +919,29 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
- return NULL;
- }
-
-+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
-+ __u16 kind, struct btf **res_btf,
-+ struct module_btf **res_mod_btf);
-+
- #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
- static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
- const char *name, __u32 kind);
-
- static int
--find_struct_ops_kern_types(const struct btf *btf, const char *tname,
-+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
-+ struct module_btf **mod_btf,
- const struct btf_type **type, __u32 *type_id,
- const struct btf_type **vtype, __u32 *vtype_id,
- const struct btf_member **data_member)
- {
- const struct btf_type *kern_type, *kern_vtype;
- const struct btf_member *kern_data_member;
-+ struct btf *btf;
- __s32 kern_vtype_id, kern_type_id;
- __u32 i;
-
-- kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
-+ kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
-+ &btf, mod_btf);
- if (kern_type_id < 0) {
- pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
- tname);
-@@ -987,14 +995,16 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
- }
-
- /* Init the map's fields that depend on kern_btf */
--static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
-- const struct btf *btf,
-- const struct btf *kern_btf)
-+static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
- const struct btf_member *member, *kern_member, *kern_data_member;
- const struct btf_type *type, *kern_type, *kern_vtype;
- __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
-+ struct bpf_object *obj = map->obj;
-+ const struct btf *btf = obj->btf;
- struct bpf_struct_ops *st_ops;
-+ const struct btf *kern_btf;
-+ struct module_btf *mod_btf;
- void *data, *kern_data;
- const char *tname;
- int err;
-@@ -1002,16 +1012,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
-- err = find_struct_ops_kern_types(kern_btf, tname,
-+ err = find_struct_ops_kern_types(obj, tname, &mod_btf,
- &kern_type, &kern_type_id,
- &kern_vtype, &kern_vtype_id,
- &kern_data_member);
- if (err)
- return err;
-
-+ kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
-+
- pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
- map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
-
-+ map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
- map->def.value_size = kern_vtype->size;
- map->btf_vmlinux_value_type_id = kern_vtype_id;
-
-@@ -1087,6 +1100,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- return -ENOTSUP;
- }
-
-+ if (mod_btf)
-+ prog->attach_btf_obj_fd = mod_btf->fd;
- prog->attach_btf_id = kern_type_id;
- prog->expected_attach_type = kern_member_idx;
-
-@@ -1129,8 +1144,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
- if (!bpf_map__is_struct_ops(map))
- continue;
-
-- err = bpf_map__init_kern_struct_ops(map, obj->btf,
-- obj->btf_vmlinux);
-+ err = bpf_map__init_kern_struct_ops(map);
- if (err)
- return err;
- }
-@@ -5133,8 +5147,13 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- create_attr.numa_node = map->numa_node;
- create_attr.map_extra = map->map_extra;
-
-- if (bpf_map__is_struct_ops(map))
-+ if (bpf_map__is_struct_ops(map)) {
- create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
-+ if (map->mod_btf_fd >= 0) {
-+ create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
-+ create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
-+ }
-+ }
-
- if (obj->btf && btf__fd(obj->btf) >= 0) {
- create_attr.btf_fd = btf__fd(obj->btf);
-@@ -9444,7 +9463,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
- *btf_obj_fd = 0;
- *btf_type_id = 1;
- } else {
-- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
-+ err = find_kernel_btf_id(prog->obj, attach_name,
-+ attach_type, btf_obj_fd,
-+ btf_type_id);
- }
- if (err) {
- pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
-diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
-index 9c4db90b92b6b..98373d126d9d7 100644
---- a/tools/lib/bpf/libbpf_probes.c
-+++ b/tools/lib/bpf/libbpf_probes.c
-@@ -326,6 +326,7 @@ static int probe_map_create(enum bpf_map_type map_type)
- case BPF_MAP_TYPE_STRUCT_OPS:
- /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
- opts.btf_vmlinux_value_type_id = 1;
-+ opts.value_type_btf_obj_fd = -1;
- exp_err = -524; /* -ENOTSUPP */
- break;
- case BPF_MAP_TYPE_BLOOM_FILTER:
---
-2.43.0
-
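The new value_type_btf_obj_fd option added above can also be exercised directly
through bpf_map_create(). Below is a minimal, hedged sketch, assuming a UAPI
that defines BPF_F_VTYPE_BTF_OBJ_FD and that the caller has already resolved
the module BTF FD, the value type ID and its size (mod_btf_fd, vtype_id and
value_size are illustrative placeholders, not part of the patch):

  #include <bpf/bpf.h>
  #include <linux/bpf.h>

  static int create_module_struct_ops_map(int mod_btf_fd, __u32 vtype_id,
                                          __u32 value_size)
  {
          LIBBPF_OPTS(bpf_map_create_opts, opts,
                  .btf_vmlinux_value_type_id = vtype_id,
                  /* value type lives in a module's BTF, not in vmlinux BTF */
                  .value_type_btf_obj_fd = mod_btf_fd,
                  .map_flags = BPF_F_VTYPE_BTF_OBJ_FD,
          );

          /* struct_ops maps use a 4-byte key and a single entry */
          return bpf_map_create(BPF_MAP_TYPE_STRUCT_OPS, "ops_map",
                                4, value_size, 1, &opts);
  }

libbpf itself performs the equivalent of this call in bpf_object__create_map()
whenever map->mod_btf_fd >= 0, as the hunk above shows.
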
+++ /dev/null
-From acc4fc0e88c09cc23a7ffadabf270585883658dd Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 6 Mar 2024 12:45:22 +0200
-Subject: libbpf: Sync progs autoload with maps autocreate for struct_ops maps
-
-From: Eduard Zingerman <eddyz87@gmail.com>
-
-[ Upstream commit fe9d049c3da06373a1a35914b7f695509e4cb1fe ]
-
-Automatically select which struct_ops programs to load depending on
-which struct_ops maps are selected for automatic creation.
-E.g. for the BPF code below:
-
- SEC("struct_ops/test_1") int BPF_PROG(foo) { ... }
- SEC("struct_ops/test_2") int BPF_PROG(bar) { ... }
-
- SEC(".struct_ops.link")
- struct test_ops___v1 A = {
- .foo = (void *)foo
- };
-
- SEC(".struct_ops.link")
- struct test_ops___v2 B = {
- .foo = (void *)foo,
- .bar = (void *)bar,
- };
-
-And the following libbpf API calls:
-
- bpf_map__set_autocreate(skel->maps.A, true);
- bpf_map__set_autocreate(skel->maps.B, false);
-
-The autoload would be enabled for program 'foo' and disabled for
-program 'bar'.
-
-During load, for each struct_ops program P, referenced from some
-struct_ops map M:
-- set P.autoload = true if M.autocreate is true for some M;
-- set P.autoload = false if M.autocreate is false for all M;
-- don't change P.autoload if P is not referenced from any map.
-
-Do this after bpf_object__init_kern_struct_ops_maps()
-to make sure that shadow variable assignments are done.
-
-Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/bpf/20240306104529.6453-9-eddyz87@gmail.com
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 43 ++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 43 insertions(+)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index c91917868b557..aeed9bc44247b 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -1007,6 +1007,48 @@ static bool is_valid_st_ops_program(struct bpf_object *obj,
- return false;
- }
-
-+/* For each struct_ops program P, referenced from some struct_ops map M,
-+ * enable P.autoload if there are Ms for which M.autocreate is true,
-+ * disable P.autoload if for all Ms M.autocreate is false.
-+ * Don't change P.autoload for programs that are not referenced from any maps.
-+ */
-+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
-+{
-+ struct bpf_program *prog, *slot_prog;
-+ struct bpf_map *map;
-+ int i, j, k, vlen;
-+
-+ for (i = 0; i < obj->nr_programs; ++i) {
-+ int should_load = false;
-+ int use_cnt = 0;
-+
-+ prog = &obj->programs[i];
-+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
-+ continue;
-+
-+ for (j = 0; j < obj->nr_maps; ++j) {
-+ map = &obj->maps[j];
-+ if (!bpf_map__is_struct_ops(map))
-+ continue;
-+
-+ vlen = btf_vlen(map->st_ops->type);
-+ for (k = 0; k < vlen; ++k) {
-+ slot_prog = map->st_ops->progs[k];
-+ if (prog != slot_prog)
-+ continue;
-+
-+ use_cnt++;
-+ if (map->autocreate)
-+ should_load = true;
-+ }
-+ }
-+ if (use_cnt)
-+ prog->autoload = should_load;
-+ }
-+
-+ return 0;
-+}
-+
- /* Init the map's fields that depend on kern_btf */
- static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
- {
-@@ -8003,6 +8045,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- err = err ? : bpf_object__sanitize_and_load_btf(obj);
- err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-+ err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
- err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
---
-2.43.0
-
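The effect described above is visible through the generated skeleton API. A
minimal sketch, assuming a hypothetical skeleton test_ops.skel.h produced from
the BPF code quoted in the commit message (all names are illustrative):

  #include <stdio.h>
  #include <bpf/libbpf.h>
  #include "test_ops.skel.h"   /* hypothetical skeleton generated by bpftool */

  int main(void)
  {
          struct test_ops *skel = test_ops__open();
          int err;

          if (!skel)
                  return 1;

          /* Only map A will be created; B is skipped entirely. */
          bpf_map__set_autocreate(skel->maps.A, true);
          bpf_map__set_autocreate(skel->maps.B, false);

          err = test_ops__load(skel);
          if (!err) {
                  /* foo is referenced from A (autocreate) -> still loaded;
                   * bar is referenced only from B -> autoload was turned off.
                   */
                  printf("foo: %d bar: %d\n",
                         bpf_program__autoload(skel->progs.foo),
                         bpf_program__autoload(skel->progs.bar));
          }
          test_ops__destroy(skel);
          return err ? 1 : 0;
  }

Programs that are not referenced from any struct_ops map are left alone, so a
program explicitly toggled with bpf_program__set_autoload() keeps its setting.
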
+++ /dev/null
-From 35dc8c8739effa064664c56473eba2c1761f59d9 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 3 Jan 2024 17:38:42 -0800
-Subject: libbpf: use stable map placeholder FDs
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit dac645b950ea4fc0896fe46a645365cb8d9ab92b ]
-
-Move map creation to later during BPF object loading by pre-creating
-stable placeholder FDs (utilizing memfd_create()). Then use the dup2()
-syscall to atomically make those placeholder FDs point to real
-kernel BPF map objects.
-
-This change allows delaying BPF map creation until after all the BPF
-program relocations. That, in turn, allows delaying BTF finalization and
-loading into the kernel until after all the relocations as well. We'll take
-advantage of the latter in subsequent patches to allow libbpf to adjust
-BTF in a way that helps with BPF global function usage.
-
-Clean up a few places where we close map->fd, which now shouldn't
-happen, because map->fd should be a valid FD regardless of whether the map
-was created or not. Surprisingly and nicely, it simplifies a bunch of
-error handling code. If this change doesn't backfire, I'm tempted to
-pre-create such stable FDs for other entities (progs, maybe even BTF).
-We previously did some manipulations to make gen_loader work with fake
-map FDs; with stable map FDs this hack is not necessary for maps (we
-still have it for BTF, but I left it as is for now).
-
-Acked-by: Jiri Olsa <jolsa@kernel.org>
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Link: https://lore.kernel.org/r/20240104013847.3875810-5-andrii@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Stable-dep-of: 04a94133f1b3 ("libbpf: Don't take direct pointers into BTF data from st_ops")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- tools/lib/bpf/libbpf.c | 101 ++++++++++++++++++++------------
- tools/lib/bpf/libbpf_internal.h | 14 +++++
- 2 files changed, 77 insertions(+), 38 deletions(-)
-
-diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
-index ceed16a10285a..54f3380010f55 100644
---- a/tools/lib/bpf/libbpf.c
-+++ b/tools/lib/bpf/libbpf.c
-@@ -1491,6 +1491,16 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
- return ERR_PTR(-ENOENT);
- }
-
-+static int create_placeholder_fd(void)
-+{
-+ int fd;
-+
-+ fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
-+ if (fd < 0)
-+ return -errno;
-+ return fd;
-+}
-+
- static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
- {
- struct bpf_map *map;
-@@ -1503,7 +1513,21 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
-
- map = &obj->maps[obj->nr_maps++];
- map->obj = obj;
-- map->fd = -1;
-+ /* Preallocate map FD without actually creating BPF map just yet.
-+ * These map FD "placeholders" will be reused later without changing
-+ * FD value when map is actually created in the kernel.
-+ *
-+ * This is useful to be able to perform BPF program relocations
-+ * without having to create BPF maps before that step. This allows us
-+ * to finalize and load BTF very late in BPF object's loading phase,
-+ * right before BPF maps have to be created and BPF programs have to
-+ * be loaded. By having these map FD placeholders we can perform all
-+ * the sanitizations, relocations, and any other adjustments before we
-+ * start creating actual BPF kernel objects (BTF, maps, progs).
-+ */
-+ map->fd = create_placeholder_fd();
-+ if (map->fd < 0)
-+ return ERR_PTR(map->fd);
- map->inner_map_fd = -1;
- map->autocreate = true;
-
-@@ -2595,7 +2619,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
- map->inner_map = calloc(1, sizeof(*map->inner_map));
- if (!map->inner_map)
- return -ENOMEM;
-- map->inner_map->fd = -1;
-+ map->inner_map->fd = create_placeholder_fd();
-+ if (map->inner_map->fd < 0)
-+ return map->inner_map->fd;
- map->inner_map->sec_idx = sec_idx;
- map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
- if (!map->inner_map->name)
-@@ -4446,14 +4472,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
- goto err_free_new_name;
- }
-
-- err = zclose(map->fd);
-- if (err) {
-- err = -errno;
-- goto err_close_new_fd;
-- }
-+ err = reuse_fd(map->fd, new_fd);
-+ if (err)
-+ goto err_free_new_name;
-+
- free(map->name);
-
-- map->fd = new_fd;
- map->name = new_name;
- map->def.type = info.type;
- map->def.key_size = info.key_size;
-@@ -4467,8 +4491,6 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
-
- return 0;
-
--err_close_new_fd:
-- close(new_fd);
- err_free_new_name:
- free(new_name);
- return libbpf_err(err);
-@@ -5102,7 +5124,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- LIBBPF_OPTS(bpf_map_create_opts, create_attr);
- struct bpf_map_def *def = &map->def;
- const char *map_name = NULL;
-- int err = 0;
-+ int err = 0, map_fd;
-
- if (kernel_supports(obj, FEAT_PROG_NAME))
- map_name = map->name;
-@@ -5164,17 +5186,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- bpf_gen__map_create(obj->gen_loader, def->type, map_name,
- def->key_size, def->value_size, def->max_entries,
- &create_attr, is_inner ? -1 : map - obj->maps);
-- /* Pretend to have valid FD to pass various fd >= 0 checks.
-- * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
-+ /* We keep pretending we have a valid FD to pass various fd >= 0
-+ * checks by just keeping original placeholder FDs in place.
-+ * See bpf_object__add_map() comment.
-+ * This placeholder fd will not be used with any syscall and
-+ * will be reset to -1 eventually.
- */
-- map->fd = 0;
-+ map_fd = map->fd;
- } else {
-- map->fd = bpf_map_create(def->type, map_name,
-- def->key_size, def->value_size,
-- def->max_entries, &create_attr);
-+ map_fd = bpf_map_create(def->type, map_name,
-+ def->key_size, def->value_size,
-+ def->max_entries, &create_attr);
- }
-- if (map->fd < 0 && (create_attr.btf_key_type_id ||
-- create_attr.btf_value_type_id)) {
-+ if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
- char *cp, errmsg[STRERR_BUFSIZE];
-
- err = -errno;
-@@ -5186,13 +5210,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- create_attr.btf_value_type_id = 0;
- map->btf_key_type_id = 0;
- map->btf_value_type_id = 0;
-- map->fd = bpf_map_create(def->type, map_name,
-- def->key_size, def->value_size,
-- def->max_entries, &create_attr);
-+ map_fd = bpf_map_create(def->type, map_name,
-+ def->key_size, def->value_size,
-+ def->max_entries, &create_attr);
- }
-
-- err = map->fd < 0 ? -errno : 0;
--
- if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
- if (obj->gen_loader)
- map->inner_map->fd = -1;
-@@ -5200,7 +5222,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
- zfree(&map->inner_map);
- }
-
-- return err;
-+ if (map_fd < 0)
-+ return map_fd;
-+
-+ /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
-+ if (map->fd == map_fd)
-+ return 0;
-+
-+ /* Keep placeholder FD value but now point it to the BPF map object.
-+ * This way everything that relied on this map's FD (e.g., relocated
-+ * ldimm64 instructions) will stay valid and won't need adjustments.
-+ * map->fd stays valid but now points to what map_fd points to.
-+ */
-+ return reuse_fd(map->fd, map_fd);
- }
-
- static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
-@@ -5284,10 +5318,8 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
- continue;
-
- err = init_prog_array_slots(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- return err;
-- }
- }
- return 0;
- }
-@@ -5378,25 +5410,20 @@ bpf_object__create_maps(struct bpf_object *obj)
-
- if (bpf_map__is_internal(map)) {
- err = bpf_object__populate_internal_map(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- goto err_out;
-- }
- }
-
- if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
- err = init_map_in_map_slots(obj, map);
-- if (err < 0) {
-- zclose(map->fd);
-+ if (err < 0)
- goto err_out;
-- }
- }
- }
-
- if (map->pin_path && !map->pinned) {
- err = bpf_map__pin(map, NULL);
- if (err) {
-- zclose(map->fd);
- if (!retried && err == -EEXIST) {
- retried = true;
- goto retry;
-@@ -7937,8 +7964,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- err = err ? : bpf_object__sanitize_and_load_btf(obj);
- err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-- err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-+ err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__load_progs(obj, extra_log_level);
- err = err ? : bpf_object_init_prog_arrays(obj);
- err = err ? : bpf_object_prepare_struct_ops(obj);
-@@ -7947,8 +7974,6 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
- /* reset FDs */
- if (obj->btf)
- btf__set_fd(obj->btf, -1);
-- for (i = 0; i < obj->nr_maps; i++)
-- obj->maps[i].fd = -1;
- if (!err)
- err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
- }
-diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
-index 57dec645d6878..ead99497cb157 100644
---- a/tools/lib/bpf/libbpf_internal.h
-+++ b/tools/lib/bpf/libbpf_internal.h
-@@ -569,6 +569,20 @@ static inline int ensure_good_fd(int fd)
- return fd;
- }
-
-+/* Point *fixed_fd* to the same file that *tmp_fd* points to.
-+ * Regardless of success, *tmp_fd* is closed.
-+ * Whatever *fixed_fd* pointed to is closed silently.
-+ */
-+static inline int reuse_fd(int fixed_fd, int tmp_fd)
-+{
-+ int err;
-+
-+ err = dup2(tmp_fd, fixed_fd);
-+ err = err < 0 ? -errno : 0;
-+ close(tmp_fd); /* clean up temporary FD */
-+ return err;
-+}
-+
- /* The following two functions are exposed to bpftool */
- int bpf_core_add_cands(struct bpf_core_cand *local_cand,
- size_t local_essent_len,
---
-2.43.0
-
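The placeholder-FD scheme used above boils down to reserving a stable FD
number with memfd_create() and later repointing it at the real object with
dup2(). A self-contained sketch of that pattern, independent of libbpf
(placeholder_fd() and adopt_fd() are illustrative stand-ins for
create_placeholder_fd() and reuse_fd()):

  #define _GNU_SOURCE
  #include <errno.h>
  #include <stdio.h>
  #include <sys/mman.h>   /* memfd_create() */
  #include <unistd.h>     /* dup2(), close() */

  /* Reserve a stable FD number before the real object exists. */
  static int placeholder_fd(void)
  {
          int fd = memfd_create("placeholder", MFD_CLOEXEC);

          return fd < 0 ? -errno : fd;
  }

  /* Make fixed_fd refer to whatever tmp_fd refers to; tmp_fd is always
   * closed, mirroring the reuse_fd() helper added above.
   */
  static int adopt_fd(int fixed_fd, int tmp_fd)
  {
          int err = dup2(tmp_fd, fixed_fd) < 0 ? -errno : 0;

          close(tmp_fd);
          return err;
  }

  int main(void)
  {
          int stable = placeholder_fd();
          int real;

          if (stable < 0)
                  return 1;

          /* 'stable' can already be handed out, e.g. embedded in relocated
           * instructions, even though nothing real backs it yet.
           */
          real = memfd_create("real-object", MFD_CLOEXEC); /* stand-in for bpf_map_create() */
          if (real < 0 || adopt_fd(stable, real) < 0)
                  return 1;

          printf("FD %d now refers to the real object\n", stable);
          close(stable);
          return 0;
  }

Because dup2() atomically replaces what the fixed FD refers to, every earlier
consumer of the FD number keeps working once the real map is created; this is
what lets relocations run before bpf_object__create_maps().
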
selftests-bpf-fix-errors-compiling-lwt_redirect.c-wi.patch
selftests-bpf-fix-errors-compiling-decap_sanity.c-wi.patch
selftests-bpf-fix-errors-compiling-cg_storage_multi..patch
-libbpf-use-stable-map-placeholder-fds.patch
-libbpf-find-correct-module-btfs-for-struct_ops-maps-.patch
-libbpf-convert-st_ops-data-to-shadow-type.patch
-libbpf-sync-progs-autoload-with-maps-autocreate-for-.patch
-libbpf-don-t-take-direct-pointers-into-btf-data-from.patch
selftests-bpf-fix-arg-parsing-in-veristat-test_progs.patch
selftests-bpf-fix-error-compiling-test_lru_map.c.patch
selftests-bpf-fix-c-compile-error-from-missing-_bool.patch
perf-arm-cmn-fail-dtc-counter-allocation-correctly.patch
iio-magnetometer-ak8975-fix-unexpected-device-error.patch
wifi-brcmfmac-add-linefeed-at-end-of-file.patch
-libbpf-ensure-undefined-bpf_attr-field-stays-0.patch
thunderbolt-send-uevent-after-asymmetric-symmetric-switch.patch
thunderbolt-fix-minimum-allocated-usb-3.x-and-pcie-bandwidth.patch
thunderbolt-fix-null-pointer-dereference-in-tb_port_update_credits.patch