BPF_LINK_TYPE_XDP = 6,
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
++ BPF_LINK_TYPE_STRUCT_OPS = 9,
MAX_BPF_LINK_TYPE,
};
__aligned_u64 addrs;
__aligned_u64 cookies;
} kprobe_multi;
++ struct {
++ /* this is overlaid with the target_btf_id above. */
++ __u32 target_btf_id;
++ /* black box user-provided value passed through
++ * to BPF program at the execution time and
++ * accessible through bpf_get_attach_cookie() BPF helper
++ */
++ __u64 cookie;
++ } tracing;
};
} link_create;
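/*
 * Illustrative sketch: pass a BPF cookie when creating a tracing link via
 * the raw bpf(2) syscall, using the new link_create.tracing.cookie field.
 * prog_fd is assumed to refer to an already-loaded fentry/fexit program;
 * the program can read the value back with bpf_get_attach_cookie().
 * (libbpf's bpf_link_create() exposes the same attribute via its opts.)
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int tracing_link_with_cookie(int prog_fd, __u64 cookie)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_FENTRY;
	attr.link_create.tracing.cookie = cookie;	/* new field */

	/* returns a link fd on success, -1 on error */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}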
* The **hash_algo** is returned on success,
 *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ *		The old value of kptr (which can be NULL). The returned pointer,
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
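/*
 * Illustrative sketch: swap a referenced pointer into a map value and
 * release whatever was stored there before. struct my_obj and the
 * my_obj_acquire()/my_obj_release() kfuncs are hypothetical placeholders
 * for a kernel type with kptr support; the __kptr_ref type tag matches
 * the convenience macro provided by bpf_helpers.h of this era.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef __kptr_ref
#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))
#endif

struct my_obj;	/* placeholder type */

/* hypothetical acquire/release kfuncs for struct my_obj */
extern struct my_obj *my_obj_acquire(void) __ksym;
extern void my_obj_release(struct my_obj *obj) __ksym;

struct map_value {
	struct my_obj __kptr_ref *obj;	/* referenced kptr field */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct map_value);
} objs SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int store_obj(void *ctx)
{
	struct my_obj *newp, *old;
	struct map_value *v;
	__u32 key = 0;

	v = bpf_map_lookup_elem(&objs, &key);
	if (!v)
		return 0;

	newp = my_obj_acquire();	/* referenced pointer, consumed below */
	if (!newp)
		return 0;

	old = bpf_kptr_xchg(&v->obj, newp);	/* newp is now owned by the map */
	if (old)
		my_obj_release(old);	/* old reference must be released */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";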
++ *
++ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
++ * Description
++ * Perform a lookup in *percpu map* for an entry associated to
++ * *key* on *cpu*.
++ * Return
++ * Map value associated to *key* on *cpu*, or **NULL** if no entry
++ * was found or *cpu* is invalid.
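/*
 * Illustrative sketch: sum a per-CPU counter across CPUs from inside a BPF
 * program. The map/section names and the fixed CPU bound are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int sum_counters(void *ctx)
{
	__u64 total = 0;
	__u32 key = 0, cpu;

	for (cpu = 0; cpu < 8; cpu++) {	/* assume at most 8 CPUs for the sketch */
		__u64 *val = bpf_map_lookup_percpu_elem(&counters, &key, cpu);

		if (!val)	/* NULL also covers cpu >= nr_cpu_ids */
			break;
		total += *val;
	}

	bpf_printk("total across CPUs: %llu", total);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";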
++ *
++ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
++ * Description
++ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
++ * Return
++ * *sk* if casting is valid, or **NULL** otherwise.
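/*
 * Illustrative sketch: from a sockops program, detect whether the current
 * subflow belongs to an MPTCP connection. Section name and the printed
 * message are assumptions; return value follows sockops conventions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int detect_mptcp(struct bpf_sock_ops *skops)
{
	struct bpf_sock *sk = skops->sk;
	struct mptcp_sock *msk;

	if (!sk)
		return 1;

	msk = bpf_skc_to_mptcp_sock(sk);
	if (msk)
		bpf_printk("subflow is part of an MPTCP connection");
	return 1;
}

char LICENSE[] SEC("license") = "GPL";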
++ *
++ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
++ * Description
++ * Get a dynptr to local memory *data*.
++ *
++ * *data* must be a ptr to a map value.
++ * The maximum *size* supported is DYNPTR_MAX_SIZE.
++ * *flags* is currently unused.
++ * Return
++ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
++ * -EINVAL if flags is not 0.
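/*
 * Illustrative sketch: build a dynptr over a map value used as a scratch
 * buffer. The map layout, buffer size and section name are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct scratch_val {
	char buf[64];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct scratch_val);
} scratch SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int make_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;
	struct scratch_val *v;
	__u32 key = 0;

	v = bpf_map_lookup_elem(&scratch, &key);
	if (!v)
		return 0;

	/* returns 0 on success, -E2BIG/-EINVAL on a bad size or flags value */
	if (bpf_dynptr_from_mem(v->buf, sizeof(v->buf), 0, &ptr))
		return 0;

	/* ptr can now be handed to bpf_dynptr_read()/write()/data() */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";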
++ *
++ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
++ * Description
++ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
++ * through the dynptr interface. *flags* must be 0.
++ *
++ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
++ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
++ * reservation fails. This is enforced by the verifier.
++ * Return
++ * 0 on success, or a negative error in case of failure.
++ *
++ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
++ * Description
++ *		Submit reserved ring buffer sample, pointed to by *ptr*,
++ * through the dynptr interface. This is a no-op if the dynptr is
++ * invalid/null.
++ *
++ * For more information on *flags*, please see
++ * 'bpf_ringbuf_submit'.
++ * Return
++ * Nothing. Always succeeds.
++ *
++ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
++ * Description
++ * Discard reserved ring buffer sample through the dynptr
++ * interface. This is a no-op if the dynptr is invalid/null.
++ *
++ * For more information on *flags*, please see
++ * 'bpf_ringbuf_discard'.
++ * Return
++ * Nothing. Always succeeds.
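/*
 * Illustrative sketch: reserve a ring buffer sample through the dynptr
 * interface, fill it via a data slice, and submit it; every failure path
 * discards the dynptr, since the verifier requires a matching submit or
 * discard even when the reservation fails. Map name, event layout and
 * section name are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} events SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int emit_event(void *ctx)
{
	struct bpf_dynptr ptr;
	struct event *e;

	if (bpf_ringbuf_reserve_dynptr(&events, sizeof(*e), 0, &ptr)) {
		bpf_ringbuf_discard_dynptr(&ptr, 0);	/* required even on failure */
		return 0;
	}

	e = bpf_dynptr_data(&ptr, 0, sizeof(*e));	/* statically known length */
	if (!e) {
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";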
++ *
++ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset)
++ * Description
++ * Read *len* bytes from *src* into *dst*, starting from *offset*
++ * into *src*.
++ * Return
++ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
++ * of *src*'s data, -EINVAL if *src* is an invalid dynptr.
++ *
++ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len)
++ * Description
++ * Write *len* bytes from *src* into *dst*, starting from *offset*
++ * into *dst*.
++ * Return
++ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
++ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
++ * is a read-only dynptr.
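/*
 * Illustrative sketch: copy a small header out of, and patch one byte back
 * into, an already-initialized dynptr (for example one produced by
 * bpf_dynptr_from_mem() or bpf_ringbuf_reserve_dynptr()). The helper is
 * forced inline so the dynptr never crosses a subprogram boundary.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __always_inline long patch_first_bytes(struct bpf_dynptr *ptr)
{
	__u8 hdr[4];
	__u8 marker = 0xff;
	long err;

	/* read 4 bytes starting at offset 0 of the dynptr's data */
	err = bpf_dynptr_read(hdr, sizeof(hdr), ptr, 0);
	if (err)	/* -E2BIG if out of bounds, -EINVAL if ptr is invalid */
		return err;

	/* write one byte back at offset 0; fails on read-only dynptrs */
	return bpf_dynptr_write(ptr, 0, &marker, sizeof(marker));
}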
++ *
++ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
++ * Description
++ * Get a pointer to the underlying dynptr data.
++ *
++ * *len* must be a statically known value. The returned data slice
++ * is invalidated whenever the dynptr is invalidated.
++ * Return
++ * Pointer to the underlying dynptr data, NULL if the dynptr is
++ * read-only, if the dynptr is invalid, or if the offset and length
++ *		are out of bounds.
*/
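/*
 * Illustrative sketch: obtain a direct data slice from a dynptr with
 * bpf_dynptr_data(). The requested length must be a compile-time constant,
 * and the returned pointer is only usable while the dynptr itself is valid.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __always_inline __u32 read_tag(struct bpf_dynptr *ptr)
{
	__u32 *tag;

	/* NULL if ptr is invalid or read-only, or if offset 0 + 4 bytes is out of bounds */
	tag = bpf_dynptr_data(ptr, 0, sizeof(*tag));
	if (!tag)
		return 0;
	return *tag;
}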
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
FN(copy_from_user_task), \
FN(skb_set_tstamp), \
FN(ima_file_hash), \
+ FN(kptr_xchg), \
++ FN(map_lookup_percpu_elem), \
++ FN(skc_to_mptcp_sock), \
++ FN(dynptr_from_mem), \
++ FN(ringbuf_reserve_dynptr), \
++ FN(ringbuf_submit_dynptr), \
++ FN(ringbuf_discard_dynptr), \
++ FN(dynptr_read), \
++ FN(dynptr_write), \
++ FN(dynptr_data), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
__u8 tunnel_ttl;
__u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
++ union {
++ __u32 local_ipv4;
++ __u32 local_ipv6[4];
++ };
};
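/*
 * Illustrative sketch: on the TC ingress path, read the tunnel key and look
 * at the new local (this-host) address next to the existing remote address.
 * Section name and the printed message are assumptions.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int inspect_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_OK;	/* no tunnel metadata on this packet */

	bpf_printk("tunnel remote %x local %x", key.remote_ipv4, key.local_ipv4);
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";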
/* user accessible mirror of in-kernel xfrm_state.
__u64 :64;
} __attribute__((aligned(8)));
++struct bpf_dynptr {
++ __u64 :64;
++ __u64 :64;
++} __attribute__((aligned(8)));
++
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.