git.ipfire.org Git - thirdparty/iproute2.git/commitdiff
uapi: update from 6.1 pre rc1
author: Stephen Hemminger <stephen@networkplumber.org>
Tue, 11 Oct 2022 14:17:28 +0000 (07:17 -0700)
committer: Stephen Hemminger <stephen@networkplumber.org>
Tue, 11 Oct 2022 14:17:52 +0000 (07:17 -0700)
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
include/uapi/linux/bpf.h
vdpa/include/uapi/linux/vdpa.h

index c50fc66fc5b08d23242ed4dd3a3058623d84742e..5cad2be209e07d7bdf51b6c77a3a58a599fe2836 100644 (file)
@@ -110,6 +110,12 @@ union bpf_iter_link_info {
                __u32   cgroup_fd;
                __u64   cgroup_id;
        } cgroup;
+       /* Parameters of task iterators. */
+       struct {
+               __u32   tid;
+               __u32   pid;
+               __u32   pid_fd;
+       } task;
 };
 
 /* BPF syscall commands, see bpf(2) man-page for more details. */
@@ -928,6 +934,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_INODE_STORAGE,
        BPF_MAP_TYPE_TASK_STORAGE,
        BPF_MAP_TYPE_BLOOM_FILTER,
+       BPF_MAP_TYPE_USER_RINGBUF,
 };
 
 /* Note that tracing related programs such as
@@ -1252,7 +1259,7 @@ enum {
 
 /* Query effective (directly attached + inherited from ancestor cgroups)
  * programs that will be executed for events within a cgroup.
- * attach_flags with this flag are returned only for directly attached programs.
+ * attach_flags with this flag are always returned 0.
  */
 #define BPF_F_QUERY_EFFECTIVE  (1U << 0)
 
@@ -1451,7 +1458,10 @@ union bpf_attr {
                __u32           attach_flags;
                __aligned_u64   prog_ids;
                __u32           prog_cnt;
-               __aligned_u64   prog_attach_flags; /* output: per-program attach_flags */
+               /* output: per-program attach_flags.
+                * not allowed to be set during effective query.
+                */
+               __aligned_u64   prog_attach_flags;
        } query;
 
        struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -4950,6 +4960,7 @@ union bpf_attr {
  *             Get address of the traced function (for tracing and kprobe programs).
  *     Return
  *             Address of the traced function.
+ *             0 for kprobes placed within the function (not at the entry).
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *     Description
@@ -5079,12 +5090,12 @@ union bpf_attr {
  *
  * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
  *     Description
- *             Get **n**-th argument (zero based) of the traced function (for tracing programs)
+ *             Get **n**-th argument register (zero based) of the traced function (for tracing programs)
  *             returned in **value**.
  *
  *     Return
  *             0 on success.
- *             **-EINVAL** if n >= arguments count of traced function.
+ *             **-EINVAL** if n >= argument register count of traced function.
  *
  * long bpf_get_func_ret(void *ctx, u64 *value)
  *     Description
@@ -5097,10 +5108,11 @@ union bpf_attr {
  *
  * long bpf_get_func_arg_cnt(void *ctx)
  *     Description
- *             Get number of arguments of the traced function (for tracing programs).
+ *             Get number of registers of the traced function (for tracing programs) where
+ *             function arguments are stored in these registers.
  *
  *     Return
- *             The number of arguments of the traced function.
+ *             The number of argument registers of the traced function.
  *
  * int bpf_get_retval(void)
  *     Description
@@ -5386,6 +5398,43 @@ union bpf_attr {
  *     Return
  *             Current *ktime*.
  *
+ * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
+ *     Description
+ *             Drain samples from the specified user ring buffer, and invoke
+ *             the provided callback for each such sample:
+ *
+ *             long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ *             If **callback_fn** returns 0, the helper will continue to try
+ *             and drain the next sample, up to a maximum of
+ *             BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ *             the helper will skip the rest of the samples and return. Other
+ *             return values are not used now, and will be rejected by the
+ *             verifier.
+ *     Return
+ *             The number of drained samples if no error was encountered while
+ *             draining samples, or 0 if no samples were present in the ring
+ *             buffer. If a user-space producer was epoll-waiting on this map,
+ *             and at least one sample was drained, they will receive an event
+ *             notification notifying them of available space in the ring
+ *             buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ *             function, no wakeup notification will be sent. If the
+ *             BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ *             be sent even if no sample was drained.
+ *
+ *             On failure, the returned value is one of the following:
+ *
+ *             **-EBUSY** if the ring buffer is contended, and another calling
+ *             context was concurrently draining the ring buffer.
+ *
+ *             **-EINVAL** if user-space is not properly tracking the ring
+ *             buffer due to the producer position not being aligned to 8
+ *             bytes, a sample not being aligned to 8 bytes, or the producer
+ *             position not matching the advertised length of a sample.
+ *
+ *             **-E2BIG** if user-space has tried to publish a sample which is
+ *             larger than the size of the ring buffer, or which cannot fit
+ *             within a struct bpf_dynptr.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -5597,6 +5646,7 @@ union bpf_attr {
        FN(tcp_raw_check_syncookie_ipv4),       \
        FN(tcp_raw_check_syncookie_ipv6),       \
        FN(ktime_get_tai_ns),           \
+       FN(user_ringbuf_drain),         \
        /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -6218,6 +6268,10 @@ struct bpf_link_info {
                                        __u64 cgroup_id;
                                        __u32 order;
                                } cgroup;
+                               struct {
+                                       __u32 tid;
+                                       __u32 pid;
+                               } task;
                        };
                } iter;
                struct  {
index 94e4dad1d86c764ac452097e8a25256b432f9a4a..056185228efe998c704a9ee00df133cae8bbe8ea 100644 (file)
@@ -46,12 +46,18 @@ enum vdpa_attr {
 
        VDPA_ATTR_DEV_NEGOTIATED_FEATURES,      /* u64 */
        VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,          /* u32 */
+       /* virtio features that are supported by the vDPA management device */
        VDPA_ATTR_DEV_SUPPORTED_FEATURES,       /* u64 */
 
        VDPA_ATTR_DEV_QUEUE_INDEX,              /* u32 */
        VDPA_ATTR_DEV_VENDOR_ATTR_NAME,         /* string */
        VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,        /* u64 */
 
+       VDPA_ATTR_DEV_FEATURES,                 /* u64 */
+
+       /* virtio features that are supported by the vDPA device */
+       VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES,  /* u64 */
+
        /* new attributes must be added above here */
        VDPA_ATTR_MAX,
 };