__u8 sqe_flags;
__u8 pdu_size; /* size of aux data for filter */
__u8 pad[5];
+ union {
+ struct {
+ __u32 family;
+ __u32 type;
+ __u32 protocol;
+ } socket;
+ };
};
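For illustration, a filter consuming the new fields might look roughly like the sketch below. This is not part of the patch: the attach mechanism, the way the union is disambiguated, and the allow/deny return convention are all assumptions here, since only the struct layout and the kernel-side populate path appear in this excerpt.

/*
 * Hypothetical filter-side sketch (not part of the patch): deny plain
 * AF_INET datagram sockets, allow everything else. Assumes the filter
 * receives a struct io_uring_bpf_ctx * and that 0 = block, 1 = allow;
 * neither the program type nor the return semantics are shown above.
 */
#include <linux/io_uring/bpf_filter.h>	/* assumed install path of the UAPI header */
#include <sys/socket.h>

static int filter_io_socket(const struct io_uring_bpf_ctx *bctx)
{
	/* pdu_size says whether opcode-specific aux data was populated */
	if (bctx->pdu_size != sizeof(bctx->socket))
		return 1;
	/*
	 * A real filter would also match on the submitting opcode (not
	 * visible in this excerpt) before trusting the union contents.
	 */
	if (bctx->socket.family == AF_INET && bctx->socket.type == SOCK_DGRAM)
		return 0;
	return 1;
}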
enum {
/* clear residual, anything from pdu_size and below */
memset((void *) bctx + offsetof(struct io_uring_bpf_ctx, pdu_size), 0,
sizeof(*bctx) - offsetof(struct io_uring_bpf_ctx, pdu_size));
+
+ /*
+ * Opcodes can provide a handler to populate more data into bctx
+ * for filters to use.
+ */
+ switch (req->opcode) {
+ case IORING_OP_SOCKET:
+ bctx->pdu_size = sizeof(bctx->socket);
+ io_socket_bpf_populate(bctx, req);
+ break;
+ }
}
/*
return IOU_COMPLETE;
}
+void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
+{
+ struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
+
+ bctx->socket.family = sock->domain;
+ bctx->socket.type = sock->type;
+ bctx->socket.protocol = sock->protocol;
+}
+
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
#include <linux/net.h>
#include <linux/uio.h>
#include <linux/io_uring_types.h>
+#include <uapi/linux/io_uring/bpf_filter.h>
struct io_async_msghdr {
#if defined(CONFIG_NET)
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);
+void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);
static inline void io_netmsg_cache_free(const void *entry)
{
}
+static inline void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx,
+ struct io_kiocb *req)
+{
+}
#endif