Current return value can be replaced with the drop_reason,
reducing kernel bloat:
$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/2 grow/shrink: 1/11 up/down: 32/-603 (-571)
Function old new delta
tcp_v6_rcv 3135 3167 +32
unix_dgram_sendmsg 1731 1726 -5
netlink_unicast 957 945 -12
netlink_dump 1372 1359 -13
sk_filter_trim_cap 882 858 -24
tcp_v4_rcv 3143 3111 -32
__pfx_tcp_filter 32 - -32
netlink_broadcast_filtered 1633 1595 -38
sock_queue_rcv_skb_reason 126 76 -50
tun_net_xmit 1127 1074 -53
__sk_receive_skb 690 632 -58
udpv6_queue_rcv_one_skb 935 869 -66
udp_queue_rcv_one_skb 919 853 -66
tcp_filter 154 - -154
Total: Before=29722783, After=29722212, chg -0.00%
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260409145625.2306224-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
- enum skb_drop_reason *reason);
+enum skb_drop_reason
+sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- enum skb_drop_reason ignore_reason;
+ enum skb_drop_reason drop_reason;
- return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
+ drop_reason = sk_filter_trim_cap(sk, skb, 1);
+ return drop_reason ? -EPERM : 0;
}
static inline enum skb_drop_reason
sk_filter_reason(struct sock *sk, struct sk_buff *skb)
{
- enum skb_drop_reason drop_reason;
-
- sk_filter_trim_cap(sk, skb, 1, &drop_reason);
- return drop_reason;
+ return sk_filter_trim_cap(sk, skb, 1);
}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
tcp_filter(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = (const struct tcphdr *)skb->data;
- enum skb_drop_reason reason;
- sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), &reason);
- return reason;
+ return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th));
}
void tcp_set_state(struct sock *sk, int state);
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
* @cap: limit on how short the eBPF program may trim the packet
- * @reason: record drop reason
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
* than pkt_len we keep whole skb->data. This is the socket level
* wrapper to bpf_prog_run. It returns 0 if the packet should
- * be accepted or -EPERM if the packet should be tossed.
+ * be accepted or a drop_reason if the packet should be tossed.
*
*/
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
- unsigned int cap, enum skb_drop_reason *reason)
+enum skb_drop_reason
+sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
- int err;
+ enum skb_drop_reason drop_reason;
struct sk_filter *filter;
+ int err;
/*
* If the skb was allocated from pfmemalloc reserves, only
*/
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
- *reason = SKB_DROP_REASON_PFMEMALLOC;
- return -ENOMEM;
+ return SKB_DROP_REASON_PFMEMALLOC;
}
err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
- if (err) {
- *reason = SKB_DROP_REASON_SOCKET_FILTER;
- return err;
- }
+ if (err)
+ return SKB_DROP_REASON_SOCKET_FILTER;
err = security_sock_rcv_skb(sk, skb);
- if (err) {
- *reason = SKB_DROP_REASON_SECURITY_HOOK;
- return err;
- }
+ if (err)
+ return SKB_DROP_REASON_SECURITY_HOOK;
+ drop_reason = 0;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
skb->sk = save_sk;
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+ if (err)
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
}
rcu_read_unlock();
- *reason = err ? SKB_DROP_REASON_SOCKET_FILTER : 0;
- return err;
+ return drop_reason;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap, bool refcounted)
{
- enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ enum skb_drop_reason reason;
int rc = NET_RX_SUCCESS;
int err;
- if (sk_filter_trim_cap(sk, skb, trim_cap, &reason))
+ reason = sk_filter_trim_cap(sk, skb, trim_cap);
+ if (reason)
goto discard_and_relse;
skb->dev = NULL;
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
+ drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
+ if (drop_reason)
goto drop;
udp_csum_pull_header(skb);
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
+ drop_reason = sk_filter_trim_cap(sk, skb, sizeof(struct udphdr));
+ if (drop_reason)
goto drop;
udp_csum_pull_header(skb);
*/
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
- enum skb_drop_reason dr; /* ignored */
struct rose_sock *rose = rose_sk(sk);
int queued = 0;
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN, &dr) &&
+ if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) &&
__sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;