/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

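/* Run @prog @repeat times against @ctx, reporting the program's last return
 * value in @retval and the average per-invocation runtime in nanoseconds
 * in @time.
 */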
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

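	/* Allocate every flavour of cgroup storage the program may use;
	 * on failure, unwind whatever was already allocated (freeing the
	 * NULL entries is a no-op).
	 */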
	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

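	/* The timed loop runs under RCU with preemption disabled. Bail out
	 * if a signal is pending, and when a reschedule is needed drop both
	 * and cond_resched(), excluding that gap from the measurement.
	 */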
	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

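/* Copy the (possibly modified) packet data and the run statistics back to
 * userspace. Returns -ENOSPC when the user's output buffer is too small for
 * the full packet; the copy is then truncated, but data_size_out still
 * reports the full size.
 */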
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

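/* Allocate a kernel buffer with the requested headroom and tailroom and
 * copy the user-supplied test packet into it.
 */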
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

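/* BPF_PROG_TEST_RUN handler for skb-based program types: build a fake skb
 * around the user-supplied data, run the program against it, and return the
 * resulting packet and statistics to userspace.
 */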
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

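	/* Give the skb a freshly initialized dummy socket in the caller's
	 * netns so programs that dereference skb->sk see sane data.
	 */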
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

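	/* L2 programs expect the MAC header in the linear data, so push it
	 * back (eth_type_trans() pulled it); direct packet access needs the
	 * data/data_end pointers precomputed.
	 */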
	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}
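	/* Non-L2 programs ran without a MAC header; prepend a zeroed one
	 * (growing the headroom if necessary) so userspace gets back a
	 * complete Ethernet frame.
	 */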
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}

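/* BPF_PROG_TEST_RUN handler for XDP programs: run the program against an
 * xdp_buff built directly on the user-supplied data, with no skb involved.
 */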
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

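	/* Lay out the xdp_buff the way a driver would: headroom first, then
	 * the packet, with an initially empty metadata area.
	 */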
	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

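	/* XDP programs may look at the receive queue; borrow queue 0 of the
	 * loopback device in the caller's netns.
	 */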
	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}