// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

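/* GRO receive handler for ESP: if the device did not already do the
 * crypto (CRYPTO_DONE), look up the xfrm state for the SPI found in the
 * packet, attach it to the secpath and hand the skb to xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb has
 * been consumed; on error the flow is marked for flush and NULL is
 * returned.
 */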
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

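/* Fill in the ESP header of a GSO skb before segmentation: set SPI and
 * the low 32 bits of the output sequence number, flag the outer
 * protocol as ESP and save the inner protocol in the offload context.
 */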
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

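/* Tunnel mode: re-push the MAC header and let the generic MAC-layer
 * GSO code segment the encapsulated packet.
 */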
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
}

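/* Transport mode: step over the ESP header and hand the payload to the
 * inner transport protocol's gso_segment callback.
 */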
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

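/* BEET mode: account for the optional pseudo header (IPPROTO_BEETPH)
 * and, for an IPv6 inner selector, any extension headers, before
 * handing the payload to the inner protocol's gso_segment callback.
 */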
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

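/* Dispatch segmentation according to the outer encapsulation mode. */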
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

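/* GSO callback for ESP: check that the skb matches the offloaded state,
 * strip the ESP header and IV, and mask out SG/checksum features the
 * device cannot provide for this packet before segmenting by outer mode.
 */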
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~NETIF_F_CSUM_MASK;

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

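/* Tail of the receive path for offloaded ESP: if the device did not do
 * the crypto itself, reset the checksum state before finishing up in
 * esp_input_done2().
 */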
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

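/* Transmit hook for offloaded ESP: compute the ESP trailer layout and
 * fill in SPI and sequence numbers.  If the device cannot handle this
 * packet, fall back to software encryption (CRYPTO_FALLBACK) via
 * esp_output_head()/esp_output_tail().
 */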
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload)
                return 0;

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

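/* GRO/GSO callbacks and xfrm type offload hooks registered for the ESP
 * protocol on IPv4.
 */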
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description = "ESP4 OFFLOAD",
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);