// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"

#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>

static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
        struct message_handshake_initiation packet;

        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT))
                return; /* This function is rate limited. */

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                wg_timers_any_authenticated_packet_traversal(peer);
                wg_timers_any_authenticated_packet_sent(peer);
                atomic64_set(&peer->last_sent_handshake,
                             ktime_get_coarse_boottime_ns());
                wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
                                              HANDSHAKE_DSCP);
                wg_timers_handshake_initiated(peer);
        }
}

void wg_packet_handshake_send_worker(struct work_struct *work)
{
        struct wg_peer *peer = container_of(work, struct wg_peer,
                                            transmit_handshake_work);

        wg_packet_send_handshake_initiation(peer);
        wg_peer_put(peer);
}

void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry)
{
        if (!is_retry)
                peer->timer_handshake_attempts = 0;

        rcu_read_lock_bh();
        /* We check last_sent_handshake here in addition to the actual function
         * we're queueing up, so that we don't queue things if not strictly
         * necessary:
         */
        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT) ||
            unlikely(READ_ONCE(peer->is_dead)))
                goto out;

        wg_peer_get(peer);
        /* Queues up calling wg_packet_handshake_send_worker(), which does a
         * wg_peer_put(peer) when it has finished:
         */
        if (!queue_work(peer->device->handshake_send_wq,
                        &peer->transmit_handshake_work))
                /* If the work was already queued, we want to drop the
                 * extra reference:
                 */
                wg_peer_put(peer);
out:
        rcu_read_unlock_bh();
}

void wg_packet_send_handshake_response(struct wg_peer *peer)
{
        struct message_handshake_response packet;

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                if (wg_noise_handshake_begin_session(&peer->handshake,
                                                     &peer->keypairs)) {
                        wg_timers_session_derived(peer);
                        wg_timers_any_authenticated_packet_traversal(peer);
                        wg_timers_any_authenticated_packet_sent(peer);
                        atomic64_set(&peer->last_sent_handshake,
                                     ktime_get_coarse_boottime_ns());
                        wg_socket_send_buffer_to_peer(peer, &packet,
                                                      sizeof(packet),
                                                      HANDSHAKE_DSCP);
                }
        }
}

void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index)
{
        struct message_handshake_cookie packet;

        net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
                                wg->dev->name, initiating_skb);
        wg_cookie_message_create(&packet, initiating_skb, sender_index,
                                 &wg->cookie_checker);
        wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
                                              sizeof(packet));
}

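/* Opportunistically kicks off a new handshake when the current sending key is
 * nearing the end of its life, either by message count (REKEY_AFTER_MESSAGES)
 * or, for the original initiator of the session, by age (REKEY_AFTER_TIME).
 */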
static void keep_key_fresh(struct wg_peer *peer)
{
        struct noise_keypair *keypair;
        bool send;

        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
        send = keypair && READ_ONCE(keypair->sending.is_valid) &&
               (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES ||
                (keypair->i_am_the_initiator &&
                 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
        rcu_read_unlock_bh();

        if (unlikely(send))
                wg_packet_send_queued_handshake_initiation(peer, false);
}

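/* Returns how many zero bytes of padding to append so that the plaintext
 * length becomes a multiple of MESSAGE_PADDING_MULTIPLE without exceeding the
 * MTU. For example, assuming MESSAGE_PADDING_MULTIPLE is 16, a 29-byte
 * plaintext gets 3 bytes of padding, while a packet already at the MTU gets
 * none.
 */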
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
        unsigned int padded_size, last_unit = skb->len;

        if (unlikely(!PACKET_CB(skb)->mtu))
                return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

        /* We do this modulo business with the MTU, just in case the networking
         * layer gives us a packet that's bigger than the MTU. In that case, we
         * wouldn't want the final subtraction to overflow in the case of the
         * padded_size being clamped. Fortunately, that's very rarely the case,
         * so we optimize for that not happening.
         */
        if (unlikely(last_unit > PACKET_CB(skb)->mtu))
                last_unit %= PACKET_CB(skb)->mtu;

        padded_size = min(PACKET_CB(skb)->mtu,
                          ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
        return padded_size - last_unit;
}

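/* Turns a plaintext skb into an encrypted transport-data message in place:
 * pad, make the buffer writable, finish any pending checksum, prepend the
 * message_data header, reserve room for the authentication tag, and run the
 * AEAD over a scatterlist covering the payload.
 */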
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
        unsigned int padding_len, plaintext_len, trailer_len;
        struct scatterlist sg[MAX_SKB_FRAGS + 8];
        struct message_data *header;
        struct sk_buff *trailer;
        int num_frags;

        /* Calculate lengths. */
        padding_len = calculate_skb_padding(skb);
        trailer_len = padding_len + noise_encrypted_len(0);
        plaintext_len = skb->len + padding_len;

        /* Expand data section to have room for padding and auth tag. */
        num_frags = skb_cow_data(skb, trailer_len, &trailer);
        if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
                return false;

        /* Set the padding to zeros, and make sure it and the auth tag are part
         * of the skb.
         */
        memset(skb_tail_pointer(trailer), 0, padding_len);

        /* Expand head section to have room for our header and the network
         * stack's headers.
         */
        if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
                return false;

        /* Finalize checksum calculation for the inner packet, if required. */
        if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
                     skb_checksum_help(skb)))
                return false;

        /* Only after checksumming can we safely add on the padding at the end
         * and the header.
         */
        skb_set_inner_network_header(skb, 0);
        header = (struct message_data *)skb_push(skb, sizeof(*header));
        header->header.type = cpu_to_le32(MESSAGE_DATA);
        header->key_idx = keypair->remote_index;
        header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
        pskb_put(skb, trailer, trailer_len);

        /* Now we can encrypt the scattergather segments. */
        sg_init_table(sg, num_frags);
        if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
                         noise_encrypted_len(plaintext_len)) <= 0)
                return false;
        return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
                                                   PACKET_CB(skb)->nonce,
                                                   keypair->sending.key);
}

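/* A keepalive is simply a data message with a zero-length payload. One is
 * staged only if nothing else is already waiting to be sent, and the normal
 * staged-packet path then transmits it.
 */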
void wg_packet_send_keepalive(struct wg_peer *peer)
{
        struct sk_buff *skb;

        if (skb_queue_empty(&peer->staged_packet_queue)) {
                skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
                                GFP_ATOMIC);
                if (unlikely(!skb))
                        return;
                skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
                skb->dev = peer->device->dev;
                PACKET_CB(skb)->mtu = skb->dev->mtu;
                skb_queue_tail(&peer->staged_packet_queue, skb);
                net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
                                    peer->device->dev->name, peer->internal_id,
                                    &peer->endpoint.addr);
        }

        wg_packet_send_staged_packets(peer);
}

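/* Called from the tx worker once a bundle has been encrypted: fire the
 * relevant timers, hand each skb to the socket layer, and then consider
 * whether a fresh handshake is due.
 */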
static void wg_packet_create_data_done(struct sk_buff *first,
                                       struct wg_peer *peer)
{
        struct sk_buff *skb, *next;
        bool is_keepalive, data_sent = false;

        wg_timers_any_authenticated_packet_traversal(peer);
        wg_timers_any_authenticated_packet_sent(peer);
        skb_list_walk_safe(first, skb, next) {
                is_keepalive = skb->len == message_data_len(0);
                if (likely(!wg_socket_send_skb_to_peer(peer, skb,
                                PACKET_CB(skb)->ds) && !is_keepalive))
                        data_sent = true;
        }

        if (likely(data_sent))
                wg_timers_data_sent(peer);

        keep_key_fresh(peer);
}

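/* Consumes encrypted bundles from the peer's tx ring in order. Because the
 * ring is only peeked until its head entry has left PACKET_STATE_UNCRYPTED,
 * packets go out in the order they were queued, even though encryption itself
 * happens in parallel.
 */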
void wg_packet_tx_worker(struct work_struct *work)
{
        struct crypt_queue *queue = container_of(work, struct crypt_queue,
                                                 work);
        struct noise_keypair *keypair;
        enum packet_state state;
        struct sk_buff *first;
        struct wg_peer *peer;

        while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
               (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
                       PACKET_STATE_UNCRYPTED) {
                __ptr_ring_discard_one(&queue->ring);
                peer = PACKET_PEER(first);
                keypair = PACKET_CB(first)->keypair;

                if (likely(state == PACKET_STATE_CRYPTED))
                        wg_packet_create_data_done(first, peer);
                else
                        kfree_skb_list(first);

                wg_noise_keypair_put(keypair, false);
                wg_peer_put(peer);
                if (need_resched())
                        cond_resched();
        }
}

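/* Runs on the device's packet_crypt_wq workers: each consumed bundle is
 * encrypted skb by skb with the keypair chosen when it was queued, then marked
 * PACKET_STATE_CRYPTED (or PACKET_STATE_DEAD on failure) for the per-peer tx
 * worker to pick up.
 */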
void wg_packet_encrypt_worker(struct work_struct *work)
{
        struct crypt_queue *queue = container_of(work, struct multicore_worker,
                                                 work)->ptr;
        struct sk_buff *first, *skb, *next;

        while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                enum packet_state state = PACKET_STATE_CRYPTED;

                skb_list_walk_safe(first, skb, next) {
                        if (likely(encrypt_packet(skb,
                                                  PACKET_CB(first)->keypair))) {
                                wg_reset_packet(skb);
                        } else {
                                state = PACKET_STATE_DEAD;
                                break;
                        }
                }
                wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
                                          state);
                if (need_resched())
                        cond_resched();
        }
}

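/* Hands a bundle of skbs to the shared encrypt queue and the peer's tx queue,
 * unless the peer is being torn down, in which case the bundle is marked dead
 * or freed.
 */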
static void wg_packet_create_data(struct sk_buff *first)
{
        struct wg_peer *peer = PACKET_PEER(first);
        struct wg_device *wg = peer->device;
        int ret = -EINVAL;

        rcu_read_lock_bh();
        if (unlikely(READ_ONCE(peer->is_dead)))
                goto err;

        ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
                                                   &peer->tx_queue, first,
                                                   wg->packet_crypt_wq,
                                                   &wg->encrypt_queue.last_cpu);
        if (unlikely(ret == -EPIPE))
                wg_queue_enqueue_per_peer(&peer->tx_queue, first,
                                          PACKET_STATE_DEAD);
err:
        rcu_read_unlock_bh();
        if (likely(!ret || ret == -EPIPE))
                return;
        wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
        wg_peer_put(peer);
        kfree_skb_list(first);
}

void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
        spin_lock_bh(&peer->staged_packet_queue.lock);
        peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
        __skb_queue_purge(&peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
}

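/* Drains the staged queue under its lock, takes a reference to the current
 * keypair, and assigns a nonce to every packet before queueing the bundle for
 * encryption. If no valid key is available, the packets are returned to the
 * staged queue and a handshake is initiated instead.
 */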
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
        struct noise_symmetric_key *key;
        struct noise_keypair *keypair;
        struct sk_buff_head packets;
        struct sk_buff *skb;

        /* Steal the current queue into our local one. */
        __skb_queue_head_init(&packets);
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice_init(&peer->staged_packet_queue, &packets);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
        if (unlikely(skb_queue_empty(&packets)))
                return;

        /* First we make sure we have a valid reference to a valid key. */
        rcu_read_lock_bh();
        keypair = wg_noise_keypair_get(
                rcu_dereference_bh(peer->keypairs.current_keypair));
        rcu_read_unlock_bh();
        if (unlikely(!keypair))
                goto out_nokey;
        key = &keypair->sending;
        if (unlikely(!READ_ONCE(key->is_valid)))
                goto out_nokey;
        if (unlikely(wg_birthdate_has_expired(key->birthdate,
                                              REJECT_AFTER_TIME)))
                goto out_invalid;

        /* After we know we have a somewhat valid key, we now try to assign
         * nonces to all of the packets in the queue. If we can't assign nonces
         * for all of them, we just consider it a failure and wait for the next
         * handshake.
         */
        skb_queue_walk(&packets, skb) {
                /* 0 for no outer TOS: no leak. TODO: at some later point, we
                 * might consider using flowi->tos as outer instead.
                 */
                PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
                PACKET_CB(skb)->nonce =
                        atomic64_inc_return(&key->counter.counter) - 1;
                if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
                        goto out_invalid;
        }

        packets.prev->next = NULL;
        wg_peer_get(keypair->entry.peer);
        PACKET_CB(packets.next)->keypair = keypair;
        wg_packet_create_data(packets.next);
        return;

out_invalid:
        WRITE_ONCE(key->is_valid, false);
out_nokey:
        wg_noise_keypair_put(keypair, false);

        /* We orphan the packets if we're waiting on a handshake, so that they
         * don't block a socket's pool.
         */
        skb_queue_walk(&packets, skb)
                skb_orphan(skb);
        /* Then we put them back on the top of the queue. We're not too
         * concerned about accidentally getting things a little out of order if
         * packets are being added really fast, because this queue is for before
         * packets can even be sent and it's small anyway.
         */
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        /* If we're exiting because there's something wrong with the key, it
         * means we should initiate a new handshake.
         */
        wg_packet_send_queued_handshake_initiation(peer, false);
}