// SPDX-License-Identifier: GPL-2.0-or-later
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
			       bool immediate, bool background)
{
	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies;
		unsigned long ping_at = now + rxrpc_idle_ack_delay;

		if (time_before(ping_at, call->ping_at)) {
			WRITE_ONCE(call->ping_at, ping_at);
			rxrpc_reduce_call_timer(call, ping_at, now,
						rxrpc_timer_set_for_ping);
		}
	}
}
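
/* Note the split above: an "immediate" ping only does anything here when
 * background is also set, in which case the RXRPC_CALL_EV_PING event is
 * raised and the work item is queued so that the ping goes out from the
 * call processor; an immediate foreground caller is presumably expected
 * to transmit the ping itself.  A deferred proposal merely pulls ping_at
 * earlier and leaves it to the call timer to raise the event.
 */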
/*
 * Propose an ACK be sent.
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming the ping into another ACK.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update the DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
	} else {
		outcome = rxrpc_propose_ack_subsume;
	}

	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

		if (call->peer->srtt_us != 0)
			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
		else
			ack_at = expiry;

		ack_at += READ_ONCE(call->tx_backoff);
		ack_at += now;
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}
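
/* To summarise the decision flow above: an equal-priority re-proposal just
 * refreshes the recorded serial number (for updateable ACK reasons), a
 * higher-priority reason displaces the pending one, and a lower-priority
 * proposal is subsumed into it.  Unless an immediate send is forced, the
 * ACK timer is only ever brought forward, never pushed back.
 */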
/*
 * Propose an ACK be sent, locking the call structure.
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
	__rxrpc_propose_ACK(call, ack_reason, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}
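
/* A minimal usage sketch (illustrative; the real call sites are in other
 * files such as input.c): a receiver that sees a DATA packet with the
 * request-ack flag set might propose an immediate ACK with:
 *
 *	rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
 *			  true, false, rxrpc_propose_ack_input_data);
 *
 * where sp->hdr.serial is the serial number of the packet asking to be
 * acknowledged and the last argument is the tracepoint tag.
 */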
/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
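
/* RXRPC_CALL_RETRANS_TIMEOUT is consumed elsewhere, by the ACK-processing
 * congestion-management code, where a detected retransmission timeout
 * typically means collapsing the congestion window and re-entering slow
 * start.
 */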
/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
	struct sk_buff *skb;
	unsigned long resend_at, rto_j;
	rxrpc_seq_t cursor, seq, top;
	ktime_t now, max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	rto_j = call->peer->rto_j;

	now = ktime_get_real();
	max_age = ktime_sub(now, jiffies_to_usecs(rto_j));

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_seen);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
	resend_at += jiffies + rto_j;
	WRITE_ONCE(call->resend_at, resend_at);
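
	/* As written, the deadline computed above is one RTO from now plus
	 * the age of the oldest not-yet-expired unacked packet; oldest was
	 * initialised to now, so with nothing young outstanding this reduces
	 * to jiffies + rto_j.
	 */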
	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true, NULL);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}
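
/* Each Tx buffer slot is tracked by a byte in rxtx_annotations[] that
 * parallels rxtx_buffer[]: the bits covered by RXRPC_TX_ANNO_MASK hold the
 * ACK state (ACK, UNACK, NAK or RETRANS) and the remaining bits carry flags
 * such as RXRPC_TX_ANNO_RESENT; the definitions live in ar-internal.h.
 */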
/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;
	unsigned int iterations = 0;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		rxrpc_notify_socket(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}
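
	/* Each expired timeout above is pushed out to (now + MAX_JIFFY_OFFSET),
	 * i.e. effectively never, using cmpxchg(): the swap only succeeds if
	 * nobody has re-armed that timeout since we sampled it, so a
	 * concurrent update is not clobbered.
	 */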
	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}
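
	/* The signed subtraction above is a wrap-safe "newer than" test on
	 * the 32-bit serial space: if the connection has heard from the peer
	 * more recently than anything received on this call, the peer seems
	 * to be alive but to have lost track of the call, so this reports
	 * ECONNRESET rather than ETIME.
	 */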
	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);
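
	/* Each set() above folds one pending timeout into next, leaving next
	 * as the soonest deadline; the timer is re-armed for that instant
	 * unless it has already passed, in which case we loop and process the
	 * newly-expired event straight away.
	 */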
	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}