// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 Duncan Hare, all rights reserved.
 */

/*
 * General Description:
 *
 * TCP support for the wget command, for fast file downloading.
 *
 * HTTP/TCP Receiver:
 *
 * Prerequisites:  - own Ethernet address
 *                 - own IP address
 *                 - Server IP address
 *                 - Server with TCP
 *                 - TCP application (eg wget)
 * Next Step       HTTPS?
 */
#include <common.h>
#include <command.h>
#include <console.h>
#include <env_internal.h>
#include <errno.h>
#include <net.h>
#include <net/tcp.h>

/*
 * TCP sliding window control used by us to request re-TX
 */
static struct tcp_sack_v tcp_lost;

/* TCP option timestamp */
static u32 loc_timestamp;
static u32 rmt_timestamp;

static u32 tcp_seq_init;
static u32 tcp_ack_edge;

static int tcp_activity_count;

/*
 * Search for TCP_SACK and review the comments before the code section.
 * TCP_SACK is the number of packets tracked at the front of the stream.
 */

enum pkt_state {PKT, NOPKT};
struct sack_r {
	struct sack_edges se;
	enum pkt_state st;
};

static struct sack_r edge_a[TCP_SACK];
static unsigned int sack_idx;
static unsigned int prev_len;

/*
 * TCP header lengths are stored as a number of 32-bit words.
 * Add 3 to the length in bytes to round up, then divide by 4
 * to get the length in 32-bit words.
 */
#define LEN_B_TO_DW(x) ((x) >> 2)
#define ROUND_TCPHDR_LEN(x) (LEN_B_TO_DW((x) + 3))
#define SHIFT_TO_TCPHDRLEN_FIELD(x) ((x) << 4)
#define GET_TCP_HDR_LEN_IN_BYTES(x) ((x) >> 2)
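
/*
 * Worked example (illustrative only): a header of TCP_HDR_SIZE (20 bytes)
 * plus 10 bytes of options is 30 bytes. ROUND_TCPHDR_LEN(30) = (30 + 3) >> 2
 * = 8 32-bit words, i.e. the header is padded to 32 bytes on the wire.
 * SHIFT_TO_TCPHDRLEN_FIELD(8) = 0x80 places that count in the upper nibble
 * of the data-offset byte, and GET_TCP_HDR_LEN_IN_BYTES(0x80) = 32 recovers
 * the padded length in bytes from that same byte.
 */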

/* TCP connection state */
static enum tcp_state current_tcp_state;

/* Current TCP RX packet handler */
static rxhand_tcp *tcp_packet_handler;

/**
 * tcp_get_tcp_state() - get current TCP state
 *
 * Return: Current TCP state
 */
enum tcp_state tcp_get_tcp_state(void)
{
	return current_tcp_state;
}

/**
 * tcp_set_tcp_state() - set current TCP state
 * @new_state: new TCP state
 */
void tcp_set_tcp_state(enum tcp_state new_state)
{
	current_tcp_state = new_state;
}

static void dummy_handler(uchar *pkt, u16 dport,
			  struct in_addr sip, u16 sport,
			  u32 tcp_seq_num, u32 tcp_ack_num,
			  u8 action, unsigned int len)
{
}

/**
 * tcp_set_tcp_handler() - set a handler to receive data
 * @f: handler
 */
void tcp_set_tcp_handler(rxhand_tcp *f)
{
	debug_cond(DEBUG_INT_STATE, "--- net_loop TCP handler set (%p)\n", f);
	if (!f)
		tcp_packet_handler = dummy_handler;
	else
		tcp_packet_handler = f;
}

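/*
 * Sketch of how a TCP application hooks in (for illustration; the real
 * client is the wget command): it registers a handler with the signature
 * of rxhand_tcp and is then called back for every segment that carries
 * payload or a PUSH action, e.g.
 *
 *	static void my_handler(uchar *pkt, u16 dport, struct in_addr sip,
 *			       u16 sport, u32 tcp_seq_num, u32 tcp_ack_num,
 *			       u8 action, unsigned int len)
 *	{
 *		// consume len bytes at pkt, watch action for TCP_FIN
 *	}
 *
 *	tcp_set_tcp_handler(my_handler);
 *
 * my_handler is a hypothetical name used only for this example.
 */
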
/**
 * tcp_set_pseudo_header() - set TCP pseudo header
 * @pkt: the packet
 * @src: source IP address
 * @dest: destination IP address
 * @tcp_len: tcp length
 * @pkt_len: packet length
 *
 * Return: the checksum of the packet
 */
u16 tcp_set_pseudo_header(uchar *pkt, struct in_addr src, struct in_addr dest,
			  int tcp_len, int pkt_len)
{
	union tcp_build_pkt *b = (union tcp_build_pkt *)pkt;
	int checksum_len;

	/*
	 * Pseudo header
	 *
	 * Zero the byte after the last byte so that the header checksum
	 * will always work.
	 */
	pkt[pkt_len] = 0;

	net_copy_ip((void *)&b->ph.p_src, &src);
	net_copy_ip((void *)&b->ph.p_dst, &dest);
	b->ph.rsvd = 0;
	b->ph.p = IPPROTO_TCP;
	b->ph.len = htons(tcp_len);
	checksum_len = tcp_len + PSEUDO_HDR_SIZE;

	debug_cond(DEBUG_DEV_PKT,
		   "TCP Pseudo Header (to=%pI4, from=%pI4, Len=%d)\n",
		   &b->ph.p_dst, &b->ph.p_src, checksum_len);

	return compute_ip_checksum(pkt + PSEUDO_PAD_SIZE, checksum_len);
}
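
/*
 * For reference, the 12-byte pseudo header built above follows RFC 793:
 *
 *	source IP address	(4 bytes)
 *	destination IP address	(4 bytes)
 *	zero			(1 byte, b->ph.rsvd)
 *	protocol		(1 byte, IPPROTO_TCP)
 *	TCP length		(2 bytes, header + payload)
 *
 * It is prepended to the TCP header only for checksumming and is never
 * transmitted; the same routine is used to verify received segments.
 */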

/**
 * net_set_ack_options() - set TCP options in acknowledge packets
 * @b: the packet
 *
 * Return: TCP header length
 */
int net_set_ack_options(union tcp_build_pkt *b)
{
	b->sack.hdr.tcp_hlen = SHIFT_TO_TCPHDRLEN_FIELD(LEN_B_TO_DW(TCP_HDR_SIZE));

	b->sack.t_opt.kind = TCP_O_TS;
	b->sack.t_opt.len = TCP_OPT_LEN_A;
	b->sack.t_opt.t_snd = htons(loc_timestamp);
	b->sack.t_opt.t_rcv = rmt_timestamp;
	b->sack.sack_v.kind = TCP_1_NOP;
	b->sack.sack_v.len = 0;

	if (IS_ENABLED(CONFIG_PROT_TCP_SACK)) {
		if (tcp_lost.len > TCP_OPT_LEN_2) {
			debug_cond(DEBUG_DEV_PKT, "TCP ack opt lost.len %x\n",
				   tcp_lost.len);
			b->sack.sack_v.len = tcp_lost.len;
			b->sack.sack_v.kind = TCP_V_SACK;
			b->sack.sack_v.hill[0].l = htonl(tcp_lost.hill[0].l);
			b->sack.sack_v.hill[0].r = htonl(tcp_lost.hill[0].r);

			/*
			 * These SACK structures are initialized with NOPs to
			 * provide TCP header alignment padding. There are 4
			 * SACK structures used for both header padding and
			 * internally.
			 */
			b->sack.sack_v.hill[1].l = htonl(tcp_lost.hill[1].l);
			b->sack.sack_v.hill[1].r = htonl(tcp_lost.hill[1].r);
			b->sack.sack_v.hill[2].l = htonl(tcp_lost.hill[2].l);
			b->sack.sack_v.hill[2].r = htonl(tcp_lost.hill[2].r);
			b->sack.sack_v.hill[3].l = TCP_O_NOP;
			b->sack.sack_v.hill[3].r = TCP_O_NOP;
		}

		b->sack.hdr.tcp_hlen = SHIFT_TO_TCPHDRLEN_FIELD(ROUND_TCPHDR_LEN(TCP_HDR_SIZE +
										 TCP_TSOPT_SIZE +
										 tcp_lost.len));
	} else {
		b->sack.sack_v.kind = 0;
		b->sack.hdr.tcp_hlen = SHIFT_TO_TCPHDRLEN_FIELD(ROUND_TCPHDR_LEN(TCP_HDR_SIZE +
										 TCP_TSOPT_SIZE));
	}

	/*
	 * This returns the actual rounded up length of the
	 * TCP header to add to the total packet length
	 */

	return GET_TCP_HDR_LEN_IN_BYTES(b->sack.hdr.tcp_hlen);
}
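
/*
 * Sizing note (illustrative): without any lost segments the header is
 * just the base header plus the timestamp option block. Once a hole is
 * being reported, tcp_lost.len is TCP_OPT_LEN_2 (kind + length bytes)
 * plus TCP_OPT_LEN_8 per hill, i.e. 2 + 8 = 10 bytes for a single hill,
 * and ROUND_TCPHDR_LEN() pads the total up to the next 32-bit boundary.
 */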

/**
 * net_set_syn_options() - set TCP options in SYN packets
 * @b: the packet
 */
void net_set_syn_options(union tcp_build_pkt *b)
{
	if (IS_ENABLED(CONFIG_PROT_TCP_SACK))
		tcp_lost.len = 0;

	b->ip.hdr.tcp_hlen = 0xa0;

	b->ip.mss.kind = TCP_O_MSS;
	b->ip.mss.len = TCP_OPT_LEN_4;
	b->ip.mss.mss = htons(TCP_MSS);
	b->ip.scale.kind = TCP_O_SCL;
	b->ip.scale.scale = TCP_SCALE;
	b->ip.scale.len = TCP_OPT_LEN_3;
	if (IS_ENABLED(CONFIG_PROT_TCP_SACK)) {
		b->ip.sack_p.kind = TCP_P_SACK;
		b->ip.sack_p.len = TCP_OPT_LEN_2;
	} else {
		b->ip.sack_p.kind = TCP_1_NOP;
		b->ip.sack_p.len = TCP_1_NOP;
	}
	b->ip.t_opt.kind = TCP_O_TS;
	b->ip.t_opt.len = TCP_OPT_LEN_A;
	loc_timestamp = get_ticks();
	rmt_timestamp = 0;
	b->ip.t_opt.t_snd = 0;
	b->ip.t_opt.t_rcv = 0;
	b->ip.end = TCP_O_END;
}
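
/*
 * The hard-coded tcp_hlen of 0xa0 above encodes 10 32-bit words, i.e. a
 * 40-byte header: the 20-byte base header plus 20 bytes of options. That
 * matches the option list built here: MSS (4) + window scale (3) +
 * SACK-permitted or NOPs (2) + timestamps (10) + end-of-options (1) = 20.
 */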

int tcp_set_tcp_header(uchar *pkt, int dport, int sport, int payload_len,
		       u8 action, u32 tcp_seq_num, u32 tcp_ack_num)
{
	union tcp_build_pkt *b = (union tcp_build_pkt *)pkt;
	int pkt_hdr_len;
	int pkt_len;
	int tcp_len;

	/*
	 * Header: 5 32-bit words. 4 bits TCP header length,
	 * 4 bits reserved, options
	 */
	b->ip.hdr.tcp_flags = action;
	pkt_hdr_len = IP_TCP_HDR_SIZE;
	b->ip.hdr.tcp_hlen = SHIFT_TO_TCPHDRLEN_FIELD(LEN_B_TO_DW(TCP_HDR_SIZE));

	switch (action) {
	case TCP_SYN:
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:SYN (%pI4, %pI4, sq=%u, ak=%u)\n",
			   &net_server_ip, &net_ip,
			   tcp_seq_num, tcp_ack_num);
		tcp_activity_count = 0;
		net_set_syn_options(b);
		tcp_seq_num = 0;
		tcp_ack_num = 0;
		pkt_hdr_len = IP_TCP_O_SIZE;
		if (current_tcp_state == TCP_SYN_SENT) {	/* Too many SYNs */
			action = TCP_FIN;
			current_tcp_state = TCP_FIN_WAIT_1;
		} else {
			current_tcp_state = TCP_SYN_SENT;
		}
		break;
	case TCP_SYN | TCP_ACK:
	case TCP_ACK:
		pkt_hdr_len = IP_HDR_SIZE + net_set_ack_options(b);
		b->ip.hdr.tcp_flags = action;
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:ACK (%pI4, %pI4, s=%u, a=%u, A=%x)\n",
			   &net_server_ip, &net_ip, tcp_seq_num, tcp_ack_num,
			   action);
		break;
	case TCP_FIN:
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:FIN (%pI4, %pI4, s=%u, a=%u)\n",
			   &net_server_ip, &net_ip, tcp_seq_num, tcp_ack_num);
		payload_len = 0;
		pkt_hdr_len = IP_TCP_HDR_SIZE;
		current_tcp_state = TCP_FIN_WAIT_1;
		break;
	case TCP_RST | TCP_ACK:
	case TCP_RST:
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:RST (%pI4, %pI4, s=%u, a=%u)\n",
			   &net_server_ip, &net_ip, tcp_seq_num, tcp_ack_num);
		current_tcp_state = TCP_CLOSED;
		break;
	/* Notify connection closing */
	case (TCP_FIN | TCP_ACK):
	case (TCP_FIN | TCP_ACK | TCP_PUSH):
		if (current_tcp_state == TCP_CLOSE_WAIT)
			current_tcp_state = TCP_CLOSING;

		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:FIN ACK PSH(%pI4, %pI4, s=%u, a=%u, A=%x)\n",
			   &net_server_ip, &net_ip,
			   tcp_seq_num, tcp_ack_num, action);
		fallthrough;
	default:
		pkt_hdr_len = IP_HDR_SIZE + net_set_ack_options(b);
		b->ip.hdr.tcp_flags = action | TCP_PUSH | TCP_ACK;
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Hdr:dft (%pI4, %pI4, s=%u, a=%u, A=%x)\n",
			   &net_server_ip, &net_ip,
			   tcp_seq_num, tcp_ack_num, action);
	}

	pkt_len = pkt_hdr_len + payload_len;
	tcp_len = pkt_len - IP_HDR_SIZE;

	tcp_ack_edge = tcp_ack_num;
	/* TCP Header */
	b->ip.hdr.tcp_ack = htonl(tcp_ack_edge);
	b->ip.hdr.tcp_src = htons(sport);
	b->ip.hdr.tcp_dst = htons(dport);
	b->ip.hdr.tcp_seq = htonl(tcp_seq_num);

	/*
	 * TCP window size - TCP header variable tcp_win.
	 * Change tcp_win only if you have an understanding of network
	 * overrun, congestion, TCP segment sizes, TCP windows, TCP scale,
	 * queuing theory and packet buffering. If there are too few buffers,
	 * there will be data loss, and recovery may work, or the sending TCP
	 * (the server) could abort the stream transmission.
	 * MSS is governed by the maximum Ethernet frame length.
	 * The number of buffers is governed by the desire to have a queue of
	 * full buffers to be processed at the destination to maximize
	 * throughput. Temporary memory use during the boot phase on modern
	 * SoCs is usually not a constraint on buffer space; if it is,
	 * then U-Boot's tftp or nfs kernel netboot should be considered
	 * instead.
	 */
	b->ip.hdr.tcp_win = htons(PKTBUFSRX * TCP_MSS >> TCP_SCALE);
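	/*
	 * As a rough illustration (values depend on the build): with the
	 * common defaults of PKTBUFSRX = 4 receive buffers, TCP_MSS = 1460
	 * and TCP_SCALE = 1, the advertised window is (4 * 1460) >> 1 =
	 * 2920 bytes, scaled back up by the peer using the window-scale
	 * option sent in the SYN.
	 */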

	b->ip.hdr.tcp_xsum = 0;
	b->ip.hdr.tcp_ugr = 0;

	b->ip.hdr.tcp_xsum = tcp_set_pseudo_header(pkt, net_ip, net_server_ip,
						   tcp_len, pkt_len);

	net_set_ip_header((uchar *)&b->ip, net_server_ip, net_ip,
			  pkt_len, IPPROTO_TCP);

	return pkt_hdr_len;
}
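
/*
 * Usage sketch (illustrative, not a verbatim copy of the wget client):
 * an application opens the connection and sends data through
 * net_send_tcp_packet(), which in turn calls tcp_set_tcp_header() above,
 * e.g.
 *
 *	// open the connection (payload_len 0, initial seq/ack 0)
 *	net_send_tcp_packet(0, SERVER_PORT, OUR_PORT, TCP_SYN, 0, 0);
 *
 *	// later, push an HTTP GET request once the connection is up
 *	net_send_tcp_packet(strlen(request), SERVER_PORT, OUR_PORT,
 *			    TCP_ACK | TCP_PUSH, seq, ack);
 *
 * SERVER_PORT, OUR_PORT, request, seq and ack are placeholders for
 * values the application tracks itself.
 */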

/**
 * tcp_hole() - Selective Acknowledgment (essential for fast stream transfer)
 * @tcp_seq_num: TCP sequence start number
 * @len: the length of sequence numbers
 */
void tcp_hole(u32 tcp_seq_num, u32 len)
{
	u32 idx_sack, sack_in;
	u32 sack_end = TCP_SACK - 1;
	u32 hill = 0;
	enum pkt_state expect = PKT;
	u32 seq = tcp_seq_num - tcp_seq_init;
	u32 hol_l = tcp_ack_edge - tcp_seq_init;
	u32 hol_r = 0;

	/* Place new seq number in correct place in receive array */
	if (prev_len == 0)
		prev_len = len;

	idx_sack = sack_idx + ((tcp_seq_num - tcp_ack_edge) / prev_len);
	if (idx_sack < TCP_SACK) {
		edge_a[idx_sack].se.l = tcp_seq_num;
		edge_a[idx_sack].se.r = tcp_seq_num + len;
		edge_a[idx_sack].st = PKT;

		/*
		 * The fin (last) packet is not the same length as data
		 * packets, and if its length were recorded and used for
		 * the array index calculation, the calculation would break.
		 */
		if (prev_len < len)
			prev_len = len;
	}

	debug_cond(DEBUG_DEV_PKT,
		   "TCP 1 seq %d, edg %d, len %d, sack_idx %d, sack_end %d\n",
		   seq, hol_l, len, sack_idx, sack_end);

	/* Right edge of contiguous stream, is the left edge of first hill */
	hol_l = tcp_seq_num - tcp_seq_init;
	hol_r = hol_l + len;

	if (IS_ENABLED(CONFIG_PROT_TCP_SACK))
		tcp_lost.len = TCP_OPT_LEN_2;

	debug_cond(DEBUG_DEV_PKT,
		   "TCP 1 in %d, seq %d, pkt_l %d, pkt_r %d, sack_idx %d, sack_end %d\n",
		   idx_sack, seq, hol_l, hol_r, sack_idx, sack_end);

	for (sack_in = sack_idx; sack_in < sack_end && hill < TCP_SACK_HILLS;
	     sack_in++) {
		switch (expect) {
		case NOPKT:
			switch (edge_a[sack_in].st) {
			case NOPKT:
				debug_cond(DEBUG_INT_STATE, "N");
				break;
			case PKT:
				debug_cond(DEBUG_INT_STATE, "n");
				if (IS_ENABLED(CONFIG_PROT_TCP_SACK)) {
					tcp_lost.hill[hill].l =
						edge_a[sack_in].se.l;
					tcp_lost.hill[hill].r =
						edge_a[sack_in].se.r;
				}
				expect = PKT;
				break;
			}
			break;
		case PKT:
			switch (edge_a[sack_in].st) {
			case NOPKT:
				debug_cond(DEBUG_INT_STATE, "p");
				if (sack_in > sack_idx &&
				    hill < TCP_SACK_HILLS) {
					hill++;
					if (IS_ENABLED(CONFIG_PROT_TCP_SACK))
						tcp_lost.len += TCP_OPT_LEN_8;
				}
				expect = NOPKT;
				break;
			case PKT:
				debug_cond(DEBUG_INT_STATE, "P");

				if (tcp_ack_edge == edge_a[sack_in].se.l) {
					tcp_ack_edge = edge_a[sack_in].se.r;
					edge_a[sack_in].st = NOPKT;
					sack_idx++;
				} else {
					if (IS_ENABLED(CONFIG_PROT_TCP_SACK) &&
					    hill < TCP_SACK_HILLS)
						tcp_lost.hill[hill].r =
							edge_a[sack_in].se.r;
					if (IS_ENABLED(CONFIG_PROT_TCP_SACK) &&
					    sack_in == sack_end - 1)
						tcp_lost.hill[hill].r =
							edge_a[sack_in].se.r;
				}
				break;
			}
			break;
		}
	}
	debug_cond(DEBUG_INT_STATE, "\n");
	if (!IS_ENABLED(CONFIG_PROT_TCP_SACK) || tcp_lost.len <= TCP_OPT_LEN_2)
		sack_idx = 0;
}
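
/*
 * Example of the bookkeeping above (illustrative): suppose 1460-byte
 * segments A, B, C, D are expected and A, C, D arrive but B is lost.
 * A advances tcp_ack_edge; C and D form one "hill" whose left edge is
 * C's first sequence number and whose right edge is D's last. The ACK
 * sent back then carries one SACK block describing that hill, and
 * tcp_lost.len grows from TCP_OPT_LEN_2 (kind + length bytes) by
 * TCP_OPT_LEN_8 for the pair of 32-bit edges, so the server can
 * retransmit only B.
 */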

/**
 * tcp_parse_options() - parse TCP options
 * @o: pointer to the option field.
 * @o_len: length of the option field.
 */
void tcp_parse_options(uchar *o, int o_len)
{
	struct tcp_t_opt *tsopt;
	uchar *p = o;

	/*
	 * NOPs are single-byte options with no length field, and thus are
	 * special. All other options have length fields.
	 */
	for (p = o; p < (o + o_len); p = p + p[1]) {
		if (!p[1])
			return; /* Finished processing options */

		switch (p[0]) {
		case TCP_O_END:
			return;
		case TCP_O_MSS:
		case TCP_O_SCL:
		case TCP_P_SACK:
		case TCP_V_SACK:
			break;
		case TCP_O_TS:
			tsopt = (struct tcp_t_opt *)p;
			rmt_timestamp = tsopt->t_snd;
			return;
		}

		/* Process optional NOPs */
		if (p[0] == TCP_O_NOP)
			p++;
	}
}
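
/*
 * Option encoding reminder (illustrative): every option starts with a
 * kind byte, and all kinds other than NOP (0x01) and end-of-options
 * (0x00) are followed by a length byte covering the whole option. For
 * example, an option block of
 *
 *	08 0a <4-byte TSval> <4-byte TSecr> 01 01
 *
 * carries a 10-byte timestamp option padded with two NOPs; the TCP_O_TS
 * case above records the sender's TSval (echoed back later as TSecr in
 * our ACKs) and returns without walking the padding.
 */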

static u8 tcp_state_machine(u8 tcp_flags, u32 tcp_seq_num, int payload_len)
{
	u8 tcp_fin = tcp_flags & TCP_FIN;
	u8 tcp_syn = tcp_flags & TCP_SYN;
	u8 tcp_rst = tcp_flags & TCP_RST;
	u8 tcp_push = tcp_flags & TCP_PUSH;
	u8 tcp_ack = tcp_flags & TCP_ACK;
	u8 action = TCP_DATA;
	int i;

	/*
	 * tcp_flags are examined to determine TX action in a given state.
	 * tcp_push is interpreted to mean "inform the app".
	 * The urg, ece, cwr and nonce flags are not supported.
	 *
	 * ece and cwr are used to signal and confirm knowledge of congestion.
	 * This TCP only sends a file request and acks. If it generates
	 * congestion, the network is broken.
	 */
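	/*
	 * For orientation, a typical wget download drives the states below
	 * roughly as follows (client side): TCP_CLOSED, then TCP_SYN_SENT
	 * after our SYN goes out, TCP_ESTABLISHED once the SYN-ACK is
	 * acknowledged, TCP_CLOSE_WAIT when the server's FIN arrives, and
	 * TCP_CLOSING/TCP_CLOSED as the close handshake completes.
	 */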
	debug_cond(DEBUG_INT_STATE, "TCP STATE ENTRY %x\n", action);
	if (tcp_rst) {
		action = TCP_DATA;
		current_tcp_state = TCP_CLOSED;
		net_set_state(NETLOOP_FAIL);
		debug_cond(DEBUG_INT_STATE, "TCP Reset %x\n", tcp_flags);
		return TCP_RST;
	}

	switch (current_tcp_state) {
	case TCP_CLOSED:
		debug_cond(DEBUG_INT_STATE, "TCP CLOSED %x\n", tcp_flags);
		if (tcp_syn) {
			action = TCP_SYN | TCP_ACK;
			tcp_seq_init = tcp_seq_num;
			tcp_ack_edge = tcp_seq_num + 1;
			current_tcp_state = TCP_SYN_RECEIVED;
		} else if (tcp_ack || tcp_fin) {
			action = TCP_DATA;
		}
		break;
	case TCP_SYN_RECEIVED:
	case TCP_SYN_SENT:
		debug_cond(DEBUG_INT_STATE, "TCP_SYN_SENT | TCP_SYN_RECEIVED %x, %u\n",
			   tcp_flags, tcp_seq_num);
		if (tcp_fin) {
			action = action | TCP_PUSH;
			current_tcp_state = TCP_CLOSE_WAIT;
		} else if (tcp_ack || (tcp_syn && tcp_ack)) {
			action |= TCP_ACK;
			tcp_seq_init = tcp_seq_num;
			tcp_ack_edge = tcp_seq_num + 1;
			sack_idx = 0;
			edge_a[sack_idx].se.l = tcp_ack_edge;
			edge_a[sack_idx].se.r = tcp_ack_edge;
			prev_len = 0;
			current_tcp_state = TCP_ESTABLISHED;
			for (i = 0; i < TCP_SACK; i++)
				edge_a[i].st = NOPKT;

			if (tcp_syn && tcp_ack)
				action |= TCP_PUSH;
		} else {
			action = TCP_DATA;
		}
		break;
	case TCP_ESTABLISHED:
		debug_cond(DEBUG_INT_STATE, "TCP_ESTABLISHED %x\n", tcp_flags);
		if (payload_len > 0) {
			tcp_hole(tcp_seq_num, payload_len);
			tcp_fin = TCP_DATA;	/* cause standalone FIN */
		}

		if ((tcp_fin) &&
		    (!IS_ENABLED(CONFIG_PROT_TCP_SACK) ||
		     tcp_lost.len <= TCP_OPT_LEN_2)) {
			action = action | TCP_FIN | TCP_PUSH | TCP_ACK;
			current_tcp_state = TCP_CLOSE_WAIT;
		} else if (tcp_ack) {
			action = TCP_DATA;
		}

		if (tcp_syn)
			action = TCP_ACK + TCP_RST;
		else if (tcp_push)
			action = action | TCP_PUSH;
		break;
	case TCP_CLOSE_WAIT:
		debug_cond(DEBUG_INT_STATE, "TCP_CLOSE_WAIT (%x)\n", tcp_flags);
		action = TCP_DATA;
		break;
	case TCP_FIN_WAIT_2:
		debug_cond(DEBUG_INT_STATE, "TCP_FIN_WAIT_2 (%x)\n", tcp_flags);
		if (tcp_ack) {
			action = TCP_PUSH | TCP_ACK;
			current_tcp_state = TCP_CLOSED;
			puts("\n");
		} else if (tcp_syn) {
			action = TCP_DATA;
		} else if (tcp_fin) {
			action = TCP_DATA;
		}
		break;
	case TCP_FIN_WAIT_1:
		debug_cond(DEBUG_INT_STATE, "TCP_FIN_WAIT_1 (%x)\n", tcp_flags);
		if (tcp_fin) {
			tcp_ack_edge++;
			action = TCP_ACK | TCP_FIN;
			current_tcp_state = TCP_FIN_WAIT_2;
		}
		if (tcp_syn)
			action = TCP_RST;
		if (tcp_ack)
			current_tcp_state = TCP_CLOSED;
		break;
	case TCP_CLOSING:
		debug_cond(DEBUG_INT_STATE, "TCP_CLOSING (%x)\n", tcp_flags);
		if (tcp_ack) {
			action = TCP_PUSH;
			current_tcp_state = TCP_CLOSED;
			puts("\n");
		} else if (tcp_syn) {
			action = TCP_RST;
		} else if (tcp_fin) {
			action = TCP_DATA;
		}
		break;
	}
	return action;
}

/**
 * rxhand_tcp_f() - process received data and call the data handler.
 * @b: the packet
 * @pkt_len: the length of the packet.
 */
void rxhand_tcp_f(union tcp_build_pkt *b, unsigned int pkt_len)
{
	int tcp_len = pkt_len - IP_HDR_SIZE;
	u16 tcp_rx_xsum = b->ip.hdr.ip_sum;
	u8 tcp_action = TCP_DATA;
	u32 tcp_seq_num, tcp_ack_num;
	int tcp_hdr_len, payload_len;

	/* Verify IP header */
	debug_cond(DEBUG_DEV_PKT,
		   "TCP RX in RX Sum (to=%pI4, from=%pI4, len=%d)\n",
		   &b->ip.hdr.ip_src, &b->ip.hdr.ip_dst, pkt_len);

	b->ip.hdr.ip_src = net_server_ip;
	b->ip.hdr.ip_dst = net_ip;
	b->ip.hdr.ip_sum = 0;
	if (tcp_rx_xsum != compute_ip_checksum(b, IP_HDR_SIZE)) {
		debug_cond(DEBUG_DEV_PKT,
			   "TCP RX IP xSum Error (%pI4, %pI4, len=%d)\n",
			   &net_ip, &net_server_ip, pkt_len);
		return;
	}

	/* Build pseudo header and verify TCP header */
	tcp_rx_xsum = b->ip.hdr.tcp_xsum;
	b->ip.hdr.tcp_xsum = 0;
	if (tcp_rx_xsum != tcp_set_pseudo_header((uchar *)b, b->ip.hdr.ip_src,
						 b->ip.hdr.ip_dst, tcp_len,
						 pkt_len)) {
		debug_cond(DEBUG_DEV_PKT,
			   "TCP RX TCP xSum Error (%pI4, %pI4, len=%d)\n",
			   &net_ip, &net_server_ip, tcp_len);
		return;
	}

	tcp_hdr_len = GET_TCP_HDR_LEN_IN_BYTES(b->ip.hdr.tcp_hlen);
	payload_len = tcp_len - tcp_hdr_len;

	if (tcp_hdr_len > TCP_HDR_SIZE)
		tcp_parse_options((uchar *)b + IP_TCP_HDR_SIZE,
				  tcp_hdr_len - TCP_HDR_SIZE);
	/*
	 * Incoming sequence and ack numbers are the server's view of the
	 * numbers. The app must swap the numbers when responding.
	 */
	tcp_seq_num = ntohl(b->ip.hdr.tcp_seq);
	tcp_ack_num = ntohl(b->ip.hdr.tcp_ack);

	/* Packets are not ordered. Send to app as received. */
	tcp_action = tcp_state_machine(b->ip.hdr.tcp_flags,
				       tcp_seq_num, payload_len);

	tcp_activity_count++;
	if (tcp_activity_count > TCP_ACTIVITY) {
		puts("| ");
		tcp_activity_count = 0;
	}

	if ((tcp_action & TCP_PUSH) || payload_len > 0) {
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Notify (action=%x, Seq=%u,Ack=%u,Pay%d)\n",
			   tcp_action, tcp_seq_num, tcp_ack_num, payload_len);

		(*tcp_packet_handler) ((uchar *)b + pkt_len - payload_len, b->ip.hdr.tcp_dst,
				       b->ip.hdr.ip_src, b->ip.hdr.tcp_src, tcp_seq_num,
				       tcp_ack_num, tcp_action, payload_len);

	} else if (tcp_action != TCP_DATA) {
		debug_cond(DEBUG_DEV_PKT,
			   "TCP Action (action=%x,Seq=%u,Ack=%u,Pay=%d)\n",
			   tcp_action, tcp_ack_num, tcp_ack_edge, payload_len);

		/*
		 * Warning: Incoming Ack & Seq sequence numbers are transposed
		 * here to outgoing Seq & Ack sequence numbers
		 */
		net_send_tcp_packet(0, ntohs(b->ip.hdr.tcp_src),
				    ntohs(b->ip.hdr.tcp_dst),
				    (tcp_action & (~TCP_PUSH)),
				    tcp_ack_num, tcp_ack_edge);
	}
}