net/netrom/nr_route.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

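/* Next identifier to hand out to a newly created neighbour entry. */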
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

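/*
 * Find a node by callsign and take a reference on it. The caller must
 * drop the reference with nr_node_put().
 */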
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

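/*
 * Find a neighbour by callsign and device and take a reference on it.
 * The caller must drop the reference with nr_neigh_put().
 */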
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev = nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25 = NULL;
		nr_neigh->dev = dev;
		nr_neigh->quality = sysctl_netrom_default_path_quality;
		nr_neigh->locked = 0;
		nr_neigh->count = 0;
		nr_neigh->number = nr_neigh_no++;
		nr_neigh->failed = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_KERNEL);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		/* fall through */
	case 2:
		re_sort_routes(nr_node, 0, 1);
		/* fall through */
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

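/* The caller must hold nr_node_list_lock. */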
static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

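/* The caller must hold nr_neigh_list_lock. */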
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * "Delete" a node. Strictly speaking, remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					/* fall through */
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					/* fall through */
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25 = NULL;
	nr_neigh->dev = dev;
	nr_neigh->quality = quality;
	nr_neigh->locked = 1;
	nr_neigh->count = 0;
	nr_neigh->number = nr_neigh_no++;
	nr_neigh->failed = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL)
		return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via it.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;

			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					/* fall through */
				case 1:
					s->routes[1] = s->routes[2];
					/* fall through */
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							/* fall through */
						case 1:
							t->routes[1] = t->routes[2];
							/* fall through */
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the given device is a valid AX.25 interface that is "up",
 * or a valid Ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

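/*
 * Copy a caller supplied list of digipeaters into an ax25_digi structure.
 * Returns NULL when there are no digipeaters, so the result can be passed
 * straight to nr_add_node() or nr_add_neigh().
 */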
static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i] = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * A level 2 link has timed out, so it appears to be a poor link; don't
 * use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

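	/*
	 * The NET/ROM network header starts at skb->data: bytes 0-6 hold the
	 * source callsign, bytes 7-13 the destination callsign and byte 14
	 * the time-to-live.
	 */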
	nr_src = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the NET/ROM headers so we should get our
	   own skb; we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			   ax2asc(buf, &nr_node->callsign),
			   (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			   nr_node->which + 1,
			   nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				   nr_node->routes[i].quality,
				   nr_node->routes[i].obs_count,
				   nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_neigh_list_lock)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
	__releases(&nr_neigh_list_lock)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			   nr_neigh->number,
			   ax2asc(buf, &nr_neigh->callsign),
			   nr_neigh->dev ? nr_neigh->dev->name : "???",
			   nr_neigh->quality,
			   nr_neigh->locked,
			   nr_neigh->count,
			   nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};
#endif

/*
 * Free all memory associated with the nodes and routes lists.
 */
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}