/*
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 *	Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);

static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);

static struct listen_struct {
	struct listen_struct *next;
	ax25_address callsign;
	struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);

/*
 * Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
 * AX25_P_IP or AX25_P_ARP ...
 */
void ax25_register_pid(struct ax25_protocol *ap)
{
	write_lock_bh(&protocol_list_lock);
	ap->next = protocol_list;
	protocol_list = ap;
	write_unlock_bh(&protocol_list_lock);
}

EXPORT_SYMBOL_GPL(ax25_register_pid);

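/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * caller would register a receive handler for a private, non-internal PID
 * with a statically allocated ax25_protocol entry, along these lines
 * (demo_rcv, demo_pid and the PID value 0xF1 are made up for illustration;
 * the handler below simply discards the frame as a placeholder):
 *
 *	static int demo_rcv(struct sk_buff *skb, ax25_cb *ax25)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct ax25_protocol demo_pid = {
 *		.pid	= 0xF1,
 *		.func	= demo_rcv,
 *	};
 *
 *	ax25_register_pid(&demo_pid);
 *	...
 *	ax25_protocol_release(0xF1);
 *
 * The entry is caller-owned; ax25_protocol_release() only unlinks it from
 * protocol_list and never frees it.
 */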
void ax25_protocol_release(unsigned int pid)
{
	struct ax25_protocol *protocol;

	write_lock_bh(&protocol_list_lock);
	protocol = protocol_list;
	if (protocol == NULL)
		goto out;

	if (protocol->pid == pid) {
		protocol_list = protocol->next;
		goto out;
	}

	while (protocol != NULL && protocol->next != NULL) {
		if (protocol->next->pid == pid) {
			protocol->next = protocol->next->next;
			goto out;
		}

		protocol = protocol->next;
	}
out:
	write_unlock_bh(&protocol_list_lock);
}

EXPORT_SYMBOL(ax25_protocol_release);

void ax25_linkfail_register(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
	spin_unlock_bh(&linkfail_lock);
}

EXPORT_SYMBOL(ax25_linkfail_register);

void ax25_linkfail_release(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_del_init(&lf->lf_node);
	spin_unlock_bh(&linkfail_lock);
}

EXPORT_SYMBOL(ax25_linkfail_release);

int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *listen;

	if (ax25_listen_mine(callsign, dev))
		return 0;

	if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	listen->callsign = *callsign;
	listen->dev = dev;

	spin_lock_bh(&listen_lock);
	listen->next = listen_list;
	listen_list = listen;
	spin_unlock_bh(&listen_lock);

	return 0;
}

EXPORT_SYMBOL(ax25_listen_register);

void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *s, *listen;

	spin_lock_bh(&listen_lock);
	listen = listen_list;
	if (listen == NULL) {
		spin_unlock_bh(&listen_lock);
		return;
	}

	if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) {
		listen_list = listen->next;
		spin_unlock_bh(&listen_lock);
		kfree(listen);
		return;
	}

	while (listen != NULL && listen->next != NULL) {
		if (ax25cmp(&listen->next->callsign, callsign) == 0 && listen->next->dev == dev) {
			s = listen->next;
			listen->next = listen->next->next;
			spin_unlock_bh(&listen_lock);
			kfree(s);
			return;
		}

		listen = listen->next;
	}
	spin_unlock_bh(&listen_lock);
}

EXPORT_SYMBOL(ax25_listen_release);

int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
{
	int (*res)(struct sk_buff *, ax25_cb *) = NULL;
	struct ax25_protocol *protocol;

	read_lock(&protocol_list_lock);
	for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
		if (protocol->pid == pid) {
			res = protocol->func;
			break;
		}
	read_unlock(&protocol_list_lock);

	return res;
}

int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *listen;

	spin_lock_bh(&listen_lock);
	for (listen = listen_list; listen != NULL; listen = listen->next)
		if (ax25cmp(&listen->callsign, callsign) == 0 &&
		    (listen->dev == dev || listen->dev == NULL)) {
			spin_unlock_bh(&listen_lock);
			return 1;
		}
	spin_unlock_bh(&listen_lock);

	return 0;
}

void ax25_link_failed(ax25_cb *ax25, int reason)
{
	struct ax25_linkfail *lf;
	struct hlist_node *node;

	spin_lock_bh(&linkfail_lock);
	hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
		lf->func(ax25, reason);
	spin_unlock_bh(&linkfail_lock);
}

int ax25_protocol_is_registered(unsigned int pid)
{
	struct ax25_protocol *protocol;
	int res = 0;

	read_lock_bh(&protocol_list_lock);
	for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
		if (protocol->pid == pid) {
			res = 1;
			break;
		}
	read_unlock_bh(&protocol_list_lock);

	return res;
}
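
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * upper layer would hook into link-failure notification with a statically
 * allocated ax25_linkfail entry.  The callback is invoked by
 * ax25_link_failed() above while linkfail_lock is held in BH context, so
 * it must not sleep (demo_link_failed and demo_lf are made-up names):
 *
 *	static void demo_link_failed(ax25_cb *ax25, int reason)
 *	{
 *		pr_debug("AX.25 link failed, reason %d\n", reason);
 *	}
 *
 *	static struct ax25_linkfail demo_lf = {
 *		.func = demo_link_failed,
 *	};
 *
 *	ax25_linkfail_register(&demo_lf);
 *	...
 *	ax25_linkfail_release(&demo_lf);
 */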