/*
 * Intel Wireless WiMAX Connection 2400m
 * Handle incoming traffic and deliver it to the control or data planes
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *  - RX reorder support
 *
 * This handles the RX path.
 *
 * We receive an RX message from the bus-specific driver, which
 * contains one or more payloads that have potentially different
 * destinations (data or control paths).
 *
 * So we just take that payload from the transport-specific code in
 * the form of an skb, break it up in chunks (a cloned skb each in the
 * case of network packets) and pass it to netdev or to the
 * command/ack handler (and from there to the WiMAX stack).
 *
 * PROTOCOL FORMAT
 *
 * The format of the buffer is:
 *
 *   HEADER                (struct i2400m_msg_hdr)
 *   PAYLOAD DESCRIPTOR 0  (struct i2400m_pld)
 *   PAYLOAD DESCRIPTOR 1
 *   ...
 *   PAYLOAD DESCRIPTOR N
 *   PAYLOAD 0             (raw bytes)
 *   PAYLOAD 1
 *   ...
 *   PAYLOAD N
 *
 * See tx.c for a deeper description of the alignment requirements and
 * other fun facts about it.
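 *
 * As a worked example, mirroring the arithmetic in i2400m_rx() below:
 * with num_pls payloads, the bytes of PAYLOAD 0 start at offset
 *
 *   ALIGN(sizeof(struct i2400m_msg_hdr)
 *         + num_pls * sizeof(struct i2400m_pld), I2400M_PL_ALIGN)
 *
 * into the buffer, and each following payload starts at the previous
 * one's offset plus its size rounded up with ALIGN(pl_size,
 * I2400M_PL_ALIGN).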
 *
 * DATA PACKETS
 *
 * In firmwares <= v1.3, data packets have no header for RX, but they
 * do for TX (currently unused).
 *
 * In firmware >= 1.4, RX packets have an extended header (16
 * bytes). This header conveys information for management of host
 * reordering of packets (the device offloads storage of the packets
 * for reordering to the host). Read below for more information.
 *
 * The header is used as dummy space to emulate an ethernet header and
 * thus be able to act as an ethernet device without having to reallocate.
 *
 * DATA RX REORDERING
 *
 * Starting in firmware v1.4, the device can deliver packets with
 * special reordering information; this allows it to do packet
 * management more effectively when some frames were lost in the
 * radio traffic.
 *
 * Thus, for RX packets that come out of order, the device gives the
 * driver enough information to queue them properly and then, at some
 * point, the signal to deliver the whole (or part) of the queued
 * packets to the networking stack. There are 16 such queues.
 *
 * This only happens when a packet comes in with the "need reorder"
 * flag set in the RX header. When such a bit is set, the following
 * operations might be indicated:
 *
 *   - reset queue: send all queued packets to the OS
 *
 *   - queue: queue a packet
 *
 *   - update ws: update the queue's window start and deliver queued
 *     packets that meet the criteria
 *
 *   - queue & update ws: queue a packet, update the window start and
 *     deliver queued packets that meet the criteria
 *
 * (delivery criteria: the packet's [normalized] sequence number is
 * lower than the new [normalized] window start).
 *
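 * For example (a sketch; the numbers are illustrative): with a queue
 * whose window start is ws = 2046, packets with sn = 2047 and sn = 0
 * arrive and are queued with normalized sequence numbers nsn = 1 and
 * nsn = 2. An "update ws" to sn = 0 yields a new normalized window
 * start of 2, so only the packet with nsn = 1 (sn = 2047) is
 * delivered; the one with nsn = 2 stays queued and ws becomes 0.
 *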
 * See the i2400m_roq_*() functions for details.
 *
 *
 * ROADMAP
 *
 * i2400m_rx
 *   i2400m_rx_msg_hdr_check
 *   i2400m_rx_pl_descr_check
 *   i2400m_rx_payload
 *     i2400m_net_rx
 *     i2400m_rx_edata
 *       i2400m_net_erx
 *       i2400m_roq_reset
 *         i2400m_net_erx
 *       i2400m_roq_queue
 *         __i2400m_roq_queue
 *       i2400m_roq_update_ws
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *       i2400m_roq_queue_update_ws
 *         __i2400m_roq_queue
 *         __i2400m_roq_update_ws
 *           i2400m_net_erx
 *     i2400m_rx_ctl
 *       i2400m_msg_size_check
 *       i2400m_report_hook_work    [in a workqueue]
 *         i2400m_report_hook
 *       wimax_msg_to_user
 *       i2400m_rx_ctl_ack
 *         wimax_msg_to_user_alloc
 *     i2400m_rx_trace
 *       i2400m_msg_size_check
 *       wimax_msg
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"

static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled,
		 "If true, RX reordering will be disabled.");

struct i2400m_report_hook_args {
	struct sk_buff *skb_rx;
	const struct i2400m_l3l4_hdr *l3l4_hdr;
	size_t size;
	struct list_head list_node;
};


/*
 * Execute i2400m_report_hook in a workqueue
 *
 * Goes over the list of queued reports in i2400m->rx_reports and
 * processes them.
 *
 * NOTE: refcounts on i2400m are not needed because we flush the
 *     workqueue this runs on (i2400m->work_queue) before destroying
 *     i2400m.
 */
void i2400m_report_hook_work(struct work_struct *ws)
{
	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_splice_init(&i2400m->rx_reports, &list);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		if (list_empty(&list))
			break;
		else
			d_printf(1, dev, "processing queued reports\n");
		list_for_each_entry_safe(args, args_next, &list, list_node) {
			d_printf(2, dev, "processing queued report %p\n", args);
			i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
			kfree_skb(args->skb_rx);
			list_del(&args->list_node);
			kfree(args);
		}
	}
}


/*
 * Flush the list of queued reports
 */
static
void i2400m_report_hook_flush(struct i2400m *i2400m)
{
	struct device *dev = i2400m_dev(i2400m);
	struct i2400m_report_hook_args *args, *args_next;
	LIST_HEAD(list);
	unsigned long flags;

	d_printf(1, dev, "flushing queued reports\n");
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	list_splice_init(&i2400m->rx_reports, &list);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	list_for_each_entry_safe(args, args_next, &list, list_node) {
		d_printf(2, dev, "flushing queued report %p\n", args);
		kfree_skb(args->skb_rx);
		list_del(&args->list_node);
		kfree(args);
	}
}


/*
 * Queue a report for later processing
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @l3l4_hdr: pointer to the control message
 * @size: size of the message
 */
static
void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
			      const void *l3l4_hdr, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;
	struct i2400m_report_hook_args *args;

	args = kzalloc(sizeof(*args), GFP_NOIO);
	if (args) {
		args->skb_rx = skb_get(skb_rx);
		args->l3l4_hdr = l3l4_hdr;
		args->size = size;
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		list_add_tail(&args->list_node, &i2400m->rx_reports);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		d_printf(2, dev, "queued report %p\n", args);
		rmb();		/* see i2400m->ready's documentation */
		if (likely(i2400m->ready))	/* only send if up */
			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
	} else {
		if (printk_ratelimit())
			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
				__func__, __LINE__, sizeof(*args));
	}
}


/*
 * Process an ack to a command
 *
 * @i2400m: device descriptor
 * @payload: pointer to message
 * @size: size of the message
 *
 * Pass the acknowledgment (in an skb) to the thread that is waiting
 * for it in i2400m->msg_completion.
 *
 * We need to coordinate properly with the thread waiting for the
 * ack. Check if it is waiting or if it is gone. We release the
 * spinlock to avoid allocating in atomic context (yeah, we could use
 * GFP_ATOMIC, but this is not so speed critical).
 */
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
		       const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	unsigned long flags;
	struct sk_buff *ack_skb;

	/* Anyone waiting for an answer? */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		dev_err(dev, "Huh? reply to command with no waiters\n");
		goto error_no_waiter;
	}
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

	/* Check the waiter didn't time out waiting for the answer... */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
		goto error_waiter_cancelled;
	}
	if (IS_ERR(ack_skb))
		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
	i2400m->ack_skb = ack_skb;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	complete(&i2400m->msg_completion);
	return;

error_waiter_cancelled:
	if (!IS_ERR(ack_skb))
		kfree_skb(ack_skb);
error_no_waiter:
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}


/*
 * Receive and process a control payload
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @payload: pointer to message
 * @size: size of the message
 *
 * There are two types of control RX messages: reports (asynchronous,
 * like your every day interrupts) and 'acks' (responses to a command,
 * get or set request).
 *
 * If it is a report, we run hooks on it (to extract information for
 * things we need to do in the driver) and then pass it over to the
 * WiMAX stack to send it to user space.
 *
 * NOTE: report processing is done in a workqueue specific to the
 *     generic driver, to avoid deadlocks in the system.
 *
 * If it is not a report, it is an ack to a previously executed
 * command, set or get, so wake up whoever is waiting for it from
 * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
 *
 * Note that the sizes we pass to other functions from here are the
 * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
 * verified in _msg_size_check() that they are congruent.
 *
 * For reports: we can't clone the original skb where the data is
 * because we need to send this up via netlink; netlink has to add
 * headers and we can't overwrite what's preceding the payload... as
 * it is another message. So we just dup them.
 */
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (msg_type & I2400M_MT_REPORT_MASK) {
		/*
		 * Process each report
		 *
		 * - it has to run serialized as well
		 *
		 * - the handling might force the execution of
		 *   commands. That might cause reentrancy issues with
		 *   bus-specific subdrivers and workqueues, so we
		 *   run it in a separate workqueue.
		 *
		 * - when the driver is not yet ready to handle them,
		 *   they are queued and at some point the queue is
		 *   restarted [NOTE: we can't queue SKBs directly, as
		 *   this might be a piece of a SKB, not the whole
		 *   thing, and this is cheaper than cloning the
		 *   SKB].
		 *
		 * Note we don't do refcounting for the device
		 * structure; this is because before destroying
		 * 'i2400m', we make sure to flush the
		 * i2400m->work_queue, so there are no issues.
		 */
		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
		if (unlikely(i2400m->trace_msg_from_user))
			wimax_msg(&i2400m->wimax_dev, "echo",
				  l3l4_hdr, size, GFP_KERNEL);
		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
				   GFP_KERNEL);
		if (result < 0)
			dev_err(dev, "error sending report to userspace: %d\n",
				result);
	} else		/* an ack to a CMD, GET or SET */
		i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
	return;
}


/*
 * Receive and send up a trace
 *
 * @i2400m: device descriptor
 * @payload: pointer to trace message inside the skb
 * @size: size of the message
 *
 * The i2400m might produce trace information (diagnostics) and we
 * send them through a different kernel-to-user pipe (to avoid
 * clogging it).
 *
 * As in i2400m_rx_ctl(), we can't clone the original skb where the
 * data is because we need to send this up via netlink; netlink has to
 * add headers and we can't overwrite what's preceding the
 * payload... as it is another message. So we just dup them.
 */
static
void i2400m_rx_trace(struct i2400m *i2400m,
		     const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
	if (result < 0)
		dev_err(dev, "error sending trace to userspace: %d\n",
			result);
error_check:
	return;
}


/*
 * Reorder queue data stored on skb->cb while the skb is queued in the
 * reorder queues.
 */
struct i2400m_roq_data {
	unsigned sn;		/* Serial number for the skb */
	enum i2400m_cs cs;	/* packet type for the skb */
};


/*
 * ReOrder Queue
 *
 * @ws: Window Start; sequence number where the current window start
 *     is for this queue
 * @queue: the skb queue itself
 * @log: circular ring buffer used to log information about the
 *     reorder process in this queue that can be displayed in case of
 *     error to help diagnose it.
 *
 * This is the head for a list of skbs. The skb->cb member of each
 * skb queued here contains a 'struct i2400m_roq_data' where we store
 * the sequence number (sn) and the cs (packet type) coming from the
 * RX payload header from the device.
 */
struct i2400m_roq
{
	unsigned ws;
	struct sk_buff_head queue;
	struct i2400m_roq_log *log;
};


static
void __i2400m_roq_init(struct i2400m_roq *roq)
{
	roq->ws = 0;
	skb_queue_head_init(&roq->queue);
}


static
unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
		/ sizeof(*roq);
}


/*
 * Normalize a sequence number based on the queue's window start
 *
 * nsn = (sn - ws) % 2048
 *
 * Note that if @sn < @roq->ws, we still need a positive number; %'s
 * sign is implementation specific, so we normalize it by adding 2048
 * to bring it back into the positive range.
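 *
 * A worked example: with @roq->ws = 2046 and @sn = 1 (the 2048-wide
 * sequence space wrapped around), (1 - 2046) % 2048 yields -2045 in
 * C, which we correct to -2045 + 2048 = 3: the packet sits three
 * slots past the window start.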
 */
static
unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
{
	int r;
	r = ((int) sn - (int) roq->ws) % 2048;
	if (r < 0)
		r += 2048;
	return r;
}


/*
 * Circular buffer to keep the last N reorder operations
 *
 * In case something fails, dump them to try to figure out what
 * happened.
 */
enum {
	I2400M_ROQ_LOG_LENGTH = 32,
};

struct i2400m_roq_log {
	struct i2400m_roq_log_entry {
		enum i2400m_ro_type type;
		unsigned ws, count, sn, nsn, new_ws;
	} entry[I2400M_ROQ_LOG_LENGTH];
	unsigned in, out;
};
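
/*
 * An illustration of the @in/@out bookkeeping done by
 * i2400m_roq_log_add() below (numbers made up): with in = 35 and
 * out = 3 the FIFO is full (35 - 3 == I2400M_ROQ_LOG_LENGTH), so the
 * next addition bumps @out to 4 and writes at slot 35 % 32 = 3,
 * eating the oldest entry.
 */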


/* Print a log entry */
static
void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
				unsigned e_index,
				struct i2400m_roq_log_entry *e)
{
	struct device *dev = i2400m_dev(i2400m);

	switch (e->type) {
	case I2400M_RO_TYPE_RESET:
		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET:
		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
			index, e->ws, e->count, e->sn, e->nsn);
		break;
	case I2400M_RO_TYPE_WS:
		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	case I2400M_RO_TYPE_PACKET_WS:
		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
			" - new nws %u\n",
			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
		break;
	default:
		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
			index, e_index, e->type);
		break;
	}
}


static
void i2400m_roq_log_add(struct i2400m *i2400m,
			struct i2400m_roq *roq, enum i2400m_ro_type type,
			unsigned ws, unsigned count, unsigned sn,
			unsigned nsn, unsigned new_ws)
{
	struct i2400m_roq_log_entry *e;
	unsigned cnt_idx;
	int index = __i2400m_roq_index(i2400m, roq);

	/* if we run out of space, we eat from the end */
	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
		roq->log->out++;
	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
	e = &roq->log->entry[cnt_idx];

	e->type = type;
	e->ws = ws;
	e->count = count;
	e->sn = sn;
	e->nsn = nsn;
	e->new_ws = new_ws;

	if (d_test(1))
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
}


/* Dump all the entries in the FIFO and reinitialize it */
static
void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	unsigned cnt, cnt_idx;
	struct i2400m_roq_log_entry *e;
	int index = __i2400m_roq_index(i2400m, roq);

	BUG_ON(roq->log->out > roq->log->in);
	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
		e = &roq->log->entry[cnt_idx];
		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
		memset(e, 0, sizeof(*e));
	}
	roq->log->in = roq->log->out = 0;
}


/*
 * Backbone for the queuing of an skb (by normalized sequence number)
 *
 * @i2400m: device descriptor
 * @roq: reorder queue where to add
 * @skb: the skb to add
 * @sn: the sequence number of the skb
 * @nsn: the normalized sequence number of the skb (pre-computed by the
 *     caller from the @sn and @roq->ws).
 *
 * We try first a couple of quick cases:
 *
 *   - the queue is empty
 *   - the skb would be appended to the queue
 *
 * These will be the most common operations.
 *
 * If these fail, then we have to do a sorted insertion in the queue,
 * which is the slowest path.
 *
 * We don't have to acquire a reference count as we are going to own it.
 */
static
void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
			struct sk_buff *skb, unsigned sn, unsigned nsn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr;
	struct i2400m_roq_data *roq_data_itr, *roq_data;
	unsigned nsn_itr;

	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
		  i2400m, roq, skb, sn, nsn);

	roq_data = (struct i2400m_roq_data *) &skb->cb;
	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
	roq_data->sn = sn;
	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
		 roq, roq->ws, nsn, roq_data->sn);

	/* Queues will be empty on not-so-bad environments, so try
	 * that first */
	if (skb_queue_empty(&roq->queue)) {
		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
		__skb_queue_head(&roq->queue, skb);
		goto out;
	}
	/* Now try append, as most of the operations will be that */
	skb_itr = skb_peek_tail(&roq->queue);
	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
	/* NSN bounds assumed correct (checked when it was queued) */
	if (nsn >= nsn_itr) {
		d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
		__skb_queue_tail(&roq->queue, skb);
		goto out;
	}
	/* None of the fast path options worked. Iterate to find the
	 * right spot where to insert the packet; we know the queue is
	 * not empty, so we are not the first ones; we also know we
	 * are not going to be the last ones. The list is sorted, so
	 * we have to insert before the first one with an nsn_itr
	 * greater than our nsn. */
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		if (nsn_itr > nsn) {
			d_printf(2, dev, "ERX: roq %p - queued before %p "
				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
				 roq_data_itr->sn);
			__skb_queue_before(&roq->queue, skb_itr, skb);
			goto out;
		}
	}
	/* If we get here, that is VERY bad -- print info to help
	 * diagnose and crash it */
	dev_err(dev, "SW BUG? failed to insert packet\n");
	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
		roq, roq->ws, skb, nsn, roq_data->sn);
	skb_queue_walk(&roq->queue, skb_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
			roq, skb_itr, nsn_itr, roq_data_itr->sn);
	}
	BUG();
out:
	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
		i2400m, roq, skb, sn, nsn);
}


/*
 * Backbone for the update window start operation
 *
 * @i2400m: device descriptor
 * @roq: Reorder queue
 * @sn: New sequence number
 *
 * Updates the window start of a queue; when doing so, it must deliver
 * to the networking stack all the queued skb's whose normalized
 * sequence number is lower than the new normalized window start.
 */
static
unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;
	unsigned new_nws, nsn_itr;

	new_nws = __i2400m_roq_nsn(roq, sn);
	/*
	 * For type 2 (update_window_start) RX messages, there is no
	 * need to check if the normalized sequence number is greater
	 * than 1023. Simply insert and deliver all packets to the
	 * host up to the window start.
	 */
	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
		/* NSN bounds assumed correct (checked when it was queued) */
		if (nsn_itr < new_nws) {
			d_printf(2, dev, "ERX: roq %p - release skb %p "
				 "(nsn %u/%u new nws %u)\n",
				 roq, skb_itr, nsn_itr, roq_data_itr->sn,
				 new_nws);
			__skb_unlink(skb_itr, &roq->queue);
			i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
		}
		else
			break;	/* rest of packets all nsn_itr > nws */
	}
	roq->ws = sn;
	return new_nws;
}


/*
 * Reset a queue
 *
 * @i2400m: device descriptor
 * @roq: reorder queue to reset
 *
 * Deliver all the packets and reset the window-start to zero. The
 * name is kind of misleading.
 */
static
void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
{
	struct device *dev = i2400m_dev(i2400m);
	struct sk_buff *skb_itr, *tmp_itr;
	struct i2400m_roq_data *roq_data_itr;

	d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
			   roq->ws, skb_queue_len(&roq->queue),
			   ~0, ~0, 0);
	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
		d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
			 roq, skb_itr, roq_data_itr->sn);
		__skb_unlink(skb_itr, &roq->queue);
		i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
	}
	roq->ws = 0;
	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
}


/*
 * Queue a packet
 *
 * @i2400m: device descriptor
 * @roq: reorder queue to add to
 * @skb: containing the packet data
 * @lbn: Last block number of the packet in @skb
 *
 * The hardware is asking the driver to queue a packet for later
 * delivery to the networking stack.
 */
static
void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
		      struct sk_buff *skb, unsigned lbn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
		  i2400m, roq, skb, lbn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, lbn);
	if (unlikely(nsn >= 1024)) {
		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
			nsn, lbn, roq->ws);
		i2400m_roq_log_dump(i2400m, roq);
		i2400m_reset(i2400m, I2400M_RT_WARM);
	} else {
		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
				   roq->ws, len, lbn, nsn, ~0);
	}
	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
		i2400m, roq, skb, lbn);
}


/*
 * Update the window start in a reorder queue and deliver all skbs
 * with a lower window start
 *
 * @i2400m: device descriptor
 * @roq: Reorder queue
 * @sn: New sequence number
 */
static
void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
			  unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned old_ws, nsn, len;

	d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
	old_ws = roq->ws;
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
			   old_ws, len, sn, nsn, roq->ws);
	d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
}


/*
 * Queue a packet and update the window start
 *
 * @i2400m: device descriptor
 * @roq: reorder queue to add to
 * @skb: containing the packet data
 * @sn: Last block number of the packet in @skb
 *
 * Note that unlike i2400m_roq_update_ws(), which sets the new window
 * start to @sn, in here we'll set it to @sn + 1.
 */
static
void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
				struct sk_buff *skb, unsigned sn)
{
	struct device *dev = i2400m_dev(i2400m);
	unsigned nsn, old_ws, len;

	d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
		  i2400m, roq, skb, sn);
	len = skb_queue_len(&roq->queue);
	nsn = __i2400m_roq_nsn(roq, sn);
	/*
	 * For type 3 (queue_update_window_start) RX messages, there is
	 * no need to check if the normalized sequence number is greater
	 * than 1023. Simply insert and deliver all packets to the host
	 * up to the window start.
	 */
	old_ws = roq->ws;
	/* If the queue is empty, don't bother as we'd queue
	 * it and immediately unqueue it -- just deliver it.
	 */
	if (len == 0) {
		struct i2400m_roq_data *roq_data;
		roq_data = (struct i2400m_roq_data *) &skb->cb;
		i2400m_net_erx(i2400m, skb, roq_data->cs);
	} else
		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);

	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
			   old_ws, len, sn, nsn, roq->ws);

	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
		i2400m, roq, skb, sn);
}


/*
 * This routine destroys the memory allocated for rx_roq, when no
 * other thread is accessing it. Access to rx_roq is refcounted by
 * rx_roq_refcount, hence the memory allocated must be destroyed when
 * rx_roq_refcount becomes zero. This routine gets executed when
 * rx_roq_refcount becomes zero.
 */
static void i2400m_rx_roq_destroy(struct kref *ref)
{
	unsigned itr;
	struct i2400m *i2400m
		= container_of(ref, struct i2400m, rx_roq_refcount);
	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
	kfree(i2400m->rx_roq[0].log);
	kfree(i2400m->rx_roq);
	i2400m->rx_roq = NULL;
}

/*
 * Receive and send up an extended data packet
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the extended data packet
 * @single_last: 1 if the payload is the only one or the last one of
 *     the skb.
 * @payload: pointer to the packet's data inside the skb
 * @size: size of the payload
 *
 * Starting in v1.4 of the i2400m's firmware, the device can send data
 * packets to the host in an extended format; this includes a 16 byte
 * header (struct i2400m_pl_edata_hdr). Using this header's space we
 * can fake ethernet headers for ethernet device emulation without
 * having to copy packets around.
 *
 * This function handles said path, passing over to the networking
 * stack a data packet that might have reordering requirements.
 *
 * It needs to decide if the skb in which the packet is contained can
 * be reused or if it needs to be cloned. Then it has to be trimmed at
 * the edges so that the beginning is the space for the eth header and
 * then pass it to i2400m_net_erx() for the stack.
 *
 * Assumes the caller has verified the sanity of the payload (size,
 * etc) already.
 */
static
void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
		     unsigned single_last, const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_pl_edata_hdr *hdr = payload;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	struct sk_buff *skb;
	enum i2400m_cs cs;
	u32 reorder;
	unsigned ro_needed, ro_type, ro_cin, ro_sn;
	struct i2400m_roq *roq;
	struct i2400m_roq_data *roq_data;
	unsigned long flags;

	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));

	d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
	if (size < sizeof(*hdr)) {
		dev_err(dev, "ERX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", size, sizeof(*hdr));
		goto error;
	}

	if (single_last) {
		skb = skb_get(skb_rx);
		d_printf(3, dev, "ERX: skb %p reusing\n", skb);
	} else {
		skb = skb_clone(skb_rx, GFP_KERNEL);
		if (skb == NULL) {
			dev_err(dev, "ERX: no memory to clone skb\n");
			net_dev->stats.rx_dropped++;
			goto error_skb_clone;
		}
		d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
	}
	/* now we have to pull and trim so that the skb points to the
	 * beginning of the IP packet; the netdev part will add the
	 * ethernet header as needed - we know there is enough space
	 * because we checked in i2400m_rx_edata(). */
	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
	skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));

	reorder = le32_to_cpu(hdr->reorder);
	ro_needed = reorder & I2400M_RO_NEEDED;
	cs = hdr->cs;
	if (ro_needed) {
		ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;

		spin_lock_irqsave(&i2400m->rx_lock, flags);
		if (i2400m->rx_roq == NULL) {
			kfree_skb(skb);	/* rx_roq is already destroyed */
			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
			goto error;
		}
		roq = &i2400m->rx_roq[ro_cin];
		kref_get(&i2400m->rx_roq_refcount);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);

		roq_data = (struct i2400m_roq_data *) &skb->cb;
		roq_data->sn = ro_sn;
		roq_data->cs = cs;
		d_printf(2, dev, "ERX: reorder needed: "
			 "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
			 ro_type, ro_cin, roq->ws, ro_sn,
			 __i2400m_roq_nsn(roq, ro_sn), size);
		d_dump(2, dev, payload, size);
		switch (ro_type) {
		case I2400M_RO_TYPE_RESET:
			i2400m_roq_reset(i2400m, roq);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET:
			i2400m_roq_queue(i2400m, roq, skb, ro_sn);
			break;
		case I2400M_RO_TYPE_WS:
			i2400m_roq_update_ws(i2400m, roq, ro_sn);
			kfree_skb(skb);	/* no data here */
			break;
		case I2400M_RO_TYPE_PACKET_WS:
			i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
			break;
		default:
			dev_err(dev, "HW BUG? unknown reorder type %u\n",
				ro_type);
		}

		spin_lock_irqsave(&i2400m->rx_lock, flags);
		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	}
	else
		i2400m_net_erx(i2400m, skb, cs);
error_skb_clone:
error:
	d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
		"size %zu) = void\n", i2400m, skb_rx, single_last, payload,
		size);
}


/*
 * Act on a received payload
 *
 * @i2400m: device instance
 * @skb_rx: skb where the transaction was received
 * @single_last: 1 if this is the only payload or the last one (so the
 *     skb can be reused instead of cloned).
 * @pld: payload descriptor
 * @payload: payload data
 *
 * Upon reception of a payload, look at its guts in the payload
 * descriptor and decide what to do with it. If it is a single payload
 * skb or if the last skb is a data packet, the skb will be referenced
 * and modified (so it doesn't have to be cloned).
 */
static
void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
		       unsigned single_last, const struct i2400m_pld *pld,
		       const void *payload)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
		 pl_type, pl_size);
	d_dump(8, dev, payload, pl_size);

	switch (pl_type) {
	case I2400M_PT_DATA:
		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
		i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	case I2400M_PT_CTRL:
		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
		break;
	case I2400M_PT_TRACE:
		i2400m_rx_trace(i2400m, payload, pl_size);
		break;
	case I2400M_PT_EDATA:
		d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
		i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
		break;
	default:	/* Anything else shouldn't come to the host */
		if (printk_ratelimit())
			dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
				pl_type);
	}
}


/*
 * Check a received transaction's message header
 *
 * @i2400m: device descriptor
 * @msg_hdr: message header
 * @buf_size: size of the received buffer
 *
 * Check that the declarations done by an RX buffer message header are
 * sane and consistent with the amount of data that was received.
 */
static
int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
			    const struct i2400m_msg_hdr *msg_hdr,
			    size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	if (buf_size < sizeof(*msg_hdr)) {
		dev_err(dev, "RX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
		goto error;
	}
	if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
		dev_err(dev, "RX: HW BUG? message received with unknown "
			"barker 0x%08x (buf_size %zu bytes)\n",
			le32_to_cpu(msg_hdr->barker), buf_size);
		goto error;
	}
	if (msg_hdr->num_pls == 0) {
		dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
		goto error;
	}
	if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
		dev_err(dev, "RX: HW BUG? message contains more payload "
			"than maximum; ignoring.\n");
		goto error;
	}
	result = 0;
error:
	return result;
}


/*
 * Check a payload descriptor against the received data
 *
 * @i2400m: device descriptor
 * @pld: payload descriptor
 * @pl_itr: offset (in bytes) in the received buffer where the payload
 *     is located
 * @buf_size: size of the received buffer
 *
 * Given a payload descriptor (part of an RX buffer), check that it is
 * sane and that the data it declares fits in the buffer.
 */
static
int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
			     const struct i2400m_pld *pld,
			     size_t pl_itr, size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	if (pl_size > i2400m->bus_pl_size_max) {
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
			"bigger than maximum %zu; ignoring message\n",
			pl_itr, pl_size, i2400m->bus_pl_size_max);
		goto error;
	}
	if (pl_itr + pl_size > buf_size) {	/* enough? */
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
			"goes beyond the received buffer "
			"size (%zu bytes); ignoring message\n",
			pl_itr, pl_size, buf_size);
		goto error;
	}
	if (pl_type >= I2400M_PT_ILLEGAL) {
		dev_err(dev, "RX: HW BUG? illegal payload type %u; "
			"ignoring message\n", pl_type);
		goto error;
	}
	result = 0;
error:
	return result;
}


/**
 * i2400m_rx - Receive a buffer of data from the device
 *
 * @i2400m: device descriptor
 * @skb: skbuff where the data has been received
 *
 * Parse in a buffer of data that contains an RX message sent from the
 * device. See the file header for the format. Run all checks on the
 * buffer header, then run over each payload's descriptors, verify
 * their consistency and act on each payload's contents. If
 * everything is successful, update the device's statistics.
 *
 * Note: You need to set the skb to contain only the length of the
 * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
 *
 * Returns:
 *
 * 0 if ok, < 0 errno on error
 *
 * If ok, this function now owns the skb and the caller DOESN'T have
 * to run kfree_skb() on it. However, on error, the caller still owns
 * the skb and is responsible for releasing it.
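 *
 * A bus driver would thus typically call it like this (a sketch;
 * 'read_size' stands for however many bytes the transport just put
 * in a freshly allocated skb):
 *
 *	skb_put(skb, read_size);
 *	result = i2400m_rx(i2400m, skb);
 *	if (result < 0)
 *		kfree_skb(skb);		<-- on error we still own it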
 */
int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
{
	int i, result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_msg_hdr *msg_hdr;
	size_t pl_itr, pl_size;
	unsigned long flags;
	unsigned num_pls, single_last, skb_len;

	skb_len = skb->len;
	d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
		  i2400m, skb, skb_len);
	result = -EIO;
	msg_hdr = (void *) skb->data;
	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
	if (result < 0)
		goto error_msg_hdr_check;
	result = -EIO;
	num_pls = le16_to_cpu(msg_hdr->num_pls);
	pl_itr = sizeof(*msg_hdr) +	/* Check payload descriptor(s) */
		num_pls * sizeof(msg_hdr->pld[0]);
	pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
	if (pl_itr > skb_len) {	/* got all the payload descriptors? */
		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
			"%u payload descriptors (%zu each, total %zu)\n",
			skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
		goto error_pl_descr_short;
	}
	/* Walk each payload--check we really got it */
	for (i = 0; i < num_pls; i++) {
		/* work around old gcc warnings */
		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
						  pl_itr, skb_len);
		if (result < 0)
			goto error_pl_descr_check;
		single_last = num_pls == 1 || i == num_pls - 1;
		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
				  skb->data + pl_itr);
		pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
		cond_resched();		/* Don't monopolize */
	}
	kfree_skb(skb);
	/* Update device statistics */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->rx_pl_num += i;
	if (i > i2400m->rx_pl_max)
		i2400m->rx_pl_max = i;
	if (i < i2400m->rx_pl_min)
		i2400m->rx_pl_min = i;
	i2400m->rx_num++;
	i2400m->rx_size_acc += skb_len;
	if (skb_len < i2400m->rx_size_min)
		i2400m->rx_size_min = skb_len;
	if (skb_len > i2400m->rx_size_max)
		i2400m->rx_size_max = skb_len;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
	d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
		i2400m, skb, skb_len, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_rx);


void i2400m_unknown_barker(struct i2400m *i2400m,
			   const void *buf, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	char prefix[64];
	const __le32 *barker = buf;
	dev_err(dev, "RX: HW BUG? unknown barker %08x, "
		"dropping %zu bytes\n", le32_to_cpu(*barker), size);
	snprintf(prefix, sizeof(prefix), "%s %s: ",
		 dev_driver_string(dev), dev_name(dev));
	if (size > 64) {
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       8, 4, buf, 64, 0);
		printk(KERN_ERR "%s... (only first 64 bytes "
		       "dumped)\n", prefix);
	} else
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       8, 4, buf, size, 0);
}
EXPORT_SYMBOL(i2400m_unknown_barker);


/*
 * Initialize the RX queue and infrastructure
 *
 * This sets up all the RX reordering infrastructure, which will not
 * be used if reordering is not enabled or if the firmware does not
 * support it. The device is told to do reordering in
 * i2400m_dev_initialize(), where it also looks at the value of the
 * i2400m->rx_reorder switch before taking a decision.
 *
 * Note we allocate the roq queues in one chunk and the actual logging
 * support for them in another one, and then we set up the pointers
 * from the first to the second.
 */
int i2400m_rx_setup(struct i2400m *i2400m)
{
	int result = 0;
	struct device *dev = i2400m_dev(i2400m);

	i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
	if (i2400m->rx_reorder) {
		unsigned itr;
		size_t size;
		struct i2400m_roq_log *rd;

		result = -ENOMEM;

		size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
		i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
		if (i2400m->rx_roq == NULL) {
			dev_err(dev, "RX: cannot allocate %zu bytes for "
				"reorder queues\n", size);
			goto error_roq_alloc;
		}

		size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
		rd = kzalloc(size, GFP_KERNEL);
		if (rd == NULL) {
			dev_err(dev, "RX: cannot allocate %zu bytes for "
				"reorder queues log areas\n", size);
			result = -ENOMEM;
			goto error_roq_log_alloc;
		}

		for (itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
			__i2400m_roq_init(&i2400m->rx_roq[itr]);
			i2400m->rx_roq[itr].log = &rd[itr];
		}
		kref_init(&i2400m->rx_roq_refcount);
	}
	return 0;

error_roq_log_alloc:
	kfree(i2400m->rx_roq);
error_roq_alloc:
	return result;
}


/* Tear down the RX queue and infrastructure */
void i2400m_rx_release(struct i2400m *i2400m)
{
	unsigned long flags;

	if (i2400m->rx_reorder) {
		spin_lock_irqsave(&i2400m->rx_lock, flags);
		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	}
	/* at this point, nothing can be received... */
	i2400m_report_hook_flush(i2400m);
}