drivers/usb/gadget/udc/net2272.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Driver for PLX NET2272 USB device controller
4 *
5 * Copyright (C) 2005-2006 PLX Technology, Inc.
6 * Copyright (C) 2006-2011 Analog Devices, Inc.
7 */
8
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/errno.h>
12 #include <linux/gpio.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/ioport.h>
17 #include <linux/kernel.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/pci.h>
22 #include <linux/platform_device.h>
23 #include <linux/prefetch.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <linux/usb.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/usb/gadget.h>
30
31 #include <asm/byteorder.h>
32 #include <asm/unaligned.h>
33
34 #include "net2272.h"
35
36 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
37
38 static const char driver_name[] = "net2272";
39 static const char driver_vers[] = "2006 October 17/mainline";
40 static const char driver_desc[] = DRIVER_DESC;
41
42 static const char ep0name[] = "ep0";
43 static const char * const ep_name[] = {
44 ep0name,
45 "ep-a", "ep-b", "ep-c",
46 };
47
48 #ifdef CONFIG_USB_NET2272_DMA
49 /*
50 * use_dma: the NET2272 can use an external DMA controller.
51  * Note that since there is no generic DMA API, some functions,
52  * notably request_dma, start_dma, and cancel_dma, will need to be
53 * modified for your platform's particular dma controller.
54 *
55 * If use_dma is disabled, pio will be used instead.
56 */
57 static bool use_dma = 0;
58 module_param(use_dma, bool, 0644);
59
60 /*
61 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
62 * The NET2272 can only use dma for a single endpoint at a time.
63 * At some point this could be modified to allow either endpoint
64 * to take control of dma as it becomes available.
65 *
66 * Note that DMA should not be used on OUT endpoints unless it can
67 * be guaranteed that no short packets will arrive on an IN endpoint
68 * while the DMA operation is pending. Otherwise the OUT DMA will
69 * terminate prematurely (See NET2272 Errata 630-0213-0101)
70 */
71 static ushort dma_ep = 1;
72 module_param(dma_ep, ushort, 0644);
73
74 /*
75  * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
76 * mode 0 == Slow DREQ mode
77 * mode 1 == Fast DREQ mode
78 * mode 2 == Burst mode
79 */
80 static ushort dma_mode = 2;
81 module_param(dma_mode, ushort, 0644);
82 #else
83 #define use_dma 0
84 #define dma_ep 1
85 #define dma_mode 2
86 #endif
87
88 /*
89 * fifo_mode: net2272 buffer configuration:
90 * mode 0 == ep-{a,b,c} 512db each
91 * mode 1 == ep-a 1k, ep-{b,c} 512db
92 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
93 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
94 */
95 static ushort fifo_mode = 0;
96 module_param(fifo_mode, ushort, 0644);
97
98 /*
99 * enable_suspend: When enabled, the driver will respond to
100 * USB suspend requests by powering down the NET2272. Otherwise,
101  * USB suspend requests will be ignored. This is acceptable for
102  * self-powered devices. For bus-powered devices set this to 1.
103 */
104 static ushort enable_suspend = 0;
105 module_param(enable_suspend, ushort, 0644);
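/*
 * Note: all of the module parameters above use 0644 permissions, so
 * (assuming the driver is built as the net2272 module) they can be given
 * at load time, for example:
 *
 *   modprobe net2272 fifo_mode=1 enable_suspend=1
 *
 * or adjusted afterwards through /sys/module/net2272/parameters/.
 */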
106
107 static void assert_out_naking(struct net2272_ep *ep, const char *where)
108 {
109 u8 tmp;
110
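/* unless DEBUG is defined, this check returns immediately and is a no-op */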
111 #ifndef DEBUG
112 return;
113 #endif
114
115 tmp = net2272_ep_read(ep, EP_STAT0);
116 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
117 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
118 ep->ep.name, where, tmp);
119 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
120 }
121 }
122 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
123
124 static void stop_out_naking(struct net2272_ep *ep)
125 {
126 u8 tmp = net2272_ep_read(ep, EP_STAT0);
127
128 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
129 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
130 }
131
132 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
133
134 static char *type_string(u8 bmAttributes)
135 {
136 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
137 case USB_ENDPOINT_XFER_BULK: return "bulk";
138 case USB_ENDPOINT_XFER_ISOC: return "iso";
139 case USB_ENDPOINT_XFER_INT: return "intr";
140 default: return "control";
141 }
142 }
143
144 static char *buf_state_string(unsigned state)
145 {
146 switch (state) {
147 case BUFF_FREE: return "free";
148 case BUFF_VALID: return "valid";
149 case BUFF_LCL: return "local";
150 case BUFF_USB: return "usb";
151 default: return "unknown";
152 }
153 }
154
155 static char *dma_mode_string(void)
156 {
157 if (!use_dma)
158 return "PIO";
159 switch (dma_mode) {
160 case 0: return "SLOW DREQ";
161 case 1: return "FAST DREQ";
162 case 2: return "BURST";
163 default: return "invalid";
164 }
165 }
166
167 static void net2272_dequeue_all(struct net2272_ep *);
168 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
169 static int net2272_fifo_status(struct usb_ep *);
170
171 static const struct usb_ep_ops net2272_ep_ops;
172
173 /*---------------------------------------------------------------------------*/
174
175 static int
176 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
177 {
178 struct net2272 *dev;
179 struct net2272_ep *ep;
180 u32 max;
181 u8 tmp;
182 unsigned long flags;
183
184 ep = container_of(_ep, struct net2272_ep, ep);
185 if (!_ep || !desc || ep->desc || _ep->name == ep0name
186 || desc->bDescriptorType != USB_DT_ENDPOINT)
187 return -EINVAL;
188 dev = ep->dev;
189 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
190 return -ESHUTDOWN;
191
192 max = usb_endpoint_maxp(desc);
193
194 spin_lock_irqsave(&dev->lock, flags);
195 _ep->maxpacket = max;
196 ep->desc = desc;
197
198 /* net2272_ep_reset() has already been called */
199 ep->stopped = 0;
200 ep->wedged = 0;
201
202 /* set speed-dependent max packet */
203 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
204 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
205
206 /* set type, direction, address; reset fifo counters */
207 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
208 tmp = usb_endpoint_type(desc);
209 if (usb_endpoint_xfer_bulk(desc)) {
210 /* catch some particularly blatant driver bugs */
211 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
212 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
213 spin_unlock_irqrestore(&dev->lock, flags);
214 return -ERANGE;
215 }
216 }
217 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
218 tmp <<= ENDPOINT_TYPE;
219 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
220 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
221 tmp |= (1 << ENDPOINT_ENABLE);
222
223 /* for OUT transfers, block the rx fifo until a read is posted */
224 ep->is_in = usb_endpoint_dir_in(desc);
225 if (!ep->is_in)
226 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
227
228 net2272_ep_write(ep, EP_CFG, tmp);
229
230 /* enable irqs */
231 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
232 net2272_write(dev, IRQENB0, tmp);
233
234 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
235 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
236 | net2272_ep_read(ep, EP_IRQENB);
237 net2272_ep_write(ep, EP_IRQENB, tmp);
238
239 tmp = desc->bEndpointAddress;
240 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
241 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
242 type_string(desc->bmAttributes), max,
243 net2272_ep_read(ep, EP_CFG));
244
245 spin_unlock_irqrestore(&dev->lock, flags);
246 return 0;
247 }
248
249 static void net2272_ep_reset(struct net2272_ep *ep)
250 {
251 u8 tmp;
252
253 ep->desc = NULL;
254 INIT_LIST_HEAD(&ep->queue);
255
256 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
257 ep->ep.ops = &net2272_ep_ops;
258
259 /* disable irqs, endpoint */
260 net2272_ep_write(ep, EP_IRQENB, 0);
261
262 /* init to our chosen defaults, notably so that we NAK OUT
263 * packets until the driver queues a read.
264 */
265 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
266 net2272_ep_write(ep, EP_RSPSET, tmp);
267
268 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
269 if (ep->num != 0)
270 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
271
272 net2272_ep_write(ep, EP_RSPCLR, tmp);
273
274 /* scrub most status bits, and flush any fifo state */
275 net2272_ep_write(ep, EP_STAT0,
276 (1 << DATA_IN_TOKEN_INTERRUPT)
277 | (1 << DATA_OUT_TOKEN_INTERRUPT)
278 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
279 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
280 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
281
282 net2272_ep_write(ep, EP_STAT1,
283 (1 << TIMEOUT)
284 | (1 << USB_OUT_ACK_SENT)
285 | (1 << USB_OUT_NAK_SENT)
286 | (1 << USB_IN_ACK_RCVD)
287 | (1 << USB_IN_NAK_SENT)
288 | (1 << USB_STALL_SENT)
289 | (1 << LOCAL_OUT_ZLP)
290 | (1 << BUFFER_FLUSH));
291
292         /* fifo size is handled separately */
293 }
294
295 static int net2272_disable(struct usb_ep *_ep)
296 {
297 struct net2272_ep *ep;
298 unsigned long flags;
299
300 ep = container_of(_ep, struct net2272_ep, ep);
301 if (!_ep || !ep->desc || _ep->name == ep0name)
302 return -EINVAL;
303
304 spin_lock_irqsave(&ep->dev->lock, flags);
305 net2272_dequeue_all(ep);
306 net2272_ep_reset(ep);
307
308 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
309
310 spin_unlock_irqrestore(&ep->dev->lock, flags);
311 return 0;
312 }
313
314 /*---------------------------------------------------------------------------*/
315
316 static struct usb_request *
317 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
318 {
319 struct net2272_request *req;
320
321 if (!_ep)
322 return NULL;
323
324 req = kzalloc(sizeof(*req), gfp_flags);
325 if (!req)
326 return NULL;
327
328 INIT_LIST_HEAD(&req->queue);
329
330 return &req->req;
331 }
332
333 static void
334 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
335 {
336 struct net2272_request *req;
337
338 if (!_ep || !_req)
339 return;
340
341 req = container_of(_req, struct net2272_request, req);
342 WARN_ON(!list_empty(&req->queue));
343 kfree(req);
344 }
345
346 static void
347 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
348 {
349 struct net2272 *dev;
350 unsigned stopped = ep->stopped;
351
352 if (ep->num == 0) {
353 if (ep->dev->protocol_stall) {
354 ep->stopped = 1;
355 set_halt(ep);
356 }
357 allow_status(ep);
358 }
359
360 list_del_init(&req->queue);
361
362 if (req->req.status == -EINPROGRESS)
363 req->req.status = status;
364 else
365 status = req->req.status;
366
367 dev = ep->dev;
368 if (use_dma && ep->dma)
369 usb_gadget_unmap_request(&dev->gadget, &req->req,
370 ep->is_in);
371
372 if (status && status != -ESHUTDOWN)
373 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
374 ep->ep.name, &req->req, status,
375 req->req.actual, req->req.length, req->req.buf);
376
377 /* don't modify queue heads during completion callback */
378 ep->stopped = 1;
379 spin_unlock(&dev->lock);
380 usb_gadget_giveback_request(&ep->ep, &req->req);
381 spin_lock(&dev->lock);
382 ep->stopped = stopped;
383 }
384
385 static int
386 net2272_write_packet(struct net2272_ep *ep, u8 *buf,
387 struct net2272_request *req, unsigned max)
388 {
389 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
390 u16 *bufp;
391 unsigned length, count;
392 u8 tmp;
393
394 length = min(req->req.length - req->req.actual, max);
395 req->req.actual += length;
396
397 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
398 ep->ep.name, req, max, length,
399 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
400
401 count = length;
402 bufp = (u16 *)buf;
403
404 while (likely(count >= 2)) {
405 /* no byte-swap required; chip endian set during init */
406 writew(*bufp++, ep_data);
407 count -= 2;
408 }
409 buf = (u8 *)bufp;
410
411 /* write final byte by placing the NET2272 into 8-bit mode */
412 if (unlikely(count)) {
413 tmp = net2272_read(ep->dev, LOCCTL);
414 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
415 writeb(*buf, ep_data);
416 net2272_write(ep->dev, LOCCTL, tmp);
417 }
418 return length;
419 }
420
421 /* returns: 0: still running, 1: completed, negative: errno */
422 static int
423 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
424 {
425 u8 *buf;
426 unsigned count, max;
427 int status;
428
429 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
430 ep->ep.name, req->req.actual, req->req.length);
431
432 /*
433 * Keep loading the endpoint until the final packet is loaded,
434 * or the endpoint buffer is full.
435 */
436 top:
437 /*
438 * Clear interrupt status
439 * - Packet Transmitted interrupt will become set again when the
440 * host successfully takes another packet
441 */
442 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
443 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
444 buf = req->req.buf + req->req.actual;
445 prefetch(buf);
446
447 /* force pagesel */
448 net2272_ep_read(ep, EP_STAT0);
449
450 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
451 (net2272_ep_read(ep, EP_AVAIL0));
452
453 if (max < ep->ep.maxpacket)
454 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
455 | (net2272_ep_read(ep, EP_AVAIL0));
456
457 count = net2272_write_packet(ep, buf, req, max);
458 /* see if we are done */
459 if (req->req.length == req->req.actual) {
460 /* validate short or zlp packet */
461 if (count < ep->ep.maxpacket)
462 set_fifo_bytecount(ep, 0);
463 net2272_done(ep, req, 0);
464
465 if (!list_empty(&ep->queue)) {
466 req = list_entry(ep->queue.next,
467 struct net2272_request,
468 queue);
469 status = net2272_kick_dma(ep, req);
470
471 if (status < 0)
472 if ((net2272_ep_read(ep, EP_STAT0)
473 & (1 << BUFFER_EMPTY)))
474 goto top;
475 }
476 return 1;
477 }
478 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
479 }
480 return 0;
481 }
482
483 static void
484 net2272_out_flush(struct net2272_ep *ep)
485 {
486 ASSERT_OUT_NAKING(ep);
487
488 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
489 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
490 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
491 }
492
493 static int
494 net2272_read_packet(struct net2272_ep *ep, u8 *buf,
495 struct net2272_request *req, unsigned avail)
496 {
497 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
498 unsigned is_short;
499 u16 *bufp;
500
501 req->req.actual += avail;
502
503 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
504 ep->ep.name, req, avail,
505 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
506
507 is_short = (avail < ep->ep.maxpacket);
508
509 if (unlikely(avail == 0)) {
510 /* remove any zlp from the buffer */
511 (void)readw(ep_data);
512 return is_short;
513 }
514
515 /* Ensure we get the final byte */
516 if (unlikely(avail % 2))
517 avail++;
518 bufp = (u16 *)buf;
519
520 do {
521 *bufp++ = readw(ep_data);
522 avail -= 2;
523 } while (avail);
524
525 /*
526          * To avoid a false endpoint-available race condition, ep stat0 must be
527          * read twice in the case of a short transfer
528 */
529 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
530 net2272_ep_read(ep, EP_STAT0);
531
532 return is_short;
533 }
534
535 static int
536 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
537 {
538 u8 *buf;
539 unsigned is_short;
540 int count;
541 int tmp;
542 int cleanup = 0;
543 int status = -1;
544
545 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
546 ep->ep.name, req->req.actual, req->req.length);
547
548 top:
549 do {
550 buf = req->req.buf + req->req.actual;
551 prefetchw(buf);
552
553 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
554 | net2272_ep_read(ep, EP_AVAIL0);
555
556 net2272_ep_write(ep, EP_STAT0,
557 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
558 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
559
560 tmp = req->req.length - req->req.actual;
561
562 if (count > tmp) {
563 if ((tmp % ep->ep.maxpacket) != 0) {
564 dev_err(ep->dev->dev,
565 "%s out fifo %d bytes, expected %d\n",
566 ep->ep.name, count, tmp);
567 cleanup = 1;
568 }
569 count = (tmp > 0) ? tmp : 0;
570 }
571
572 is_short = net2272_read_packet(ep, buf, req, count);
573
574 /* completion */
575 if (unlikely(cleanup || is_short ||
576 req->req.actual == req->req.length)) {
577
578 if (cleanup) {
579 net2272_out_flush(ep);
580 net2272_done(ep, req, -EOVERFLOW);
581 } else
582 net2272_done(ep, req, 0);
583
584 /* re-initialize endpoint transfer registers
585 * otherwise they may result in erroneous pre-validation
586 * for subsequent control reads
587 */
588 if (unlikely(ep->num == 0)) {
589 net2272_ep_write(ep, EP_TRANSFER2, 0);
590 net2272_ep_write(ep, EP_TRANSFER1, 0);
591 net2272_ep_write(ep, EP_TRANSFER0, 0);
592 }
593
594 if (!list_empty(&ep->queue)) {
595 req = list_entry(ep->queue.next,
596 struct net2272_request, queue);
597 status = net2272_kick_dma(ep, req);
598 if ((status < 0) &&
599 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
600 goto top;
601 }
602 return 1;
603 }
604 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
605
606 return 0;
607 }
608
609 static void
610 net2272_pio_advance(struct net2272_ep *ep)
611 {
612 struct net2272_request *req;
613
614 if (unlikely(list_empty(&ep->queue)))
615 return;
616
617 req = list_entry(ep->queue.next, struct net2272_request, queue);
618 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
619 }
620
621 /* returns 0 on success, else negative errno */
622 static int
623 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
624 unsigned len, unsigned dir)
625 {
626 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
627 ep, buf, len, dir);
628
629 /* The NET2272 only supports a single dma channel */
630 if (dev->dma_busy)
631 return -EBUSY;
632 /*
633 * EP_TRANSFER (used to determine the number of bytes received
634 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
635 */
636 if ((dir == 1) && (len > 0x1000000))
637 return -EINVAL;
638
639 dev->dma_busy = 1;
640
641 /* initialize platform's dma */
642 #ifdef CONFIG_USB_PCI
643 /* NET2272 addr, buffer addr, length, etc. */
644 switch (dev->dev_id) {
645 case PCI_DEVICE_ID_RDK1:
646 /* Setup PLX 9054 DMA mode */
647 writel((1 << LOCAL_BUS_WIDTH) |
648 (1 << TA_READY_INPUT_ENABLE) |
649 (0 << LOCAL_BURST_ENABLE) |
650 (1 << DONE_INTERRUPT_ENABLE) |
651 (1 << LOCAL_ADDRESSING_MODE) |
652 (1 << DEMAND_MODE) |
653 (1 << DMA_EOT_ENABLE) |
654 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
655 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
656 dev->rdk1.plx9054_base_addr + DMAMODE0);
657
658 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
659 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
660 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
661 writel((dir << DIRECTION_OF_TRANSFER) |
662 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
663 dev->rdk1.plx9054_base_addr + DMADPR0);
664 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
665 readl(dev->rdk1.plx9054_base_addr + INTCSR),
666 dev->rdk1.plx9054_base_addr + INTCSR);
667
668 break;
669 }
670 #endif
671
672 net2272_write(dev, DMAREQ,
673 (0 << DMA_BUFFER_VALID) |
674 (1 << DMA_REQUEST_ENABLE) |
675 (1 << DMA_CONTROL_DACK) |
676 (dev->dma_eot_polarity << EOT_POLARITY) |
677 (dev->dma_dack_polarity << DACK_POLARITY) |
678 (dev->dma_dreq_polarity << DREQ_POLARITY) |
679 ((ep >> 1) << DMA_ENDPOINT_SELECT));
680
681 (void) net2272_read(dev, SCRATCH);
682
683 return 0;
684 }
685
686 static void
687 net2272_start_dma(struct net2272 *dev)
688 {
689 /* start platform's dma controller */
690 #ifdef CONFIG_USB_PCI
691 switch (dev->dev_id) {
692 case PCI_DEVICE_ID_RDK1:
693 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
694 dev->rdk1.plx9054_base_addr + DMACSR0);
695 break;
696 }
697 #endif
698 }
699
700 /* returns 0 on success, else negative errno */
701 static int
702 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
703 {
704 unsigned size;
705 u8 tmp;
706
707 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
708 return -EINVAL;
709
710 /* don't use dma for odd-length transfers
711 * otherwise, we'd need to deal with the last byte with pio
712 */
713 if (req->req.length & 1)
714 return -EINVAL;
715
716 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
717 ep->ep.name, req, (unsigned long long) req->req.dma);
718
719 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
720
721 /* The NET2272 can only use DMA on one endpoint at a time */
722 if (ep->dev->dma_busy)
723 return -EBUSY;
724
725 /* Make sure we only DMA an even number of bytes (we'll use
726 * pio to complete the transfer)
727 */
728 size = req->req.length;
729 size &= ~1;
730
731 /* device-to-host transfer */
732 if (ep->is_in) {
733 /* initialize platform's dma controller */
734 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
735 /* unable to obtain DMA channel; return error and use pio mode */
736 return -EBUSY;
737 req->req.actual += size;
738
739 /* host-to-device transfer */
740 } else {
741 tmp = net2272_ep_read(ep, EP_STAT0);
742
743 /* initialize platform's dma controller */
744 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
745 /* unable to obtain DMA channel; return error and use pio mode */
746 return -EBUSY;
747
748 if (!(tmp & (1 << BUFFER_EMPTY)))
749 ep->not_empty = 1;
750 else
751 ep->not_empty = 0;
752
753
754 /* allow the endpoint's buffer to fill */
755 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
756
757 /* this transfer completed and data's already in the fifo
758 * return error so pio gets used.
759 */
760 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
761
762 /* deassert dreq */
763 net2272_write(ep->dev, DMAREQ,
764 (0 << DMA_BUFFER_VALID) |
765 (0 << DMA_REQUEST_ENABLE) |
766 (1 << DMA_CONTROL_DACK) |
767 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
768 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
769 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
770 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
771
772 return -EBUSY;
773 }
774 }
775
776 /* Don't use per-packet interrupts: use dma interrupts only */
777 net2272_ep_write(ep, EP_IRQENB, 0);
778
779 net2272_start_dma(ep->dev);
780
781 return 0;
782 }
783
784 static void net2272_cancel_dma(struct net2272 *dev)
785 {
786 #ifdef CONFIG_USB_PCI
787 switch (dev->dev_id) {
788 case PCI_DEVICE_ID_RDK1:
789 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
790 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
791 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
792 (1 << CHANNEL_DONE)))
793                         continue;       /* wait for dma to stabilize */
794
795 /* dma abort generates an interrupt */
796 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
797 dev->rdk1.plx9054_base_addr + DMACSR0);
798 break;
799 }
800 #endif
801
802 dev->dma_busy = 0;
803 }
804
805 /*---------------------------------------------------------------------------*/
806
807 static int
808 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
809 {
810 struct net2272_request *req;
811 struct net2272_ep *ep;
812 struct net2272 *dev;
813 unsigned long flags;
814 int status = -1;
815 u8 s;
816
817 req = container_of(_req, struct net2272_request, req);
818 if (!_req || !_req->complete || !_req->buf
819 || !list_empty(&req->queue))
820 return -EINVAL;
821 ep = container_of(_ep, struct net2272_ep, ep);
822 if (!_ep || (!ep->desc && ep->num != 0))
823 return -EINVAL;
824 dev = ep->dev;
825 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
826 return -ESHUTDOWN;
827
828 /* set up dma mapping in case the caller didn't */
829 if (use_dma && ep->dma) {
830 status = usb_gadget_map_request(&dev->gadget, _req,
831 ep->is_in);
832 if (status)
833 return status;
834 }
835
836 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
837 _ep->name, _req, _req->length, _req->buf,
838 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
839
840 spin_lock_irqsave(&dev->lock, flags);
841
842 _req->status = -EINPROGRESS;
843 _req->actual = 0;
844
845 /* kickstart this i/o queue? */
846 if (list_empty(&ep->queue) && !ep->stopped) {
847 /* maybe there's no control data, just status ack */
848 if (ep->num == 0 && _req->length == 0) {
849 net2272_done(ep, req, 0);
850 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
851 goto done;
852 }
853
854 /* Return zlp, don't let it block subsequent packets */
855 s = net2272_ep_read(ep, EP_STAT0);
856 if (s & (1 << BUFFER_EMPTY)) {
857                 /* Buffer is empty; check for a blocking zlp and handle it */
858 if ((s & (1 << NAK_OUT_PACKETS)) &&
859 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
860 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
861 /*
862 * Request is going to terminate with a short packet ...
863 * hope the client is ready for it!
864 */
865 status = net2272_read_fifo(ep, req);
866 /* clear short packet naking */
867 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
868 goto done;
869 }
870 }
871
872 /* try dma first */
873 status = net2272_kick_dma(ep, req);
874
875 if (status < 0) {
876                 /* dma failed (most likely in use by another endpoint);
877                  * fall back to pio
878 */
879 status = 0;
880
881 if (ep->is_in)
882 status = net2272_write_fifo(ep, req);
883 else {
884 s = net2272_ep_read(ep, EP_STAT0);
885 if ((s & (1 << BUFFER_EMPTY)) == 0)
886 status = net2272_read_fifo(ep, req);
887 }
888
889 if (unlikely(status != 0)) {
890 if (status > 0)
891 status = 0;
892 req = NULL;
893 }
894 }
895 }
896 if (likely(req))
897 list_add_tail(&req->queue, &ep->queue);
898
899 if (likely(!list_empty(&ep->queue)))
900 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
901 done:
902 spin_unlock_irqrestore(&dev->lock, flags);
903
904 return 0;
905 }
906
907 /* dequeue ALL requests */
908 static void
909 net2272_dequeue_all(struct net2272_ep *ep)
910 {
911 struct net2272_request *req;
912
913 /* called with spinlock held */
914 ep->stopped = 1;
915
916 while (!list_empty(&ep->queue)) {
917 req = list_entry(ep->queue.next,
918 struct net2272_request,
919 queue);
920 net2272_done(ep, req, -ESHUTDOWN);
921 }
922 }
923
924 /* dequeue JUST ONE request */
925 static int
926 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
927 {
928 struct net2272_ep *ep;
929 struct net2272_request *req;
930 unsigned long flags;
931 int stopped;
932
933 ep = container_of(_ep, struct net2272_ep, ep);
934 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
935 return -EINVAL;
936
937 spin_lock_irqsave(&ep->dev->lock, flags);
938 stopped = ep->stopped;
939 ep->stopped = 1;
940
941 /* make sure it's still queued on this endpoint */
942 list_for_each_entry(req, &ep->queue, queue) {
943 if (&req->req == _req)
944 break;
945 }
946 if (&req->req != _req) {
947 ep->stopped = stopped;
948 spin_unlock_irqrestore(&ep->dev->lock, flags);
949 return -EINVAL;
950 }
951
952 /* queue head may be partially complete */
953 if (ep->queue.next == &req->queue) {
954 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
955 net2272_done(ep, req, -ECONNRESET);
956 }
957 req = NULL;
958 ep->stopped = stopped;
959
960 spin_unlock_irqrestore(&ep->dev->lock, flags);
961 return 0;
962 }
963
964 /*---------------------------------------------------------------------------*/
965
966 static int
967 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
968 {
969 struct net2272_ep *ep;
970 unsigned long flags;
971 int ret = 0;
972
973 ep = container_of(_ep, struct net2272_ep, ep);
974 if (!_ep || (!ep->desc && ep->num != 0))
975 return -EINVAL;
976 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
977 return -ESHUTDOWN;
978 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
979 return -EINVAL;
980
981 spin_lock_irqsave(&ep->dev->lock, flags);
982 if (!list_empty(&ep->queue))
983 ret = -EAGAIN;
984 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
985 ret = -EAGAIN;
986 else {
987 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
988 value ? "set" : "clear",
989 wedged ? "wedge" : "halt");
990 /* set/clear */
991 if (value) {
992 if (ep->num == 0)
993 ep->dev->protocol_stall = 1;
994 else
995 set_halt(ep);
996 if (wedged)
997 ep->wedged = 1;
998 } else {
999 clear_halt(ep);
1000 ep->wedged = 0;
1001 }
1002 }
1003 spin_unlock_irqrestore(&ep->dev->lock, flags);
1004
1005 return ret;
1006 }
1007
1008 static int
1009 net2272_set_halt(struct usb_ep *_ep, int value)
1010 {
1011 return net2272_set_halt_and_wedge(_ep, value, 0);
1012 }
1013
1014 static int
1015 net2272_set_wedge(struct usb_ep *_ep)
1016 {
1017 if (!_ep || _ep->name == ep0name)
1018 return -EINVAL;
1019 return net2272_set_halt_and_wedge(_ep, 1, 1);
1020 }
1021
1022 static int
1023 net2272_fifo_status(struct usb_ep *_ep)
1024 {
1025 struct net2272_ep *ep;
1026 u16 avail;
1027
1028 ep = container_of(_ep, struct net2272_ep, ep);
1029 if (!_ep || (!ep->desc && ep->num != 0))
1030 return -ENODEV;
1031 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1032 return -ESHUTDOWN;
1033
1034 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1035 avail |= net2272_ep_read(ep, EP_AVAIL0);
1036 if (avail > ep->fifo_size)
1037 return -EOVERFLOW;
1038 if (ep->is_in)
1039 avail = ep->fifo_size - avail;
1040 return avail;
1041 }
1042
1043 static void
1044 net2272_fifo_flush(struct usb_ep *_ep)
1045 {
1046 struct net2272_ep *ep;
1047
1048 ep = container_of(_ep, struct net2272_ep, ep);
1049 if (!_ep || (!ep->desc && ep->num != 0))
1050 return;
1051 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1052 return;
1053
1054 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1055 }
1056
1057 static const struct usb_ep_ops net2272_ep_ops = {
1058 .enable = net2272_enable,
1059 .disable = net2272_disable,
1060
1061 .alloc_request = net2272_alloc_request,
1062 .free_request = net2272_free_request,
1063
1064 .queue = net2272_queue,
1065 .dequeue = net2272_dequeue,
1066
1067 .set_halt = net2272_set_halt,
1068 .set_wedge = net2272_set_wedge,
1069 .fifo_status = net2272_fifo_status,
1070 .fifo_flush = net2272_fifo_flush,
1071 };
1072
1073 /*---------------------------------------------------------------------------*/
1074
1075 static int
1076 net2272_get_frame(struct usb_gadget *_gadget)
1077 {
1078 struct net2272 *dev;
1079 unsigned long flags;
1080 u16 ret;
1081
1082 if (!_gadget)
1083 return -ENODEV;
1084 dev = container_of(_gadget, struct net2272, gadget);
1085 spin_lock_irqsave(&dev->lock, flags);
1086
1087 ret = net2272_read(dev, FRAME1) << 8;
1088 ret |= net2272_read(dev, FRAME0);
1089
1090 spin_unlock_irqrestore(&dev->lock, flags);
1091 return ret;
1092 }
1093
1094 static int
1095 net2272_wakeup(struct usb_gadget *_gadget)
1096 {
1097 struct net2272 *dev;
1098 u8 tmp;
1099 unsigned long flags;
1100
1101 if (!_gadget)
1102 return 0;
1103 dev = container_of(_gadget, struct net2272, gadget);
1104
1105 spin_lock_irqsave(&dev->lock, flags);
1106 tmp = net2272_read(dev, USBCTL0);
1107 if (tmp & (1 << IO_WAKEUP_ENABLE))
1108 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1109
1110 spin_unlock_irqrestore(&dev->lock, flags);
1111
1112 return 0;
1113 }
1114
1115 static int
1116 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1117 {
1118 if (!_gadget)
1119 return -ENODEV;
1120
1121 _gadget->is_selfpowered = (value != 0);
1122
1123 return 0;
1124 }
1125
1126 static int
1127 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1128 {
1129 struct net2272 *dev;
1130 u8 tmp;
1131 unsigned long flags;
1132
1133 if (!_gadget)
1134 return -ENODEV;
1135 dev = container_of(_gadget, struct net2272, gadget);
1136
1137 spin_lock_irqsave(&dev->lock, flags);
1138 tmp = net2272_read(dev, USBCTL0);
1139 dev->softconnect = (is_on != 0);
1140 if (is_on)
1141 tmp |= (1 << USB_DETECT_ENABLE);
1142 else
1143 tmp &= ~(1 << USB_DETECT_ENABLE);
1144 net2272_write(dev, USBCTL0, tmp);
1145 spin_unlock_irqrestore(&dev->lock, flags);
1146
1147 return 0;
1148 }
1149
1150 static int net2272_start(struct usb_gadget *_gadget,
1151 struct usb_gadget_driver *driver);
1152 static int net2272_stop(struct usb_gadget *_gadget);
1153
1154 static const struct usb_gadget_ops net2272_ops = {
1155 .get_frame = net2272_get_frame,
1156 .wakeup = net2272_wakeup,
1157 .set_selfpowered = net2272_set_selfpowered,
1158 .pullup = net2272_pullup,
1159 .udc_start = net2272_start,
1160 .udc_stop = net2272_stop,
1161 };
1162
1163 /*---------------------------------------------------------------------------*/
1164
1165 static ssize_t
1166 registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1167 {
1168 struct net2272 *dev;
1169 char *next;
1170 unsigned size, t;
1171 unsigned long flags;
1172 u8 t1, t2;
1173 int i;
1174 const char *s;
1175
1176 dev = dev_get_drvdata(_dev);
1177 next = buf;
1178 size = PAGE_SIZE;
1179 spin_lock_irqsave(&dev->lock, flags);
1180
1181 /* Main Control Registers */
1182         t = scnprintf(next, size, "%s version %s, "
1183 "chiprev %02x, locctl %02x\n"
1184 "irqenb0 %02x irqenb1 %02x "
1185 "irqstat0 %02x irqstat1 %02x\n",
1186 driver_name, driver_vers, dev->chiprev,
1187 net2272_read(dev, LOCCTL),
1188 net2272_read(dev, IRQENB0),
1189 net2272_read(dev, IRQENB1),
1190 net2272_read(dev, IRQSTAT0),
1191 net2272_read(dev, IRQSTAT1));
1192 size -= t;
1193 next += t;
1194
1195 /* DMA */
1196 t1 = net2272_read(dev, DMAREQ);
1197 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1198 t1, ep_name[(t1 & 0x01) + 1],
1199 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1200 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1201 t1 & (1 << DMA_REQUEST) ? "req " : "",
1202 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1203 size -= t;
1204 next += t;
1205
1206 /* USB Control Registers */
1207 t1 = net2272_read(dev, USBCTL1);
1208 if (t1 & (1 << VBUS_PIN)) {
1209 if (t1 & (1 << USB_HIGH_SPEED))
1210 s = "high speed";
1211 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1212 s = "powered";
1213 else
1214 s = "full speed";
1215 } else
1216 s = "not attached";
1217 t = scnprintf(next, size,
1218 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1219 net2272_read(dev, USBCTL0), t1,
1220 net2272_read(dev, OURADDR), s);
1221 size -= t;
1222 next += t;
1223
1224 /* Endpoint Registers */
1225 for (i = 0; i < 4; ++i) {
1226 struct net2272_ep *ep;
1227
1228 ep = &dev->ep[i];
1229 if (i && !ep->desc)
1230 continue;
1231
1232 t1 = net2272_ep_read(ep, EP_CFG);
1233 t2 = net2272_ep_read(ep, EP_RSPSET);
1234 t = scnprintf(next, size,
1235 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1236 "irqenb %02x\n",
1237 ep->ep.name, t1, t2,
1238 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1239 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1240 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1241 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1242 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1243 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1244 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1245 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1246 net2272_ep_read(ep, EP_IRQENB));
1247 size -= t;
1248 next += t;
1249
1250 t = scnprintf(next, size,
1251 "\tstat0 %02x stat1 %02x avail %04x "
1252 "(ep%d%s-%s)%s\n",
1253 net2272_ep_read(ep, EP_STAT0),
1254 net2272_ep_read(ep, EP_STAT1),
1255 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1256 t1 & 0x0f,
1257 ep->is_in ? "in" : "out",
1258 type_string(t1 >> 5),
1259 ep->stopped ? "*" : "");
1260 size -= t;
1261 next += t;
1262
1263 t = scnprintf(next, size,
1264 "\tep_transfer %06x\n",
1265 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1266 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1267 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1268 size -= t;
1269 next += t;
1270
1271 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1272 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1273 t = scnprintf(next, size,
1274 "\tbuf-a %s buf-b %s\n",
1275 buf_state_string(t1),
1276 buf_state_string(t2));
1277 size -= t;
1278 next += t;
1279 }
1280
1281 spin_unlock_irqrestore(&dev->lock, flags);
1282
1283 return PAGE_SIZE - size;
1284 }
1285 static DEVICE_ATTR_RO(registers);
1286
1287 /*---------------------------------------------------------------------------*/
1288
1289 static void
1290 net2272_set_fifo_mode(struct net2272 *dev, int mode)
1291 {
1292 u8 tmp;
1293
1294 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1295 tmp |= (mode << 6);
1296 net2272_write(dev, LOCCTL, tmp);
1297
1298 INIT_LIST_HEAD(&dev->gadget.ep_list);
1299
1300 /* always ep-a, ep-c ... maybe not ep-b */
1301 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1302
1303 switch (mode) {
1304 case 0:
1305 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1306 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1307 break;
1308 case 1:
1309 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1310 dev->ep[1].fifo_size = 1024;
1311 dev->ep[2].fifo_size = 512;
1312 break;
1313 case 2:
1314 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1315 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1316 break;
1317 case 3:
1318 dev->ep[1].fifo_size = 1024;
1319 break;
1320 }
1321
1322         /* ep-c always has two 512-byte buffers */
1323 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1324 dev->ep[3].fifo_size = 512;
1325 }
1326
1327 /*---------------------------------------------------------------------------*/
1328
1329 static void
1330 net2272_usb_reset(struct net2272 *dev)
1331 {
1332 dev->gadget.speed = USB_SPEED_UNKNOWN;
1333
1334 net2272_cancel_dma(dev);
1335
1336 net2272_write(dev, IRQENB0, 0);
1337 net2272_write(dev, IRQENB1, 0);
1338
1339 /* clear irq state */
1340 net2272_write(dev, IRQSTAT0, 0xff);
1341 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1342
1343 net2272_write(dev, DMAREQ,
1344 (0 << DMA_BUFFER_VALID) |
1345 (0 << DMA_REQUEST_ENABLE) |
1346 (1 << DMA_CONTROL_DACK) |
1347 (dev->dma_eot_polarity << EOT_POLARITY) |
1348 (dev->dma_dack_polarity << DACK_POLARITY) |
1349 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1350 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1351
1352 net2272_cancel_dma(dev);
1353 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1354
1355         /* Set the NET2272 ep fifo data width to 16-bit mode. For correct byte ordering,
1356          * note that the higher-level gadget drivers are expected to convert data to little endian;
1357          * enable byte swap for your local bus/cpu, if needed, by setting BYTE_SWAP in LOCCTL here.
1358 */
1359 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1360 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1361 }
1362
1363 static void
1364 net2272_usb_reinit(struct net2272 *dev)
1365 {
1366 int i;
1367
1368 /* basic endpoint init */
1369 for (i = 0; i < 4; ++i) {
1370 struct net2272_ep *ep = &dev->ep[i];
1371
1372 ep->ep.name = ep_name[i];
1373 ep->dev = dev;
1374 ep->num = i;
1375 ep->not_empty = 0;
1376
1377 if (use_dma && ep->num == dma_ep)
1378 ep->dma = 1;
1379
1380 if (i > 0 && i <= 3)
1381 ep->fifo_size = 512;
1382 else
1383 ep->fifo_size = 64;
1384 net2272_ep_reset(ep);
1385
1386 if (i == 0) {
1387 ep->ep.caps.type_control = true;
1388 } else {
1389 ep->ep.caps.type_iso = true;
1390 ep->ep.caps.type_bulk = true;
1391 ep->ep.caps.type_int = true;
1392 }
1393
1394 ep->ep.caps.dir_in = true;
1395 ep->ep.caps.dir_out = true;
1396 }
1397 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1398
1399 dev->gadget.ep0 = &dev->ep[0].ep;
1400 dev->ep[0].stopped = 0;
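        /* ep0 is exposed via gadget.ep0; it is never added to the gadget's ep_list */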
1401 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1402 }
1403
1404 static void
1405 net2272_ep0_start(struct net2272 *dev)
1406 {
1407 struct net2272_ep *ep0 = &dev->ep[0];
1408
1409 net2272_ep_write(ep0, EP_RSPSET,
1410 (1 << NAK_OUT_PACKETS_MODE) |
1411 (1 << ALT_NAK_OUT_PACKETS));
1412 net2272_ep_write(ep0, EP_RSPCLR,
1413 (1 << HIDE_STATUS_PHASE) |
1414 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1415 net2272_write(dev, USBCTL0,
1416 (dev->softconnect << USB_DETECT_ENABLE) |
1417 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1418 (1 << IO_WAKEUP_ENABLE));
1419 net2272_write(dev, IRQENB0,
1420 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1421 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1422 (1 << DMA_DONE_INTERRUPT_ENABLE));
1423 net2272_write(dev, IRQENB1,
1424 (1 << VBUS_INTERRUPT_ENABLE) |
1425 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1426 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1427 }
1428
1429 /* when a driver is successfully registered, it will receive
1430 * control requests including set_configuration(), which enables
1431 * non-control requests. then usb traffic follows until a
1432 * disconnect is reported. then a host may connect again, or
1433 * the driver might get unbound.
1434 */
1435 static int net2272_start(struct usb_gadget *_gadget,
1436 struct usb_gadget_driver *driver)
1437 {
1438 struct net2272 *dev;
1439 unsigned i;
1440
1441 if (!driver || !driver->setup ||
1442 driver->max_speed != USB_SPEED_HIGH)
1443 return -EINVAL;
1444
1445 dev = container_of(_gadget, struct net2272, gadget);
1446
1447 for (i = 0; i < 4; ++i)
1448 dev->ep[i].irqs = 0;
1449 /* hook up the driver ... */
1450 dev->softconnect = 1;
1451 driver->driver.bus = NULL;
1452 dev->driver = driver;
1453
1454 /* ... then enable host detection and ep0; and we're ready
1455 * for set_configuration as well as eventual disconnect.
1456 */
1457 net2272_ep0_start(dev);
1458
1459 return 0;
1460 }
1461
1462 static void
1463 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1464 {
1465 int i;
1466
1467 /* don't disconnect if it's not connected */
1468 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1469 driver = NULL;
1470
1471 /* stop hardware; prevent new request submissions;
1472 * and kill any outstanding requests.
1473 */
1474 net2272_usb_reset(dev);
1475 for (i = 0; i < 4; ++i)
1476 net2272_dequeue_all(&dev->ep[i]);
1477
1478 /* report disconnect; the driver is already quiesced */
1479 if (driver) {
1480 spin_unlock(&dev->lock);
1481 driver->disconnect(&dev->gadget);
1482 spin_lock(&dev->lock);
1483 }
1484
1485 net2272_usb_reinit(dev);
1486 }
1487
1488 static int net2272_stop(struct usb_gadget *_gadget)
1489 {
1490 struct net2272 *dev;
1491 unsigned long flags;
1492
1493 dev = container_of(_gadget, struct net2272, gadget);
1494
1495 spin_lock_irqsave(&dev->lock, flags);
1496 stop_activity(dev, NULL);
1497 spin_unlock_irqrestore(&dev->lock, flags);
1498
1499 dev->driver = NULL;
1500
1501 return 0;
1502 }
1503
1504 /*---------------------------------------------------------------------------*/
1505 /* handle ep-a/ep-b dma completions */
1506 static void
1507 net2272_handle_dma(struct net2272_ep *ep)
1508 {
1509 struct net2272_request *req;
1510 unsigned len;
1511 int status;
1512
1513 if (!list_empty(&ep->queue))
1514 req = list_entry(ep->queue.next,
1515 struct net2272_request, queue);
1516 else
1517 req = NULL;
1518
1519 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1520
1521 /* Ensure DREQ is de-asserted */
1522 net2272_write(ep->dev, DMAREQ,
1523 (0 << DMA_BUFFER_VALID)
1524 | (0 << DMA_REQUEST_ENABLE)
1525 | (1 << DMA_CONTROL_DACK)
1526 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1527 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1528 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1529 | (ep->dma << DMA_ENDPOINT_SELECT));
1530
1531 ep->dev->dma_busy = 0;
1532
1533 net2272_ep_write(ep, EP_IRQENB,
1534 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1535 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1536 | net2272_ep_read(ep, EP_IRQENB));
1537
1538 /* device-to-host transfer completed */
1539 if (ep->is_in) {
1540 /* validate a short packet or zlp if necessary */
1541 if ((req->req.length % ep->ep.maxpacket != 0) ||
1542 req->req.zero)
1543 set_fifo_bytecount(ep, 0);
1544
1545 net2272_done(ep, req, 0);
1546 if (!list_empty(&ep->queue)) {
1547 req = list_entry(ep->queue.next,
1548 struct net2272_request, queue);
1549 status = net2272_kick_dma(ep, req);
1550 if (status < 0)
1551 net2272_pio_advance(ep);
1552 }
1553
1554 /* host-to-device transfer completed */
1555 } else {
1556 /* terminated with a short packet? */
1557 if (net2272_read(ep->dev, IRQSTAT0) &
1558 (1 << DMA_DONE_INTERRUPT)) {
1559 /* abort system dma */
1560 net2272_cancel_dma(ep->dev);
1561 }
1562
1563 /* EP_TRANSFER will contain the number of bytes
1564 * actually received.
1565 * NOTE: There is no overflow detection on EP_TRANSFER:
1566 * We can't deal with transfers larger than 2^24 bytes!
1567 */
1568 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1569 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1570 | (net2272_ep_read(ep, EP_TRANSFER0));
1571
1572 if (ep->not_empty)
1573 len += 4;
1574
1575 req->req.actual += len;
1576
1577 /* get any remaining data */
1578 net2272_pio_advance(ep);
1579 }
1580 }
1581
1582 /*---------------------------------------------------------------------------*/
1583
1584 static void
1585 net2272_handle_ep(struct net2272_ep *ep)
1586 {
1587 struct net2272_request *req;
1588 u8 stat0, stat1;
1589
1590 if (!list_empty(&ep->queue))
1591 req = list_entry(ep->queue.next,
1592 struct net2272_request, queue);
1593 else
1594 req = NULL;
1595
1596 /* ack all, and handle what we care about */
1597 stat0 = net2272_ep_read(ep, EP_STAT0);
1598 stat1 = net2272_ep_read(ep, EP_STAT1);
1599 ep->irqs++;
1600
1601 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1602 ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1603
1604 net2272_ep_write(ep, EP_STAT0, stat0 &
1605 ~((1 << NAK_OUT_PACKETS)
1606 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1607 net2272_ep_write(ep, EP_STAT1, stat1);
1608
1609 /* data packet(s) received (in the fifo, OUT)
1610 * direction must be validated, otherwise control read status phase
1611 * could be interpreted as a valid packet
1612 */
1613 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1614 net2272_pio_advance(ep);
1615 /* data packet(s) transmitted (IN) */
1616 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1617 net2272_pio_advance(ep);
1618 }
1619
1620 static struct net2272_ep *
1621 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1622 {
1623 struct net2272_ep *ep;
1624
1625 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1626 return &dev->ep[0];
1627
1628 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1629 u8 bEndpointAddress;
1630
1631 if (!ep->desc)
1632 continue;
1633 bEndpointAddress = ep->desc->bEndpointAddress;
1634 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1635 continue;
1636 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1637 return ep;
1638 }
1639 return NULL;
1640 }
1641
1642 /*
1643 * USB Test Packet:
1644 * JKJKJKJK * 9
1645 * JJKKJJKK * 8
1646 * JJJJKKKK * 8
1647 * JJJJJJJKKKKKKK * 8
1648 * JJJJJJJK * 8
1649 * {JKKKKKKK * 10}, JK
1650 */
1651 static const u8 net2272_test_packet[] = {
1652 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1653 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1654 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1655 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1656 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1657 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1658 };
1659
1660 static void
1661 net2272_set_test_mode(struct net2272 *dev, int mode)
1662 {
1663 int i;
1664
1665 /* Disable all net2272 interrupts:
1666 * Nothing but a power cycle should stop the test.
1667 */
1668 net2272_write(dev, IRQENB0, 0x00);
1669 net2272_write(dev, IRQENB1, 0x00);
1670
1671         /* Force transceiver to high-speed */
1672 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1673
1674 net2272_write(dev, PAGESEL, 0);
1675 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1676 net2272_write(dev, EP_RSPCLR,
1677 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1678 | (1 << HIDE_STATUS_PHASE));
1679 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1680 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1681
1682 /* wait for status phase to complete */
1683 while (!(net2272_read(dev, EP_STAT0) &
1684 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1685 ;
1686
1687 /* Enable test mode */
1688 net2272_write(dev, USBTEST, mode);
1689
1690 /* load test packet */
1691 if (mode == TEST_PACKET) {
1692 /* switch to 8 bit mode */
1693 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1694 ~(1 << DATA_WIDTH));
1695
1696 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1697 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1698
1699 /* Validate test packet */
1700 net2272_write(dev, EP_TRANSFER0, 0);
1701 }
1702 }
1703
1704 static void
1705 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1706 {
1707 struct net2272_ep *ep;
1708 u8 num, scratch;
1709
1710 /* starting a control request? */
1711 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1712 union {
1713 u8 raw[8];
1714 struct usb_ctrlrequest r;
1715 } u;
1716 int tmp = 0;
1717 struct net2272_request *req;
1718
1719 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1720 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1721 dev->gadget.speed = USB_SPEED_HIGH;
1722 else
1723 dev->gadget.speed = USB_SPEED_FULL;
1724 dev_dbg(dev->dev, "%s\n",
1725 usb_speed_string(dev->gadget.speed));
1726 }
1727
1728 ep = &dev->ep[0];
1729 ep->irqs++;
1730
1731 /* make sure any leftover interrupt state is cleared */
1732 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1733 while (!list_empty(&ep->queue)) {
1734 req = list_entry(ep->queue.next,
1735 struct net2272_request, queue);
1736 net2272_done(ep, req,
1737 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1738 }
1739 ep->stopped = 0;
1740 dev->protocol_stall = 0;
1741 net2272_ep_write(ep, EP_STAT0,
1742 (1 << DATA_IN_TOKEN_INTERRUPT)
1743 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1744 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1745 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1746 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1747 net2272_ep_write(ep, EP_STAT1,
1748 (1 << TIMEOUT)
1749 | (1 << USB_OUT_ACK_SENT)
1750 | (1 << USB_OUT_NAK_SENT)
1751 | (1 << USB_IN_ACK_RCVD)
1752 | (1 << USB_IN_NAK_SENT)
1753 | (1 << USB_STALL_SENT)
1754 | (1 << LOCAL_OUT_ZLP));
1755
1756 /*
1757 * Ensure Control Read pre-validation setting is beyond maximum size
1758 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1759 * an EP0 transfer following the Control Write is a Control Read,
1760 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1761 * pre-validation count.
1762 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1763                  *   the pre-validation count cannot cause an unexpected validation
1764 */
1765 net2272_write(dev, PAGESEL, 0);
1766 net2272_write(dev, EP_TRANSFER2, 0xff);
1767 net2272_write(dev, EP_TRANSFER1, 0xff);
1768 net2272_write(dev, EP_TRANSFER0, 0xff);
1769
1770 u.raw[0] = net2272_read(dev, SETUP0);
1771 u.raw[1] = net2272_read(dev, SETUP1);
1772 u.raw[2] = net2272_read(dev, SETUP2);
1773 u.raw[3] = net2272_read(dev, SETUP3);
1774 u.raw[4] = net2272_read(dev, SETUP4);
1775 u.raw[5] = net2272_read(dev, SETUP5);
1776 u.raw[6] = net2272_read(dev, SETUP6);
1777 u.raw[7] = net2272_read(dev, SETUP7);
1778 /*
1779                  * If you have a big-endian cpu, make sure le16_to_cpus
1780                  * performs the proper byte swapping here...
1781 */
1782 le16_to_cpus(&u.r.wValue);
1783 le16_to_cpus(&u.r.wIndex);
1784 le16_to_cpus(&u.r.wLength);
1785
1786 /* ack the irq */
1787 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1788 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1789
1790 /* watch control traffic at the token level, and force
1791 * synchronization before letting the status phase happen.
1792 */
1793 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1794 if (ep->is_in) {
1795 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1796 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1797 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1798 stop_out_naking(ep);
1799 } else
1800 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1801 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1802 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1803 net2272_ep_write(ep, EP_IRQENB, scratch);
1804
1805 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1806 goto delegate;
1807 switch (u.r.bRequest) {
1808 case USB_REQ_GET_STATUS: {
1809 struct net2272_ep *e;
1810 u16 status = 0;
1811
1812 switch (u.r.bRequestType & USB_RECIP_MASK) {
1813 case USB_RECIP_ENDPOINT:
1814 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1815 if (!e || u.r.wLength > 2)
1816 goto do_stall;
1817 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1818 status = cpu_to_le16(1);
1819 else
1820 status = cpu_to_le16(0);
1821
1822 /* don't bother with a request object! */
1823 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1824 writew(status, net2272_reg_addr(dev, EP_DATA));
1825 set_fifo_bytecount(&dev->ep[0], 0);
1826 allow_status(ep);
1827 dev_vdbg(dev->dev, "%s stat %02x\n",
1828 ep->ep.name, status);
1829 goto next_endpoints;
1830 case USB_RECIP_DEVICE:
1831 if (u.r.wLength > 2)
1832 goto do_stall;
1833 if (dev->gadget.is_selfpowered)
1834 status = (1 << USB_DEVICE_SELF_POWERED);
1835
1836 /* don't bother with a request object! */
1837 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1838 writew(status, net2272_reg_addr(dev, EP_DATA));
1839 set_fifo_bytecount(&dev->ep[0], 0);
1840 allow_status(ep);
1841 dev_vdbg(dev->dev, "device stat %02x\n", status);
1842 goto next_endpoints;
1843 case USB_RECIP_INTERFACE:
1844 if (u.r.wLength > 2)
1845 goto do_stall;
1846
1847 /* don't bother with a request object! */
1848 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849 writew(status, net2272_reg_addr(dev, EP_DATA));
1850 set_fifo_bytecount(&dev->ep[0], 0);
1851 allow_status(ep);
1852 dev_vdbg(dev->dev, "interface status %02x\n", status);
1853 goto next_endpoints;
1854 }
1855
1856 break;
1857 }
1858 case USB_REQ_CLEAR_FEATURE: {
1859 struct net2272_ep *e;
1860
1861 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1862 goto delegate;
1863 if (u.r.wValue != USB_ENDPOINT_HALT ||
1864 u.r.wLength != 0)
1865 goto do_stall;
1866 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1867 if (!e)
1868 goto do_stall;
1869 if (e->wedged) {
1870 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1871 ep->ep.name);
1872 } else {
1873 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1874 clear_halt(e);
1875 }
1876 allow_status(ep);
1877 goto next_endpoints;
1878 }
1879 case USB_REQ_SET_FEATURE: {
1880 struct net2272_ep *e;
1881
1882 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1883 if (u.r.wIndex != NORMAL_OPERATION)
1884 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1885 allow_status(ep);
1886 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1887 goto next_endpoints;
1888 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1889 goto delegate;
1890 if (u.r.wValue != USB_ENDPOINT_HALT ||
1891 u.r.wLength != 0)
1892 goto do_stall;
1893 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1894 if (!e)
1895 goto do_stall;
1896 set_halt(e);
1897 allow_status(ep);
1898 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1899 goto next_endpoints;
1900 }
1901 case USB_REQ_SET_ADDRESS: {
1902 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1903 allow_status(ep);
1904 break;
1905 }
1906 default:
1907 delegate:
1908 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1909 "ep_cfg %08x\n",
1910 u.r.bRequestType, u.r.bRequest,
1911 u.r.wValue, u.r.wIndex,
1912 net2272_ep_read(ep, EP_CFG));
1913 spin_unlock(&dev->lock);
1914 tmp = dev->driver->setup(&dev->gadget, &u.r);
1915 spin_lock(&dev->lock);
1916 }
1917
1918 /* stall ep0 on error */
1919 if (tmp < 0) {
1920 do_stall:
1921 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1922 u.r.bRequestType, u.r.bRequest, tmp);
1923 dev->protocol_stall = 1;
1924 }
1925 /* endpoint dma irq? */
1926 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1927 net2272_cancel_dma(dev);
1928 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1929 stat &= ~(1 << DMA_DONE_INTERRUPT);
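                /* DMA_ENDPOINT_SELECT clear selects ep-a (1), set selects ep-b (2) */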
1930 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1931 ? 2 : 1;
1932
1933 ep = &dev->ep[num];
1934 net2272_handle_dma(ep);
1935 }
1936
1937 next_endpoints:
1938 /* endpoint data irq? */
1939 scratch = stat & 0x0f;
1940 stat &= ~0x0f;
1941 for (num = 0; scratch; num++) {
1942 u8 t;
1943
1944 /* does this endpoint's FIFO and queue need tending? */
1945 t = 1 << num;
1946 if ((scratch & t) == 0)
1947 continue;
1948 scratch ^= t;
1949
1950 ep = &dev->ep[num];
1951 net2272_handle_ep(ep);
1952 }
1953
1954 /* some interrupts we can just ignore */
1955 stat &= ~(1 << SOF_INTERRUPT);
1956
1957 if (stat)
1958 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1959 }
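/*
 * Illustrative sketch only -- not part of this driver, and guarded out
 * with "#if 0": the GET_STATUS device/interface cases above repeat the
 * same four-step "answer from ep0 without a request object" sequence.
 * A hypothetical helper such as net2272_ep0_send_u16() (name made up
 * here) could factor that out, using only calls already defined in this
 * file.
 */
#if 0
static void net2272_ep0_send_u16(struct net2272 *dev, u16 value)
{
	struct net2272_ep *ep0 = &dev->ep[0];

	/* mask ep0 interrupts while the reply is loaded by hand */
	net2272_ep_write(ep0, EP_IRQENB, 0);
	/* place the two reply bytes directly into the ep0 FIFO */
	writew(value, net2272_reg_addr(dev, EP_DATA));
	set_fifo_bytecount(ep0, 0);
	/* allow the status stage to complete */
	allow_status(ep0);
}
#endif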
1960
1961 static void
1962 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1963 {
1964 u8 tmp, mask;
1965
1966 /* after disconnect there's nothing else to do! */
1967 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1968 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1969
1970 if (stat & tmp) {
1971 bool reset = false;
1972 bool disconnect = false;
1973
1974 /*
1975 * Ignore disconnects and resets if the speed hasn't been set.
1976 * VBUS can bounce and there's always an initial reset.
1977 */
1978 net2272_write(dev, IRQSTAT1, tmp);
1979 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1980 if ((stat & (1 << VBUS_INTERRUPT)) &&
1981 (net2272_read(dev, USBCTL1) &
1982 (1 << VBUS_PIN)) == 0) {
1983 disconnect = true;
1984 dev_dbg(dev->dev, "disconnect %s\n",
1985 dev->driver->driver.name);
1986 } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1987 (net2272_read(dev, USBCTL1) & mask)
1988 == 0) {
1989 reset = true;
1990 dev_dbg(dev->dev, "reset %s\n",
1991 dev->driver->driver.name);
1992 }
1993
1994 if (disconnect || reset) {
1995 stop_activity(dev, dev->driver);
1996 net2272_ep0_start(dev);
1997 spin_unlock(&dev->lock);
1998 if (reset)
1999 usb_gadget_udc_reset
2000 (&dev->gadget, dev->driver);
2001 else
2002 (dev->driver->disconnect)
2003 (&dev->gadget);
2004 spin_lock(&dev->lock);
2005 return;
2006 }
2007 }
2008 stat &= ~tmp;
2009
2010 if (!stat)
2011 return;
2012 }
2013
2014 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2015 if (stat & tmp) {
2016 net2272_write(dev, IRQSTAT1, tmp);
2017 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2018 if (dev->driver->suspend)
2019 dev->driver->suspend(&dev->gadget);
2020 if (!enable_suspend) {
2021 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2022 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2023 }
2024 } else {
2025 if (dev->driver->resume)
2026 dev->driver->resume(&dev->gadget);
2027 }
2028 stat &= ~tmp;
2029 }
2030
2031 /* clear any other status/irqs */
2032 if (stat)
2033 net2272_write(dev, IRQSTAT1, stat);
2034
2035 /* some status we can just ignore */
2036 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2037 | (1 << SUSPEND_REQUEST_INTERRUPT)
2038 | (1 << RESUME_INTERRUPT));
2039 if (!stat)
2040 return;
2041 else
2042 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2043 }
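/*
 * Illustrative sketch only -- not part of this driver, and guarded out
 * with "#if 0": net2272_handle_stat1_irqs() above ends up invoking the
 * bound gadget driver's callbacks (disconnect on VBUS loss, reset via
 * usb_gadget_udc_reset(), suspend/resume on bus state changes).  A
 * minimal, hypothetical gadget driver supplying those hooks might look
 * roughly like this; the "example_" names are made up, and the required
 * .bind/.unbind/.setup methods are omitted for brevity.  Such a driver
 * is normally bound to the UDC through usb_gadget_probe_driver(), most
 * often indirectly via the composite framework.
 */
#if 0
static void example_disconnect(struct usb_gadget *g)
{
	/* host vanished or VBUS dropped: tear down any active function */
}

static void example_suspend(struct usb_gadget *g)
{
	/* bus suspended; a bus-powered design would cut power draw here */
}

static void example_resume(struct usb_gadget *g)
{
	/* bus activity resumed */
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.disconnect	= example_disconnect,
	.suspend	= example_suspend,
	.resume		= example_resume,
	.driver		= {
		.name	= "example_gadget",
	},
};
#endif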
2044
2045 static irqreturn_t net2272_irq(int irq, void *_dev)
2046 {
2047 struct net2272 *dev = _dev;
2048 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2049 u32 intcsr;
2050 #endif
2051 #if defined(PLX_PCI_RDK)
2052 u8 dmareq;
2053 #endif
2054 spin_lock(&dev->lock);
2055 #if defined(PLX_PCI_RDK)
2056 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2057
2058 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2059 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2060 dev->rdk1.plx9054_base_addr + INTCSR);
2061 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2062 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2063 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2064 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2065 dev->rdk1.plx9054_base_addr + INTCSR);
2066 }
2067 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2068 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2069 dev->rdk1.plx9054_base_addr + DMACSR0);
2070
2071 dmareq = net2272_read(dev, DMAREQ);
2072 if (dmareq & 0x01)
2073 net2272_handle_dma(&dev->ep[2]);
2074 else
2075 net2272_handle_dma(&dev->ep[1]);
2076 }
2077 #endif
2078 #if defined(PLX_PCI_RDK2)
2079 /* see if PCI int for us by checking irqstat */
2080 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2081 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2082 spin_unlock(&dev->lock);
2083 return IRQ_NONE;
2084 }
2085 /* dma interrupts are not checked separately on the RDK2 */
2086 #endif
2087 /* Platform/device interrupt handler */
2088 #if !defined(PLX_PCI_RDK)
2089 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2090 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2091 #endif
2092 spin_unlock(&dev->lock);
2093
2094 return IRQ_HANDLED;
2095 }
2096
2097 static int net2272_present(struct net2272 *dev)
2098 {
2099 /*
2100 * Quick test to see if CPU can communicate properly with the NET2272.
2101 * Verifies connection using writes and reads to write/read and
2102 * read-only registers.
2103 *
2104 * This routine is strongly recommended, especially during early bring-up
2105 * of new hardware; for designs that do not apply a Power-On Self-Test
2106 * (POST) it may be discarded (or perhaps minimized).
2107 */
2108 unsigned int ii;
2109 u8 val, refval;
2110
2111 /* Verify NET2272 write/read SCRATCH register can write and read */
2112 refval = net2272_read(dev, SCRATCH);
2113 for (ii = 0; ii < 0x100; ii += 7) {
2114 net2272_write(dev, SCRATCH, ii);
2115 val = net2272_read(dev, SCRATCH);
2116 if (val != ii) {
2117 dev_dbg(dev->dev,
2118 "%s: write/read SCRATCH register test failed: "
2119 "wrote:0x%2.2x, read:0x%2.2x\n",
2120 __func__, ii, val);
2121 return -EINVAL;
2122 }
2123 }
2124 /* To be nice, we write the original SCRATCH value back: */
2125 net2272_write(dev, SCRATCH, refval);
2126
2127 /* Verify NET2272 CHIPREV register is read-only: */
2128 refval = net2272_read(dev, CHIPREV_2272);
2129 for (ii = 0; ii < 0x100; ii += 7) {
2130 net2272_write(dev, CHIPREV_2272, ii);
2131 val = net2272_read(dev, CHIPREV_2272);
2132 if (val != refval) {
2133 dev_dbg(dev->dev,
2134 "%s: write/read CHIPREV register test failed: "
2135 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2136 __func__, ii, val, refval);
2137 return -EINVAL;
2138 }
2139 }
2140
2141 /*
2142 * Verify NET2272's "NET2270 legacy revision" register
2143 * - NET2272 has two revision registers. The NET2270 legacy revision
2144 * register should read the same value, regardless of the NET2272
2145 * silicon revision. The legacy register exists so that firmware
2146 * written for the NET2270 can also drive the NET2272.
2147 */
2148 val = net2272_read(dev, CHIPREV_LEGACY);
2149 if (val != NET2270_LEGACY_REV) {
2150 /*
2151 * Unexpected legacy revision value
2152 * - Perhaps the chip is a NET2270?
2153 */
2154 dev_dbg(dev->dev,
2155 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2156 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2157 __func__, NET2270_LEGACY_REV, val);
2158 return -EINVAL;
2159 }
2160
2161 /*
2162 * Verify NET2272 silicon revision
2163 * - This revision register is appropriate for the silicon version
2164 * of the NET2272
2165 */
2166 val = net2272_read(dev, CHIPREV_2272);
2167 switch (val) {
2168 case CHIPREV_NET2272_R1:
2169 /*
2170 * NET2272 Rev 1 has DMA related errata:
2171 * - Newer silicon (Rev 1A or better) required
2172 */
2173 dev_dbg(dev->dev,
2174 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2175 __func__);
2176 break;
2177 case CHIPREV_NET2272_R1A:
2178 break;
2179 default:
2180 /* NET2272 silicon version *may* not work with this firmware */
2181 dev_dbg(dev->dev,
2182 "%s: unexpected silicon revision register value: "
2183 " CHIPREV_2272: 0x%2.2x\n",
2184 __func__, val);
2185 /*
2186 * Return Success, even though the chip rev is not an expected value
2187 * - Older, pre-built firmware can attempt to operate on newer silicon
2188 * - Often, new silicon is perfectly compatible
2189 */
2190 }
2191
2192 /* Success: NET2272 checks out OK */
2193 return 0;
2194 }
2195
2196 static void
2197 net2272_gadget_release(struct device *_dev)
2198 {
2199 struct net2272 *dev = dev_get_drvdata(_dev);
2200 kfree(dev);
2201 }
2202
2203 /*---------------------------------------------------------------------------*/
2204
2205 static void
2206 net2272_remove(struct net2272 *dev)
2207 {
2208 usb_del_gadget_udc(&dev->gadget);
2209 free_irq(dev->irq, dev);
2210 iounmap(dev->base_addr);
2211 device_remove_file(dev->dev, &dev_attr_registers);
2212
2213 dev_info(dev->dev, "unbind\n");
2214 }
2215
2216 static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2217 {
2218 struct net2272 *ret;
2219
2220 if (!irq) {
2221 dev_dbg(dev, "No IRQ!\n");
2222 return ERR_PTR(-ENODEV);
2223 }
2224
2225 /* alloc, and start init */
2226 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2227 if (!ret)
2228 return ERR_PTR(-ENOMEM);
2229
2230 spin_lock_init(&ret->lock);
2231 ret->irq = irq;
2232 ret->dev = dev;
2233 ret->gadget.ops = &net2272_ops;
2234 ret->gadget.max_speed = USB_SPEED_HIGH;
2235
2236 /* the "gadget" abstracts/virtualizes the controller */
2237 ret->gadget.name = driver_name;
2238
2239 return ret;
2240 }
2241
2242 static int
2243 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2244 {
2245 int ret;
2246
2247 /* See if the NET2272 is there... */
2248 if (net2272_present(dev)) {
2249 dev_warn(dev->dev, "2272 not found!\n");
2250 ret = -ENODEV;
2251 goto err;
2252 }
2253
2254 net2272_usb_reset(dev);
2255 net2272_usb_reinit(dev);
2256
2257 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2258 if (ret) {
2259 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2260 goto err;
2261 }
2262
2263 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2264
2265 /* done */
2266 dev_info(dev->dev, "%s\n", driver_desc);
2267 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2268 dev->irq, dev->base_addr, dev->chiprev,
2269 dma_mode_string());
2270 dev_info(dev->dev, "version: %s\n", driver_vers);
2271
2272 ret = device_create_file(dev->dev, &dev_attr_registers);
2273 if (ret)
2274 goto err_irq;
2275
2276 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2277 net2272_gadget_release);
2278 if (ret)
2279 goto err_add_udc;
2280
2281 return 0;
2282
2283 err_add_udc:
2284 device_remove_file(dev->dev, &dev_attr_registers);
2285 err_irq:
2286 free_irq(dev->irq, dev);
2287 err:
2288 return ret;
2289 }
2290
2291 #ifdef CONFIG_USB_PCI
2292
2293 /*
2294 * wrap this driver around the specified device, but
2295 * don't respond over USB until a gadget driver binds to us
2296 */
2297
2298 static int
2299 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2300 {
2301 unsigned long resource, len, tmp;
2302 void __iomem *mem_mapped_addr[4];
2303 int ret, i;
2304
2305 /*
2306 * BAR 0 holds PLX 9054 config registers
2307 * BAR 1 is i/o memory; unused here
2308 * BAR 2 holds EPLD config registers
2309 * BAR 3 holds NET2272 registers
2310 */
2311
2312 /* Find and map all address spaces */
2313 for (i = 0; i < 4; ++i) {
2314 if (i == 1)
2315 continue; /* BAR1 unused */
2316
2317 resource = pci_resource_start(pdev, i);
2318 len = pci_resource_len(pdev, i);
2319
2320 if (!request_mem_region(resource, len, driver_name)) {
2321 dev_dbg(dev->dev, "controller already in use\n");
2322 ret = -EBUSY;
2323 goto err;
2324 }
2325
2326 mem_mapped_addr[i] = ioremap(resource, len);
2327 if (mem_mapped_addr[i] == NULL) {
2328 release_mem_region(resource, len);
2329 dev_dbg(dev->dev, "can't map memory\n");
2330 ret = -EFAULT;
2331 goto err;
2332 }
2333 }
2334
2335 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2336 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2337 dev->base_addr = mem_mapped_addr[3];
2338
2339 /* Set PLX 9054 bus width (16 bits) */
2340 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2341 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2342 dev->rdk1.plx9054_base_addr + LBRD1);
2343
2344 /* Enable PLX 9054 Interrupts */
2345 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2346 (1 << PCI_INTERRUPT_ENABLE) |
2347 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2348 dev->rdk1.plx9054_base_addr + INTCSR);
2349
2350 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2351 dev->rdk1.plx9054_base_addr + DMACSR0);
2352
2353 /* reset */
2354 writeb((1 << EPLD_DMA_ENABLE) |
2355 (1 << DMA_CTL_DACK) |
2356 (1 << DMA_TIMEOUT_ENABLE) |
2357 (1 << USER) |
2358 (0 << MPX_MODE) |
2359 (1 << BUSWIDTH) |
2360 (1 << NET2272_RESET),
2361 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2362
2363 mb();
2364 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2365 ~(1 << NET2272_RESET),
2366 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2367 udelay(200);
2368
2369 return 0;
2370
2371 err:
2372 while (--i >= 0) {
2373 iounmap(mem_mapped_addr[i]);
2374 release_mem_region(pci_resource_start(pdev, i),
2375 pci_resource_len(pdev, i));
2376 }
2377
2378 return ret;
2379 }
2380
2381 static int
2382 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2383 {
2384 unsigned long resource, len;
2385 void __iomem *mem_mapped_addr[2];
2386 int ret, i;
2387
2388 /*
2389 * BAR 0 holds FPGA config registers
2390 * BAR 1 holds NET2272 registers
2391 */
2392
2393 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2394 for (i = 0; i < 2; ++i) {
2395 resource = pci_resource_start(pdev, i);
2396 len = pci_resource_len(pdev, i);
2397
2398 if (!request_mem_region(resource, len, driver_name)) {
2399 dev_dbg(dev->dev, "controller already in use\n");
2400 ret = -EBUSY;
2401 goto err;
2402 }
2403
2404 mem_mapped_addr[i] = ioremap(resource, len);
2405 if (mem_mapped_addr[i] == NULL) {
2406 release_mem_region(resource, len);
2407 dev_dbg(dev->dev, "can't map memory\n");
2408 ret = -EFAULT;
2409 goto err;
2410 }
2411 }
2412
2413 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2414 dev->base_addr = mem_mapped_addr[1];
2415
2416 mb();
2417 /* Set 2272 bus width (16 bits) and reset */
2418 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2419 udelay(200);
2420 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2421 /* Print fpga version number */
2422 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2423 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2424 /* Enable FPGA Interrupts */
2425 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2426
2427 return 0;
2428
2429 err:
2430 while (--i >= 0) {
2431 iounmap(mem_mapped_addr[i]);
2432 release_mem_region(pci_resource_start(pdev, i),
2433 pci_resource_len(pdev, i));
2434 }
2435
2436 return ret;
2437 }
2438
2439 static int
2440 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2441 {
2442 struct net2272 *dev;
2443 int ret;
2444
2445 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2446 if (IS_ERR(dev))
2447 return PTR_ERR(dev);
2448 dev->dev_id = pdev->device;
2449
2450 if (pci_enable_device(pdev) < 0) {
2451 ret = -ENODEV;
2452 goto err_free;
2453 }
2454
2455 pci_set_master(pdev);
2456
2457 switch (pdev->device) {
2458 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2459 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2460 default: BUG();
2461 }
2462 if (ret)
2463 goto err_pci;
2464
2465 ret = net2272_probe_fin(dev, 0);
2466 if (ret)
2467 goto err_pci;
2468
2469 pci_set_drvdata(pdev, dev);
2470
2471 return 0;
2472
2473 err_pci:
2474 pci_disable_device(pdev);
2475 err_free:
2476 kfree(dev);
2477
2478 return ret;
2479 }
2480
2481 static void
2482 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2483 {
2484 int i;
2485
2486 /* disable PLX 9054 interrupts */
2487 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2488 ~(1 << PCI_INTERRUPT_ENABLE),
2489 dev->rdk1.plx9054_base_addr + INTCSR);
2490
2491 /* clean up resources allocated during probe() */
2492 iounmap(dev->rdk1.plx9054_base_addr);
2493 iounmap(dev->rdk1.epld_base_addr);
2494
2495 for (i = 0; i < 4; ++i) {
2496 if (i == 1)
2497 continue; /* BAR1 unused */
2498 release_mem_region(pci_resource_start(pdev, i),
2499 pci_resource_len(pdev, i));
2500 }
2501 }
2502
2503 static void
2504 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2505 {
2506 int i;
2507
2508 /* disable FPGA interrupts -- not done here; if needed, clearing the
2509 FPGA's interrupt enable register would look something like:
2510
2511 writel(0, dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2512 */
2513
2514 /* clean up resources allocated during probe() */
2515 iounmap(dev->rdk2.fpga_base_addr);
2516
2517 for (i = 0; i < 2; ++i)
2518 release_mem_region(pci_resource_start(pdev, i),
2519 pci_resource_len(pdev, i));
2520 }
2521
2522 static void
2523 net2272_pci_remove(struct pci_dev *pdev)
2524 {
2525 struct net2272 *dev = pci_get_drvdata(pdev);
2526
2527 net2272_remove(dev);
2528
2529 switch (pdev->device) {
2530 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2531 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2532 default: BUG();
2533 }
2534
2535 pci_disable_device(pdev);
2536
2537 kfree(dev);
2538 }
2539
2540 /* Table of matching PCI IDs */
2541 static struct pci_device_id pci_ids[] = {
2542 { /* RDK 1 card */
2543 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2544 .class_mask = 0,
2545 .vendor = PCI_VENDOR_ID_PLX,
2546 .device = PCI_DEVICE_ID_RDK1,
2547 .subvendor = PCI_ANY_ID,
2548 .subdevice = PCI_ANY_ID,
2549 },
2550 { /* RDK 2 card */
2551 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2552 .class_mask = 0,
2553 .vendor = PCI_VENDOR_ID_PLX,
2554 .device = PCI_DEVICE_ID_RDK2,
2555 .subvendor = PCI_ANY_ID,
2556 .subdevice = PCI_ANY_ID,
2557 },
2558 { }
2559 };
2560 MODULE_DEVICE_TABLE(pci, pci_ids);
2561
2562 static struct pci_driver net2272_pci_driver = {
2563 .name = driver_name,
2564 .id_table = pci_ids,
2565
2566 .probe = net2272_pci_probe,
2567 .remove = net2272_pci_remove,
2568 };
2569
2570 static int net2272_pci_register(void)
2571 {
2572 return pci_register_driver(&net2272_pci_driver);
2573 }
2574
2575 static void net2272_pci_unregister(void)
2576 {
2577 pci_unregister_driver(&net2272_pci_driver);
2578 }
2579
2580 #else
2581 static inline int net2272_pci_register(void) { return 0; }
2582 static inline void net2272_pci_unregister(void) { }
2583 #endif
2584
2585 /*---------------------------------------------------------------------------*/
2586
2587 static int
2588 net2272_plat_probe(struct platform_device *pdev)
2589 {
2590 struct net2272 *dev;
2591 int ret;
2592 unsigned int irqflags;
2593 resource_size_t base, len;
2594 struct resource *iomem, *iomem_bus, *irq_res;
2595
2596 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2597 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2598 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2599 if (!irq_res || !iomem) {
2600 dev_err(&pdev->dev, "must provide irq/base addr\n");
2601 return -EINVAL;
2602 }
2603
2604 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2605 if (IS_ERR(dev))
2606 return PTR_ERR(dev);
2607
2608 irqflags = 0;
2609 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2610 irqflags |= IRQF_TRIGGER_RISING;
2611 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2612 irqflags |= IRQF_TRIGGER_FALLING;
2613 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2614 irqflags |= IRQF_TRIGGER_HIGH;
2615 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2616 irqflags |= IRQF_TRIGGER_LOW;
2617
2618 base = iomem->start;
2619 len = resource_size(iomem);
2620 if (iomem_bus)
2621 dev->base_shift = iomem_bus->start;
2622
2623 if (!request_mem_region(base, len, driver_name)) {
2624 dev_dbg(dev->dev, "get request memory region!\n");
2625 ret = -EBUSY;
2626 goto err;
2627 }
2628 dev->base_addr = ioremap(base, len);
2629 if (!dev->base_addr) {
2630 dev_dbg(dev->dev, "can't map memory\n");
2631 ret = -EFAULT;
2632 goto err_req;
2633 }
2634
2635 ret = net2272_probe_fin(dev, irqflags);
2636 if (ret)
2637 goto err_io;
2638
2639 platform_set_drvdata(pdev, dev);
2640 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2641 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2642
2643 return 0;
2644
2645 err_io:
2646 iounmap(dev->base_addr);
2647 err_req:
2648 release_mem_region(base, len);
2649 err:
2650 kfree(dev);
2651
2652 return ret;
2653 }
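/*
 * Illustrative sketch only -- not part of this driver, and guarded out
 * with "#if 0": net2272_plat_probe() above expects a platform device
 * named "net2272" with a MEM resource for the register window, an IRQ
 * resource (whose IORESOURCE_IRQ_* flags are translated into
 * IRQF_TRIGGER_* flags), and an optional BUS resource whose start value
 * becomes the address shift (dev->base_shift).  A board file might
 * register such a device roughly as below; the base address, IRQ number
 * and shift are made-up example values.
 */
#if 0
static struct resource board_net2272_resources[] = {
	[0] = {				/* register window */
		.start	= 0x20000000,
		.end	= 0x20000fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {				/* interrupt line */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
	[2] = {				/* address-line shift */
		.start	= 1,
		.end	= 1,
		.flags	= IORESOURCE_BUS,
	},
};

static struct platform_device board_net2272_device = {
	.name		= "net2272",
	.id		= -1,
	.resource	= board_net2272_resources,
	.num_resources	= ARRAY_SIZE(board_net2272_resources),
};

/* board init code would then call platform_device_register(&board_net2272_device); */
#endif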
2654
2655 static int
2656 net2272_plat_remove(struct platform_device *pdev)
2657 {
2658 struct net2272 *dev = platform_get_drvdata(pdev);
2659
2660 net2272_remove(dev);
2661
2662 release_mem_region(pdev->resource[0].start,
2663 resource_size(&pdev->resource[0]));
2664
2665 kfree(dev);
2666
2667 return 0;
2668 }
2669
2670 static struct platform_driver net2272_plat_driver = {
2671 .probe = net2272_plat_probe,
2672 .remove = net2272_plat_remove,
2673 .driver = {
2674 .name = driver_name,
2675 },
2676 /* FIXME .suspend, .resume */
2677 };
2678 MODULE_ALIAS("platform:net2272");
2679
2680 static int __init net2272_init(void)
2681 {
2682 int ret;
2683
2684 ret = net2272_pci_register();
2685 if (ret)
2686 return ret;
2687 ret = platform_driver_register(&net2272_plat_driver);
2688 if (ret)
2689 goto err_pci;
2690 return ret;
2691
2692 err_pci:
2693 net2272_pci_unregister();
2694 return ret;
2695 }
2696 module_init(net2272_init);
2697
2698 static void __exit net2272_cleanup(void)
2699 {
2700 net2272_pci_unregister();
2701 platform_driver_unregister(&net2272_plat_driver);
2702 }
2703 module_exit(net2272_cleanup);
2704
2705 MODULE_DESCRIPTION(DRIVER_DESC);
2706 MODULE_AUTHOR("PLX Technology, Inc.");
2707 MODULE_LICENSE("GPL");