/*
 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
 *
 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
 *
 * Licensed under GPL version 2 only.
 */
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
28 #include <asm/cacheflush.h>
#ifndef PCI_VENDOR_ID_RRAPIDS
#define PCI_VENDOR_ID_RRAPIDS 0x17D2
#endif

#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
#endif

#define POCH_NCHANNELS 2

#define MAX_POCH_CARDS 8
#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)

#define DRV_NAME "poch"
#define PFX DRV_NAME ": "

/*
 * BAR0 Bridge Register Definitions
 */

#define BRIDGE_REV_REG			0x0
#define BRIDGE_INT_MASK_REG		0x4
#define BRIDGE_INT_STAT_REG		0x8

/* 0x1U: left-shifting a signed 1 into bit 31 is undefined behavior. */
#define BRIDGE_INT_ACTIVE		(0x1U << 31)
#define BRIDGE_INT_FPGA			(0x1 << 2)
#define BRIDGE_INT_TEMP_FAIL		(0x1 << 1)
#define BRIDGE_INT_TEMP_WARN		(0x1 << 0)

#define BRIDGE_FPGA_RESET_REG		0xC

#define BRIDGE_CARD_POWER_REG		0x10
#define BRIDGE_CARD_POWER_EN		(0x1 << 0)
#define BRIDGE_CARD_POWER_PROG_DONE	(0x1U << 31)

#define BRIDGE_JTAG_REG			0x14
#define BRIDGE_DMA_GO_REG		0x18
#define BRIDGE_STAT_0_REG		0x1C
#define BRIDGE_STAT_1_REG		0x20
#define BRIDGE_STAT_2_REG		0x24
#define BRIDGE_STAT_3_REG		0x28
#define BRIDGE_TEMP_STAT_REG		0x2C
#define BRIDGE_TEMP_THRESH_REG		0x30
#define BRIDGE_EEPROM_REVSEL_REG	0x34
#define BRIDGE_CIS_STRUCT_REG		0x100
#define BRIDGE_BOARDREV_REG		0x124

/*
 * BAR1 FPGA Register Definitions
 */

#define FPGA_IFACE_REV_REG		0x0
#define FPGA_RX_BLOCK_SIZE_REG		0x8
#define FPGA_TX_BLOCK_SIZE_REG		0xC
#define FPGA_RX_BLOCK_COUNT_REG		0x10
#define FPGA_TX_BLOCK_COUNT_REG		0x14
#define FPGA_RX_CURR_DMA_BLOCK_REG	0x18
#define FPGA_TX_CURR_DMA_BLOCK_REG	0x1C
#define FPGA_RX_GROUP_COUNT_REG		0x20
#define FPGA_TX_GROUP_COUNT_REG		0x24
#define FPGA_RX_CURR_GROUP_REG		0x28
#define FPGA_TX_CURR_GROUP_REG		0x2C
#define FPGA_RX_CURR_PCI_REG		0x38
#define FPGA_TX_CURR_PCI_REG		0x3C
#define FPGA_RX_GROUP0_START_REG	0x40
#define FPGA_TX_GROUP0_START_REG	0xC0
#define FPGA_DMA_DESC_1_REG		0x140
#define FPGA_DMA_DESC_2_REG		0x144
#define FPGA_DMA_DESC_3_REG		0x148
#define FPGA_DMA_DESC_4_REG		0x14C

#define FPGA_DMA_INT_STAT_REG		0x150
#define FPGA_DMA_INT_MASK_REG		0x154
#define FPGA_DMA_INT_RX			(1 << 0)
#define FPGA_DMA_INT_TX			(1 << 1)

#define FPGA_RX_GROUPS_PER_INT_REG	0x158
#define FPGA_TX_GROUPS_PER_INT_REG	0x15C
#define FPGA_DMA_ADR_PAGE_REG		0x160
#define FPGA_FPGA_REV_REG		0x200

#define FPGA_ADC_CLOCK_CTL_REG		0x204
#define FPGA_ADC_CLOCK_CTL_OSC_EN	(0x1 << 3)
#define FPGA_ADC_CLOCK_LOCAL_CLK	(0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
#define FPGA_ADC_CLOCK_EXT_SAMP_CLK	0x0

#define FPGA_ADC_DAC_EN_REG		0x208
#define FPGA_ADC_DAC_EN_DAC_OFF		(0x1 << 1)
#define FPGA_ADC_DAC_EN_ADC_OFF		(0x1 << 0)

#define FPGA_INT_STAT_REG		0x20C
#define FPGA_INT_MASK_REG		0x210
#define FPGA_INT_PLL_UNLOCKED		(0x1 << 9)
#define FPGA_INT_DMA_CORE		(0x1 << 8)
#define FPGA_INT_TX_FF_EMPTY		(0x1 << 7)
#define FPGA_INT_RX_FF_EMPTY		(0x1 << 6)
#define FPGA_INT_TX_FF_OVRFLW		(0x1 << 3)
#define FPGA_INT_RX_FF_OVRFLW		(0x1 << 2)
#define FPGA_INT_TX_ACQ_DONE		(0x1 << 1)
#define FPGA_INT_RX_ACQ_DONE		(0x1)

#define FPGA_RX_CTL_REG			0x214
#define FPGA_RX_CTL_FIFO_FLUSH		(0x1 << 9)
#define FPGA_RX_CTL_SYNTH_DATA		(0x1 << 8)
#define FPGA_RX_CTL_CONT_CAP		(0x0 << 1)
#define FPGA_RX_CTL_SNAP_CAP		(0x1 << 1)

#define FPGA_RX_ARM_REG			0x21C

#define FPGA_DOM_REG			0x224
#define FPGA_DOM_DCM_RESET		(0x1 << 5)
#define FPGA_DOM_SOFT_RESET		(0x1 << 4)
#define FPGA_DOM_DUAL_M_SG_DMA		(0x0)
#define FPGA_DOM_TARGET_ACCESS		(0x1)

#define FPGA_TX_CTL_REG			0x228
#define FPGA_TX_CTL_FIFO_FLUSH		(0x1 << 9)
#define FPGA_TX_CTL_OUTPUT_ZERO		(0x0 << 2)
#define FPGA_TX_CTL_OUTPUT_CARDBUS	(0x1 << 2)
#define FPGA_TX_CTL_OUTPUT_ADC		(0x2 << 2)
#define FPGA_TX_CTL_OUTPUT_SNAPSHOT	(0x3 << 2)
#define FPGA_TX_CTL_LOOPBACK		(0x1 << 0)

#define FPGA_ENDIAN_MODE_REG		0x22C
#define FPGA_RX_FIFO_COUNT_REG		0x28C
#define FPGA_TX_ENABLE_REG		0x298
#define FPGA_TX_TRIGGER_REG		0x29C
#define FPGA_TX_DATAMEM_COUNT_REG	0x2A8
#define FPGA_CAP_FIFO_REG		0x300
#define FPGA_TX_SNAPSHOT_REG		0x8000
/*
 * Channel Index Definitions
 *
 * NOTE(review): the enum bodies were missing from this garbled
 * extraction and are reconstructed from their uses below
 * (CHNO_RX_CHANNEL/CHNO_TX_CHANNEL index poch_dev->channels;
 * CHANNEL_DIR_* is compared against channel->dir; CLK_SRC_* is used
 * in poch_card_clock_on()).  Verify ordering against the original
 * driver before relying on the numeric values.
 */

enum {
	CHNO_RX_CHANNEL,
	CHNO_TX_CHANNEL,
};

enum channel_dir {
	CHANNEL_DIR_RX,
	CHANNEL_DIR_TX,
};

enum clk_src {
	CLK_SRC_ON_BOARD,
	CLK_SRC_EXTERNAL,
};
177 struct poch_group_info
{
180 unsigned long user_offset
;
183 struct channel_info
{
186 atomic_t sys_block_size
;
187 atomic_t sys_group_size
;
188 atomic_t sys_group_count
;
190 enum channel_dir dir
;
192 unsigned long block_size
;
193 unsigned long group_size
;
194 unsigned long group_count
;
196 /* Contains the DMA address and VM offset of each group. */
197 struct poch_group_info
*groups
;
199 /* Contains the header and circular buffer exported to userspace. */
200 spinlock_t group_offsets_lock
;
202 /* Last group consumed by user space. */
203 unsigned int consumed
;
204 /* Last group indicated as 'complete' to user space. */
205 unsigned int transfer
;
207 wait_queue_head_t wq
;
210 unsigned int data_available
;
211 unsigned int space_available
;
214 void __iomem
*bridge_iomem
;
215 void __iomem
*fpga_iomem
;
216 spinlock_t
*iomem_lock
;
222 struct poch_counters counters
;
223 spinlock_t counters_lock
;
230 struct pci_dev
*pci_dev
;
231 unsigned int nchannels
;
232 struct channel_info channels
[POCH_NCHANNELS
];
235 /* Counts the no. of channels that have been opened. On first
236 * open, the card is powered on. On last channel close, the
237 * card is powered off.
241 void __iomem
*bridge_iomem
;
242 void __iomem
*fpga_iomem
;
243 spinlock_t iomem_lock
;
249 module_param(synth_rx
, bool, 0600);
250 MODULE_PARM_DESC(synth_rx
,
251 "Synthesize received values using a counter. Default: No");
254 module_param(loopback
, bool, 0600);
255 MODULE_PARM_DESC(loopback
,
256 "Enable hardware loopback of trasnmitted data. Default: No");
258 static dev_t poch_first_dev
;
259 static struct class *poch_cls
;
260 static DEFINE_IDR(poch_ids
);
262 static ssize_t
store_block_size(struct device
*dev
,
263 struct device_attribute
*attr
,
264 const char *buf
, size_t count
)
266 struct channel_info
*channel
= dev_get_drvdata(dev
);
267 unsigned long block_size
;
269 sscanf(buf
, "%lu", &block_size
);
270 atomic_set(&channel
->sys_block_size
, block_size
);
274 static DEVICE_ATTR(block_size
, S_IWUSR
|S_IWGRP
, NULL
, store_block_size
);
276 static ssize_t
store_group_size(struct device
*dev
,
277 struct device_attribute
*attr
,
278 const char *buf
, size_t count
)
280 struct channel_info
*channel
= dev_get_drvdata(dev
);
281 unsigned long group_size
;
283 sscanf(buf
, "%lu", &group_size
);
284 atomic_set(&channel
->sys_group_size
, group_size
);
288 static DEVICE_ATTR(group_size
, S_IWUSR
|S_IWGRP
, NULL
, store_group_size
);
290 static ssize_t
store_group_count(struct device
*dev
,
291 struct device_attribute
*attr
,
292 const char *buf
, size_t count
)
294 struct channel_info
*channel
= dev_get_drvdata(dev
);
295 unsigned long group_count
;
297 sscanf(buf
, "%lu", &group_count
);
298 atomic_set(&channel
->sys_group_count
, group_count
);
302 static DEVICE_ATTR(group_count
, S_IWUSR
|S_IWGRP
, NULL
, store_group_count
);
304 static ssize_t
show_direction(struct device
*dev
,
305 struct device_attribute
*attr
, char *buf
)
307 struct channel_info
*channel
= dev_get_drvdata(dev
);
310 len
= sprintf(buf
, "%s\n", (channel
->dir
? "tx" : "rx"));
313 static DEVICE_ATTR(dir
, S_IRUSR
|S_IRGRP
, show_direction
, NULL
);
315 static unsigned long npages(unsigned long bytes
)
317 if (bytes
% PAGE_SIZE
== 0)
318 return bytes
/ PAGE_SIZE
;
320 return (bytes
/ PAGE_SIZE
) + 1;
323 static ssize_t
show_mmap_size(struct device
*dev
,
324 struct device_attribute
*attr
, char *buf
)
326 struct channel_info
*channel
= dev_get_drvdata(dev
);
328 unsigned long mmap_size
;
329 unsigned long group_pages
;
330 unsigned long total_group_pages
;
332 group_pages
= npages(channel
->group_size
);
333 total_group_pages
= group_pages
* channel
->group_count
;
335 mmap_size
= total_group_pages
* PAGE_SIZE
;
336 len
= sprintf(buf
, "%lu\n", mmap_size
);
339 static DEVICE_ATTR(mmap_size
, S_IRUSR
|S_IRGRP
, show_mmap_size
, NULL
);
341 static struct device_attribute
*poch_class_attrs
[] = {
342 &dev_attr_block_size
,
343 &dev_attr_group_size
,
344 &dev_attr_group_count
,
349 static void poch_channel_free_groups(struct channel_info
*channel
)
353 for (i
= 0; i
< channel
->group_count
; i
++) {
354 struct poch_group_info
*group
;
357 group
= &channel
->groups
[i
];
358 order
= get_order(channel
->group_size
);
360 __free_pages(group
->pg
, order
);
364 static int poch_channel_alloc_groups(struct channel_info
*channel
)
367 unsigned long group_pages
;
369 group_pages
= npages(channel
->group_size
);
371 for (i
= 0; i
< channel
->group_count
; i
++) {
372 struct poch_group_info
*group
;
376 group
= &channel
->groups
[i
];
377 order
= get_order(channel
->group_size
);
380 * __GFP_COMP is required here since we are going to
381 * perform non-linear mapping to userspace. For more
382 * information read the vm_insert_page() function
386 gfp_mask
= GFP_KERNEL
| GFP_DMA32
| __GFP_ZERO
;
387 group
->pg
= alloc_pages(gfp_mask
, order
);
389 poch_channel_free_groups(channel
);
393 /* FIXME: This is the physical address not the bus
394 * address! This won't work in architectures that
395 * have an IOMMU. Can we use pci_map_single() for
398 group
->dma_addr
= page_to_pfn(group
->pg
) * PAGE_SIZE
;
399 group
->user_offset
= (i
* group_pages
) * PAGE_SIZE
;
401 printk(KERN_INFO PFX
"%ld: user_offset: 0x%lx\n", i
,
408 static int channel_latch_attr(struct channel_info
*channel
)
410 channel
->group_count
= atomic_read(&channel
->sys_group_count
);
411 channel
->group_size
= atomic_read(&channel
->sys_group_size
);
412 channel
->block_size
= atomic_read(&channel
->sys_block_size
);
414 if (channel
->group_count
== 0) {
415 printk(KERN_ERR PFX
"invalid group count %lu",
416 channel
->group_count
);
420 if (channel
->group_size
== 0 ||
421 channel
->group_size
< channel
->block_size
) {
422 printk(KERN_ERR PFX
"invalid group size %lu",
423 channel
->group_size
);
427 if (channel
->block_size
== 0 || (channel
->block_size
% 8) != 0) {
428 printk(KERN_ERR PFX
"invalid block size %lu",
429 channel
->block_size
);
433 if (channel
->group_size
% channel
->block_size
!= 0) {
435 "group size should be multiple of block size");
443 * Configure DMA group registers
445 static void channel_dma_init(struct channel_info
*channel
)
447 void __iomem
*fpga
= channel
->fpga_iomem
;
451 unsigned int group_in_page
;
456 u32 groups_per_int_reg
;
459 if (channel
->chno
== CHNO_RX_CHANNEL
) {
460 group_regs_base
= FPGA_RX_GROUP0_START_REG
;
461 block_size_reg
= FPGA_RX_BLOCK_SIZE_REG
;
462 block_count_reg
= FPGA_RX_BLOCK_COUNT_REG
;
463 group_count_reg
= FPGA_RX_GROUP_COUNT_REG
;
464 groups_per_int_reg
= FPGA_RX_GROUPS_PER_INT_REG
;
465 curr_pci_reg
= FPGA_RX_CURR_PCI_REG
;
467 group_regs_base
= FPGA_TX_GROUP0_START_REG
;
468 block_size_reg
= FPGA_TX_BLOCK_SIZE_REG
;
469 block_count_reg
= FPGA_TX_BLOCK_COUNT_REG
;
470 group_count_reg
= FPGA_TX_GROUP_COUNT_REG
;
471 groups_per_int_reg
= FPGA_TX_GROUPS_PER_INT_REG
;
472 curr_pci_reg
= FPGA_TX_CURR_PCI_REG
;
475 printk(KERN_WARNING
"block_size, group_size, group_count\n");
477 * Block size is represented in no. of 64 bit transfers.
479 iowrite32(channel
->block_size
/ 8, fpga
+ block_size_reg
);
480 iowrite32(channel
->group_size
/ channel
->block_size
,
481 fpga
+ block_count_reg
);
482 iowrite32(channel
->group_count
, fpga
+ group_count_reg
);
483 /* FIXME: Hardcoded groups per int. Get it from sysfs? */
484 iowrite32(16, fpga
+ groups_per_int_reg
);
486 /* Unlock PCI address? Not defined in the data sheet, but used
487 * in the reference code by Redrapids.
489 iowrite32(0x1, fpga
+ curr_pci_reg
);
491 /* The DMA address page register is shared between the RX and
492 * TX channels, so acquire lock.
494 for (i
= 0; i
< channel
->group_count
; i
++) {
496 group_in_page
= i
% 32;
498 group_reg
= group_regs_base
+ (group_in_page
* 4);
500 spin_lock(channel
->iomem_lock
);
501 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
502 iowrite32(channel
->groups
[i
].dma_addr
, fpga
+ group_reg
);
503 spin_unlock(channel
->iomem_lock
);
506 for (i
= 0; i
< channel
->group_count
; i
++) {
508 group_in_page
= i
% 32;
510 group_reg
= group_regs_base
+ (group_in_page
* 4);
512 spin_lock(channel
->iomem_lock
);
513 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
514 printk(KERN_INFO PFX
"%ld: read dma_addr: 0x%x\n", i
,
515 ioread32(fpga
+ group_reg
));
516 spin_unlock(channel
->iomem_lock
);
521 static void __poch_channel_clear_counters(struct channel_info
*channel
)
523 channel
->counters
.pll_unlock
= 0;
524 channel
->counters
.fifo_empty
= 0;
525 channel
->counters
.fifo_overflow
= 0;
528 static int poch_channel_init(struct channel_info
*channel
,
529 struct poch_dev
*poch_dev
)
531 struct pci_dev
*pdev
= poch_dev
->pci_dev
;
532 struct device
*dev
= &pdev
->dev
;
533 unsigned long alloc_size
;
536 printk(KERN_WARNING
"channel_latch_attr\n");
538 ret
= channel_latch_attr(channel
);
542 channel
->consumed
= 0;
543 channel
->transfer
= 0;
545 /* Allocate memory to hold group information. */
546 alloc_size
= channel
->group_count
* sizeof(struct poch_group_info
);
547 channel
->groups
= kzalloc(alloc_size
, GFP_KERNEL
);
548 if (!channel
->groups
) {
549 dev_err(dev
, "error allocating memory for group info\n");
554 printk(KERN_WARNING
"poch_channel_alloc_groups\n");
556 ret
= poch_channel_alloc_groups(channel
);
558 dev_err(dev
, "error allocating groups of order %d\n",
559 get_order(channel
->group_size
));
560 goto out_free_group_info
;
563 channel
->fpga_iomem
= poch_dev
->fpga_iomem
;
564 channel
->bridge_iomem
= poch_dev
->bridge_iomem
;
565 channel
->iomem_lock
= &poch_dev
->iomem_lock
;
566 spin_lock_init(&channel
->counters_lock
);
568 __poch_channel_clear_counters(channel
);
573 kfree(channel
->groups
);
578 static int poch_wait_fpga_prog(void __iomem
*bridge
)
580 unsigned long total_wait
;
581 const unsigned long wait_period
= 100;
582 /* FIXME: Get the actual timeout */
583 const unsigned long prog_timeo
= 10000; /* 10 Seconds */
586 printk(KERN_WARNING
"poch_wait_fpg_prog\n");
588 printk(KERN_INFO PFX
"programming fpga ...\n");
592 total_wait
+= wait_period
;
594 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
595 if (card_power
& BRIDGE_CARD_POWER_PROG_DONE
) {
596 printk(KERN_INFO PFX
"programming done\n");
599 if (total_wait
> prog_timeo
) {
601 "timed out while programming FPGA\n");
607 static void poch_card_power_off(struct poch_dev
*poch_dev
)
609 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
612 iowrite32(0, bridge
+ BRIDGE_INT_MASK_REG
);
613 iowrite32(0, bridge
+ BRIDGE_DMA_GO_REG
);
615 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
616 iowrite32(card_power
& ~BRIDGE_CARD_POWER_EN
,
617 bridge
+ BRIDGE_CARD_POWER_REG
);
625 static void poch_card_clock_on(void __iomem
*fpga
)
627 /* FIXME: Get this data through sysfs? */
628 enum clk_src clk_src
= CLK_SRC_ON_BOARD
;
630 if (clk_src
== CLK_SRC_ON_BOARD
) {
631 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK
| FPGA_ADC_CLOCK_CTL_OSC_EN
,
632 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
633 } else if (clk_src
== CLK_SRC_EXTERNAL
) {
634 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK
,
635 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
639 static int poch_card_power_on(struct poch_dev
*poch_dev
)
641 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
642 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
644 iowrite32(BRIDGE_CARD_POWER_EN
, bridge
+ BRIDGE_CARD_POWER_REG
);
646 if (poch_wait_fpga_prog(bridge
) != 0) {
647 poch_card_power_off(poch_dev
);
651 poch_card_clock_on(fpga
);
653 /* Sync to new clock, reset state machines, set DMA mode. */
654 iowrite32(FPGA_DOM_DCM_RESET
| FPGA_DOM_SOFT_RESET
655 | FPGA_DOM_DUAL_M_SG_DMA
, fpga
+ FPGA_DOM_REG
);
657 /* FIXME: The time required for sync. needs to be tuned. */
663 static void poch_channel_analog_on(struct channel_info
*channel
)
665 void __iomem
*fpga
= channel
->fpga_iomem
;
668 spin_lock(channel
->iomem_lock
);
669 adc_dac_en
= ioread32(fpga
+ FPGA_ADC_DAC_EN_REG
);
670 switch (channel
->chno
) {
671 case CHNO_RX_CHANNEL
:
672 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_ADC_OFF
,
673 fpga
+ FPGA_ADC_DAC_EN_REG
);
675 case CHNO_TX_CHANNEL
:
676 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_DAC_OFF
,
677 fpga
+ FPGA_ADC_DAC_EN_REG
);
680 spin_unlock(channel
->iomem_lock
);
683 static int poch_open(struct inode
*inode
, struct file
*filp
)
685 struct poch_dev
*poch_dev
;
686 struct channel_info
*channel
;
687 void __iomem
*bridge
;
693 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
694 bridge
= poch_dev
->bridge_iomem
;
695 fpga
= poch_dev
->fpga_iomem
;
697 chno
= iminor(inode
) % poch_dev
->nchannels
;
698 channel
= &poch_dev
->channels
[chno
];
700 if (!atomic_dec_and_test(&channel
->free
)) {
701 atomic_inc(&channel
->free
);
706 usage
= atomic_inc_return(&poch_dev
->usage
);
708 printk(KERN_WARNING
"poch_card_power_on\n");
711 ret
= poch_card_power_on(poch_dev
);
716 printk(KERN_INFO
"CardBus Bridge Revision: %x\n",
717 ioread32(bridge
+ BRIDGE_REV_REG
));
718 printk(KERN_INFO
"CardBus Interface Revision: %x\n",
719 ioread32(fpga
+ FPGA_IFACE_REV_REG
));
721 channel
->chno
= chno
;
722 filp
->private_data
= channel
;
724 printk(KERN_WARNING
"poch_channel_init\n");
726 ret
= poch_channel_init(channel
, poch_dev
);
730 poch_channel_analog_on(channel
);
732 printk(KERN_WARNING
"channel_dma_init\n");
734 channel_dma_init(channel
);
736 printk(KERN_WARNING
"poch_channel_analog_on\n");
739 printk(KERN_WARNING
"setting up DMA\n");
741 /* Initialize DMA Controller. */
742 iowrite32(FPGA_CAP_FIFO_REG
, bridge
+ BRIDGE_STAT_2_REG
);
743 iowrite32(FPGA_DMA_DESC_1_REG
, bridge
+ BRIDGE_STAT_3_REG
);
745 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
746 ioread32(fpga
+ FPGA_INT_STAT_REG
);
747 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
749 /* Initialize Interrupts. FIXME: Enable temperature
750 * handling We are enabling both Tx and Rx channel
751 * interrupts here. Do we need to enable interrupts
752 * only for the current channel? Anyways we won't get
753 * the interrupt unless the DMA is activated.
755 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
756 iowrite32(FPGA_INT_DMA_CORE
757 | FPGA_INT_PLL_UNLOCKED
758 | FPGA_INT_TX_FF_EMPTY
759 | FPGA_INT_RX_FF_EMPTY
760 | FPGA_INT_TX_FF_OVRFLW
761 | FPGA_INT_RX_FF_OVRFLW
,
762 fpga
+ FPGA_INT_MASK_REG
);
763 iowrite32(FPGA_DMA_INT_RX
| FPGA_DMA_INT_TX
,
764 fpga
+ FPGA_DMA_INT_MASK_REG
);
767 if (channel
->dir
== CHANNEL_DIR_TX
) {
768 /* Flush TX FIFO and output data from cardbus. */
771 ctl_val
|= FPGA_TX_CTL_FIFO_FLUSH
;
772 ctl_val
|= FPGA_TX_CTL_OUTPUT_CARDBUS
;
774 ctl_val
|= FPGA_TX_CTL_LOOPBACK
;
776 iowrite32(ctl_val
, fpga
+ FPGA_TX_CTL_REG
);
778 /* Flush RX FIFO and output data to cardbus. */
779 u32 ctl_val
= FPGA_RX_CTL_CONT_CAP
| FPGA_RX_CTL_FIFO_FLUSH
;
781 ctl_val
|= FPGA_RX_CTL_SYNTH_DATA
;
783 iowrite32(ctl_val
, fpga
+ FPGA_RX_CTL_REG
);
786 atomic_inc(&channel
->inited
);
792 poch_card_power_off(poch_dev
);
794 atomic_dec(&poch_dev
->usage
);
795 atomic_inc(&channel
->free
);
800 static int poch_release(struct inode
*inode
, struct file
*filp
)
802 struct channel_info
*channel
= filp
->private_data
;
803 struct poch_dev
*poch_dev
;
806 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
808 usage
= atomic_dec_return(&poch_dev
->usage
);
810 printk(KERN_WARNING
"poch_card_power_off\n");
811 poch_card_power_off(poch_dev
);
814 atomic_dec(&channel
->inited
);
815 poch_channel_free_groups(channel
);
816 kfree(channel
->groups
);
817 atomic_inc(&channel
->free
);
823 * Map the the group buffers, to user space.
825 static int poch_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
827 struct channel_info
*channel
= filp
->private_data
;
832 unsigned long group_pages
;
833 unsigned long total_group_pages
;
841 printk(KERN_WARNING
"poch_mmap\n");
844 printk(KERN_WARNING PFX
"page offset: %lu\n", vma
->vm_pgoff
);
848 group_pages
= npages(channel
->group_size
);
849 total_group_pages
= group_pages
* channel
->group_count
;
851 size
= vma
->vm_end
- vma
->vm_start
;
852 if (size
!= total_group_pages
* PAGE_SIZE
) {
853 printk(KERN_WARNING PFX
"required %lu bytes\n", size
);
857 start
= vma
->vm_start
;
859 for (i
= 0; i
< channel
->group_count
; i
++) {
860 pg
= channel
->groups
[i
].pg
;
861 for (pg_num
= 0; pg_num
< group_pages
; pg_num
++, pg
++) {
862 printk(KERN_DEBUG PFX
"%d: group %d: 0x%lx\n",
864 ret
= vm_insert_page(vma
, start
, pg
);
866 printk(KERN_DEBUG PFX
867 "vm_insert 2 failed at %d\n", pg_num
);
878 * Check whether there is some group that the user space has not
879 * consumed yet. When the user space consumes a group, it sets it to
880 * -1. Cosuming could be reading data in case of RX and filling a
881 * buffer in case of TX.
883 static int poch_channel_available(struct channel_info
*channel
)
887 spin_lock_irq(&channel
->group_offsets_lock
);
889 if (channel
->consumed
!= channel
->transfer
)
892 spin_unlock_irq(&channel
->group_offsets_lock
);
897 static unsigned int poch_poll(struct file
*filp
, poll_table
*pt
)
899 struct channel_info
*channel
= filp
->private_data
;
900 unsigned int ret
= 0;
902 poll_wait(filp
, &channel
->wq
, pt
);
904 if (poch_channel_available(channel
)) {
905 if (channel
->dir
== CHANNEL_DIR_RX
)
906 ret
= POLLIN
| POLLRDNORM
;
908 ret
= POLLOUT
| POLLWRNORM
;
914 static int poch_ioctl(struct inode
*inode
, struct file
*filp
,
915 unsigned int cmd
, unsigned long arg
)
917 struct channel_info
*channel
= filp
->private_data
;
918 void __iomem
*fpga
= channel
->fpga_iomem
;
919 void __iomem
*bridge
= channel
->bridge_iomem
;
920 void __user
*argp
= (void __user
*)arg
;
921 struct vm_area_struct
*vms
;
922 struct poch_counters counters
;
926 case POCH_IOC_TRANSFER_START
:
927 switch (channel
->chno
) {
928 case CHNO_TX_CHANNEL
:
929 printk(KERN_INFO PFX
"ioctl: Tx start\n");
930 iowrite32(0x1, fpga
+ FPGA_TX_TRIGGER_REG
);
931 iowrite32(0x1, fpga
+ FPGA_TX_ENABLE_REG
);
933 /* FIXME: Does it make sense to do a DMA GO
934 * twice, once in Tx and once in Rx.
936 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
938 case CHNO_RX_CHANNEL
:
939 printk(KERN_INFO PFX
"ioctl: Rx start\n");
940 iowrite32(0x1, fpga
+ FPGA_RX_ARM_REG
);
941 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
945 case POCH_IOC_TRANSFER_STOP
:
946 switch (channel
->chno
) {
947 case CHNO_TX_CHANNEL
:
948 printk(KERN_INFO PFX
"ioctl: Tx stop\n");
949 iowrite32(0x0, fpga
+ FPGA_TX_ENABLE_REG
);
950 iowrite32(0x0, fpga
+ FPGA_TX_TRIGGER_REG
);
951 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
953 case CHNO_RX_CHANNEL
:
954 printk(KERN_INFO PFX
"ioctl: Rx stop\n");
955 iowrite32(0x0, fpga
+ FPGA_RX_ARM_REG
);
956 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
960 case POCH_IOC_CONSUME
:
967 struct poch_consume consume
;
968 struct poch_consume
*uconsume
;
971 ret
= copy_from_user(&consume
, uconsume
, sizeof(consume
));
975 spin_lock_irq(&channel
->group_offsets_lock
);
977 channel
->consumed
+= consume
.nflush
;
978 channel
->consumed
%= channel
->group_count
;
980 available
= channel
->transfer
- channel
->consumed
;
982 available
+= channel
->group_count
;
984 from
= channel
->consumed
;
986 spin_unlock_irq(&channel
->group_offsets_lock
);
988 nfetch
= consume
.nfetch
;
989 count
= min(available
, nfetch
);
991 for (i
= 0; i
< count
; i
++) {
992 j
= (from
+ i
) % channel
->group_count
;
993 ret
= put_user(channel
->groups
[j
].user_offset
,
994 &consume
.offsets
[i
]);
999 ret
= put_user(count
, &uconsume
->nfetch
);
1005 case POCH_IOC_GET_COUNTERS
:
1006 if (!access_ok(VERIFY_WRITE
, argp
, sizeof(struct poch_counters
)))
1009 spin_lock_irq(&channel
->counters_lock
);
1010 counters
= channel
->counters
;
1011 __poch_channel_clear_counters(channel
);
1012 spin_unlock_irq(&channel
->counters_lock
);
1014 ret
= copy_to_user(argp
, &counters
,
1015 sizeof(struct poch_counters
));
1020 case POCH_IOC_SYNC_GROUP_FOR_USER
:
1021 case POCH_IOC_SYNC_GROUP_FOR_DEVICE
:
1022 vms
= find_vma(current
->mm
, arg
);
1024 /* Address not mapped. */
1026 if (vms
->vm_file
!= filp
)
1027 /* Address mapped from different device/file. */
1030 flush_cache_range(vms
, arg
, arg
+ channel
->group_size
);
1036 static struct file_operations poch_fops
= {
1037 .owner
= THIS_MODULE
,
1039 .release
= poch_release
,
1040 .ioctl
= poch_ioctl
,
1045 static void poch_irq_dma(struct channel_info
*channel
)
1051 struct poch_group_info
*groups
;
1054 if (!atomic_read(&channel
->inited
))
1057 prev_transfer
= channel
->transfer
;
1059 if (channel
->chno
== CHNO_RX_CHANNEL
)
1060 curr_group_reg
= FPGA_RX_CURR_GROUP_REG
;
1062 curr_group_reg
= FPGA_TX_CURR_GROUP_REG
;
1064 curr_transfer
= ioread32(channel
->fpga_iomem
+ curr_group_reg
);
1066 groups_done
= curr_transfer
- prev_transfer
;
1067 /* Check wrap over, and handle it. */
1068 if (groups_done
<= 0)
1069 groups_done
+= channel
->group_count
;
1071 groups
= channel
->groups
;
1073 spin_lock(&channel
->group_offsets_lock
);
1075 for (i
= 0; i
< groups_done
; i
++) {
1076 j
= (prev_transfer
+ i
) % channel
->group_count
;
1078 channel
->transfer
+= 1;
1079 channel
->transfer
%= channel
->group_count
;
1081 if (channel
->transfer
== channel
->consumed
) {
1082 channel
->consumed
+= 1;
1083 channel
->consumed
%= channel
->group_count
;
1087 spin_unlock(&channel
->group_offsets_lock
);
1089 wake_up_interruptible(&channel
->wq
);
1092 static irqreturn_t
poch_irq_handler(int irq
, void *p
)
1094 struct poch_dev
*poch_dev
= p
;
1095 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
1096 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
1097 struct channel_info
*channel_rx
= &poch_dev
->channels
[CHNO_RX_CHANNEL
];
1098 struct channel_info
*channel_tx
= &poch_dev
->channels
[CHNO_TX_CHANNEL
];
1103 bridge_stat
= ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1104 fpga_stat
= ioread32(fpga
+ FPGA_INT_STAT_REG
);
1105 dma_stat
= ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1107 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1108 ioread32(fpga
+ FPGA_INT_STAT_REG
);
1109 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1111 if (bridge_stat
& BRIDGE_INT_FPGA
) {
1112 if (fpga_stat
& FPGA_INT_DMA_CORE
) {
1113 if (dma_stat
& FPGA_DMA_INT_RX
)
1114 poch_irq_dma(channel_rx
);
1115 if (dma_stat
& FPGA_DMA_INT_TX
)
1116 poch_irq_dma(channel_tx
);
1118 if (fpga_stat
& FPGA_INT_PLL_UNLOCKED
) {
1119 channel_tx
->counters
.pll_unlock
++;
1120 channel_rx
->counters
.pll_unlock
++;
1121 if (printk_ratelimit())
1122 printk(KERN_WARNING PFX
"PLL unlocked\n");
1124 if (fpga_stat
& FPGA_INT_TX_FF_EMPTY
)
1125 channel_tx
->counters
.fifo_empty
++;
1126 if (fpga_stat
& FPGA_INT_TX_FF_OVRFLW
)
1127 channel_tx
->counters
.fifo_overflow
++;
1128 if (fpga_stat
& FPGA_INT_RX_FF_EMPTY
)
1129 channel_rx
->counters
.fifo_empty
++;
1130 if (fpga_stat
& FPGA_INT_RX_FF_OVRFLW
)
1131 channel_rx
->counters
.fifo_overflow
++;
1134 * FIXME: These errors should be notified through the
1135 * poll interface as POLLERR.
1138 /* Re-enable interrupts. */
1139 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
1147 static void poch_class_dev_unregister(struct poch_dev
*poch_dev
, int id
)
1151 struct channel_info
*channel
;
1154 if (poch_dev
->dev
== NULL
)
1157 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1158 channel
= &poch_dev
->channels
[i
];
1159 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1164 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1165 for (j
= 0; j
< nattrs
; j
++)
1166 device_remove_file(channel
->dev
, poch_class_attrs
[j
]);
1168 device_unregister(channel
->dev
);
1171 device_unregister(poch_dev
->dev
);
1174 static int __devinit
poch_class_dev_register(struct poch_dev
*poch_dev
,
1177 struct device
*dev
= &poch_dev
->pci_dev
->dev
;
1181 struct channel_info
*channel
;
1184 poch_dev
->dev
= device_create(poch_cls
, &poch_dev
->pci_dev
->dev
,
1185 MKDEV(0, 0), NULL
, "poch%d", id
);
1186 if (IS_ERR(poch_dev
->dev
)) {
1187 dev_err(dev
, "error creating parent class device");
1188 ret
= PTR_ERR(poch_dev
->dev
);
1189 poch_dev
->dev
= NULL
;
1193 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1194 channel
= &poch_dev
->channels
[i
];
1196 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1197 channel
->dev
= device_create(poch_cls
, poch_dev
->dev
, devno
,
1199 if (IS_ERR(channel
->dev
)) {
1200 dev_err(dev
, "error creating channel class device");
1201 ret
= PTR_ERR(channel
->dev
);
1202 channel
->dev
= NULL
;
1203 poch_class_dev_unregister(poch_dev
, id
);
1207 dev_set_drvdata(channel
->dev
, channel
);
1208 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1209 for (j
= 0; j
< nattrs
; j
++) {
1210 ret
= device_create_file(channel
->dev
,
1211 poch_class_attrs
[j
]);
1213 dev_err(dev
, "error creating attribute file");
1214 poch_class_dev_unregister(poch_dev
, id
);
1223 static int __devinit
poch_pci_probe(struct pci_dev
*pdev
,
1224 const struct pci_device_id
*pci_id
)
1226 struct device
*dev
= &pdev
->dev
;
1227 struct poch_dev
*poch_dev
;
1228 struct uio_info
*uio
;
1233 poch_dev
= kzalloc(sizeof(struct poch_dev
), GFP_KERNEL
);
1235 dev_err(dev
, "error allocating priv. data memory\n");
1239 poch_dev
->pci_dev
= pdev
;
1240 uio
= &poch_dev
->uio
;
1242 pci_set_drvdata(pdev
, poch_dev
);
1244 spin_lock_init(&poch_dev
->iomem_lock
);
1246 poch_dev
->nchannels
= POCH_NCHANNELS
;
1247 poch_dev
->channels
[CHNO_RX_CHANNEL
].dir
= CHANNEL_DIR_RX
;
1248 poch_dev
->channels
[CHNO_TX_CHANNEL
].dir
= CHANNEL_DIR_TX
;
1250 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1251 init_waitqueue_head(&poch_dev
->channels
[i
].wq
);
1252 atomic_set(&poch_dev
->channels
[i
].free
, 1);
1253 atomic_set(&poch_dev
->channels
[i
].inited
, 0);
1256 ret
= pci_enable_device(pdev
);
1258 dev_err(dev
, "error enabling device\n");
1262 ret
= pci_request_regions(pdev
, "poch");
1264 dev_err(dev
, "error requesting resources\n");
1268 uio
->mem
[0].addr
= pci_resource_start(pdev
, 1);
1269 if (!uio
->mem
[0].addr
) {
1270 dev_err(dev
, "invalid BAR1\n");
1275 uio
->mem
[0].size
= pci_resource_len(pdev
, 1);
1276 uio
->mem
[0].memtype
= UIO_MEM_PHYS
;
1279 uio
->version
= "0.0.1";
1281 ret
= uio_register_device(dev
, uio
);
1283 dev_err(dev
, "error register UIO device: %d\n", ret
);
1287 poch_dev
->bridge_iomem
= ioremap(pci_resource_start(pdev
, 0),
1288 pci_resource_len(pdev
, 0));
1289 if (poch_dev
->bridge_iomem
== NULL
) {
1290 dev_err(dev
, "error mapping bridge (bar0) registers\n");
1295 poch_dev
->fpga_iomem
= ioremap(pci_resource_start(pdev
, 1),
1296 pci_resource_len(pdev
, 1));
1297 if (poch_dev
->fpga_iomem
== NULL
) {
1298 dev_err(dev
, "error mapping fpga (bar1) registers\n");
1300 goto out_bar0_unmap
;
1303 ret
= request_irq(pdev
->irq
, poch_irq_handler
, IRQF_SHARED
,
1304 dev_name(dev
), poch_dev
);
1306 dev_err(dev
, "error requesting IRQ %u\n", pdev
->irq
);
1308 goto out_bar1_unmap
;
1311 if (!idr_pre_get(&poch_ids
, GFP_KERNEL
)) {
1312 dev_err(dev
, "error allocating memory ids\n");
1317 idr_get_new(&poch_ids
, poch_dev
, &id
);
1318 if (id
>= MAX_POCH_CARDS
) {
1319 dev_err(dev
, "minors exhausted\n");
1324 cdev_init(&poch_dev
->cdev
, &poch_fops
);
1325 poch_dev
->cdev
.owner
= THIS_MODULE
;
1326 ret
= cdev_add(&poch_dev
->cdev
,
1327 poch_first_dev
+ (id
* poch_dev
->nchannels
),
1328 poch_dev
->nchannels
);
1330 dev_err(dev
, "error register character device\n");
1331 goto out_idr_remove
;
1334 ret
= poch_class_dev_register(poch_dev
, id
);
1341 cdev_del(&poch_dev
->cdev
);
1343 idr_remove(&poch_ids
, id
);
1345 free_irq(pdev
->irq
, poch_dev
);
1347 iounmap(poch_dev
->fpga_iomem
);
1349 iounmap(poch_dev
->bridge_iomem
);
1351 uio_unregister_device(uio
);
1353 pci_release_regions(pdev
);
1355 pci_disable_device(pdev
);
1362 * FIXME: We are yet to handle the hot unplug case.
1364 static void poch_pci_remove(struct pci_dev
*pdev
)
1366 struct poch_dev
*poch_dev
= pci_get_drvdata(pdev
);
1367 struct uio_info
*uio
= &poch_dev
->uio
;
1368 unsigned int minor
= MINOR(poch_dev
->cdev
.dev
);
1369 unsigned int id
= minor
/ poch_dev
->nchannels
;
1371 poch_class_dev_unregister(poch_dev
, id
);
1372 cdev_del(&poch_dev
->cdev
);
1373 idr_remove(&poch_ids
, id
);
1374 free_irq(pdev
->irq
, poch_dev
);
1375 iounmap(poch_dev
->fpga_iomem
);
1376 iounmap(poch_dev
->bridge_iomem
);
1377 uio_unregister_device(uio
);
1378 pci_release_regions(pdev
);
1379 pci_disable_device(pdev
);
1380 pci_set_drvdata(pdev
, NULL
);
1381 iounmap(uio
->mem
[0].internal_addr
);
1386 static const struct pci_device_id poch_pci_ids
[] /* __devinitconst */ = {
1387 { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS
,
1388 PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
) },
1392 static struct pci_driver poch_pci_driver
= {
1394 .id_table
= poch_pci_ids
,
1395 .probe
= poch_pci_probe
,
1396 .remove
= poch_pci_remove
,
1399 static int __init
poch_init_module(void)
1403 ret
= alloc_chrdev_region(&poch_first_dev
, 0,
1404 MAX_POCH_DEVICES
, DRV_NAME
);
1406 printk(KERN_ERR PFX
"error allocating device no.");
1410 poch_cls
= class_create(THIS_MODULE
, "pocketchange");
1411 if (IS_ERR(poch_cls
)) {
1412 ret
= PTR_ERR(poch_cls
);
1413 goto out_unreg_chrdev
;
1416 ret
= pci_register_driver(&poch_pci_driver
);
1418 printk(KERN_ERR PFX
"error register PCI device");
1419 goto out_class_destroy
;
1425 class_destroy(poch_cls
);
1428 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1433 static void __exit
poch_exit_module(void)
1435 pci_unregister_driver(&poch_pci_driver
);
1436 class_destroy(poch_cls
);
1437 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1440 module_init(poch_init_module
);
1441 module_exit(poch_exit_module
);
1443 MODULE_LICENSE("GPL v2");