/*
 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
 *
 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
 *
 * Licensed under GPL version 2 only.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include "poch.h"

#include <asm/cacheflush.h>

#ifndef PCI_VENDOR_ID_RRAPIDS
#define PCI_VENDOR_ID_RRAPIDS 0x17D2
#endif

#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
#endif

#define POCH_NCHANNELS 2

#define MAX_POCH_CARDS 8
#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)

#define DRV_NAME "poch"
#define PFX DRV_NAME ": "

/*
 * BAR0 Bridge Register Definitions
 */

#define BRIDGE_REV_REG			0x0

#define BRIDGE_INT_MASK_REG		0x4
#define BRIDGE_INT_STAT_REG		0x8

#define BRIDGE_INT_ACTIVE		(0x1 << 31)
#define BRIDGE_INT_FPGA			(0x1 << 2)
#define BRIDGE_INT_TEMP_FAIL		(0x1 << 1)
#define BRIDGE_INT_TEMP_WARN		(0x1 << 0)

#define BRIDGE_FPGA_RESET_REG		0xC

#define BRIDGE_CARD_POWER_REG		0x10
#define BRIDGE_CARD_POWER_EN		(0x1 << 0)
#define BRIDGE_CARD_POWER_PROG_DONE	(0x1 << 31)

#define BRIDGE_JTAG_REG			0x14
#define BRIDGE_DMA_GO_REG		0x18
#define BRIDGE_STAT_0_REG		0x1C
#define BRIDGE_STAT_1_REG		0x20
#define BRIDGE_STAT_2_REG		0x24
#define BRIDGE_STAT_3_REG		0x28
#define BRIDGE_TEMP_STAT_REG		0x2C
#define BRIDGE_TEMP_THRESH_REG		0x30
#define BRIDGE_EEPROM_REVSEL_REG	0x34
#define BRIDGE_CIS_STRUCT_REG		0x100
#define BRIDGE_BOARDREV_REG		0x124

/*
 * BAR1 FPGA Register Definitions
 */

#define FPGA_IFACE_REV_REG		0x0
#define FPGA_RX_BLOCK_SIZE_REG		0x8
#define FPGA_TX_BLOCK_SIZE_REG		0xC
#define FPGA_RX_BLOCK_COUNT_REG		0x10
#define FPGA_TX_BLOCK_COUNT_REG		0x14
#define FPGA_RX_CURR_DMA_BLOCK_REG	0x18
#define FPGA_TX_CURR_DMA_BLOCK_REG	0x1C
#define FPGA_RX_GROUP_COUNT_REG		0x20
#define FPGA_TX_GROUP_COUNT_REG		0x24
#define FPGA_RX_CURR_GROUP_REG		0x28
#define FPGA_TX_CURR_GROUP_REG		0x2C
#define FPGA_RX_CURR_PCI_REG		0x38
#define FPGA_TX_CURR_PCI_REG		0x3C
#define FPGA_RX_GROUP0_START_REG	0x40
#define FPGA_TX_GROUP0_START_REG	0xC0
#define FPGA_DMA_DESC_1_REG		0x140
#define FPGA_DMA_DESC_2_REG		0x144
#define FPGA_DMA_DESC_3_REG		0x148
#define FPGA_DMA_DESC_4_REG		0x14C

#define FPGA_DMA_INT_STAT_REG		0x150
#define FPGA_DMA_INT_MASK_REG		0x154
#define FPGA_DMA_INT_RX			(1 << 0)
#define FPGA_DMA_INT_TX			(1 << 1)

#define FPGA_RX_GROUPS_PER_INT_REG	0x158
#define FPGA_TX_GROUPS_PER_INT_REG	0x15C
#define FPGA_DMA_ADR_PAGE_REG		0x160
#define FPGA_FPGA_REV_REG		0x200

#define FPGA_ADC_CLOCK_CTL_REG		0x204
#define FPGA_ADC_CLOCK_CTL_OSC_EN	(0x1 << 3)
#define FPGA_ADC_CLOCK_LOCAL_CLK	(0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
#define FPGA_ADC_CLOCK_EXT_SAMP_CLK	0x0

#define FPGA_ADC_DAC_EN_REG		0x208
#define FPGA_ADC_DAC_EN_DAC_OFF		(0x1 << 1)
#define FPGA_ADC_DAC_EN_ADC_OFF		(0x1 << 0)

#define FPGA_INT_STAT_REG		0x20C
#define FPGA_INT_MASK_REG		0x210
#define FPGA_INT_PLL_UNLOCKED		(0x1 << 9)
#define FPGA_INT_DMA_CORE		(0x1 << 8)
#define FPGA_INT_TX_FF_EMPTY		(0x1 << 7)
#define FPGA_INT_RX_FF_EMPTY		(0x1 << 6)
#define FPGA_INT_TX_FF_OVRFLW		(0x1 << 3)
#define FPGA_INT_RX_FF_OVRFLW		(0x1 << 2)
#define FPGA_INT_TX_ACQ_DONE		(0x1 << 1)
#define FPGA_INT_RX_ACQ_DONE		(0x1)

#define FPGA_RX_CTL_REG			0x214
#define FPGA_RX_CTL_FIFO_FLUSH		(0x1 << 9)
#define FPGA_RX_CTL_SYNTH_DATA		(0x1 << 8)
#define FPGA_RX_CTL_CONT_CAP		(0x0 << 1)
#define FPGA_RX_CTL_SNAP_CAP		(0x1 << 1)

#define FPGA_RX_ARM_REG			0x21C

#define FPGA_DOM_REG			0x224
#define FPGA_DOM_DCM_RESET		(0x1 << 5)
#define FPGA_DOM_SOFT_RESET		(0x1 << 4)
#define FPGA_DOM_DUAL_M_SG_DMA		(0x0)
#define FPGA_DOM_TARGET_ACCESS		(0x1)

#define FPGA_TX_CTL_REG			0x228
#define FPGA_TX_CTL_FIFO_FLUSH		(0x1 << 9)
#define FPGA_TX_CTL_OUTPUT_ZERO		(0x0 << 2)
#define FPGA_TX_CTL_OUTPUT_CARDBUS	(0x1 << 2)
#define FPGA_TX_CTL_OUTPUT_ADC		(0x2 << 2)
#define FPGA_TX_CTL_OUTPUT_SNAPSHOT	(0x3 << 2)
#define FPGA_TX_CTL_LOOPBACK		(0x1 << 0)

#define FPGA_ENDIAN_MODE_REG		0x22C
#define FPGA_RX_FIFO_COUNT_REG		0x28C
#define FPGA_TX_ENABLE_REG		0x298
#define FPGA_TX_TRIGGER_REG		0x29C
#define FPGA_TX_DATAMEM_COUNT_REG	0x2A8
#define FPGA_CAP_FIFO_REG		0x300
#define FPGA_TX_SNAPSHOT_REG		0x8000

/*
 * Channel Index Definitions
 */

enum {
	CHNO_RX_CHANNEL,
	CHNO_TX_CHANNEL,
};

enum channel_dir {
	CHANNEL_DIR_RX,
	CHANNEL_DIR_TX,
};

struct poch_group_info {
	struct page *pg;
	dma_addr_t dma_addr;
	unsigned long user_offset;
};

struct channel_info {
	unsigned int chno;

	atomic_t sys_block_size;
	atomic_t sys_group_size;
	atomic_t sys_group_count;

	enum channel_dir dir;

	unsigned long block_size;
	unsigned long group_size;
	unsigned long group_count;

	/* Contains the DMA address and VM offset of each group. */
	struct poch_group_info *groups;

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;
	struct poch_cbuf_header *header;
	struct page *header_pg;
	unsigned long header_size;

	/* Last group indicated as 'complete' to user space. */
	unsigned int transfer;

	wait_queue_head_t wq;

	unsigned int data_available;
	unsigned int space_available;

	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t *iomem_lock;

	atomic_t free;
	atomic_t inited;

	struct poch_counters counters;
	spinlock_t counters_lock;

	struct device *dev;
};

struct poch_dev {
	struct uio_info uio;
	struct pci_dev *pci_dev;
	unsigned int nchannels;
	struct channel_info channels[POCH_NCHANNELS];
	struct cdev cdev;

	/* Counts the no. of channels that have been opened. On first
	 * open, the card is powered on. On last channel close, the
	 * card is powered off.
	 */
	atomic_t usage;

	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t iomem_lock;

	struct device *dev;
};

static dev_t poch_first_dev;
static struct class *poch_cls;
static DEFINE_IDR(poch_ids);

static ssize_t store_block_size(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_info *channel = dev_get_drvdata(dev);
	unsigned long block_size;

	sscanf(buf, "%lu", &block_size);
	atomic_set(&channel->sys_block_size, block_size);

	return count;
}
static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);

static ssize_t store_group_size(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_info *channel = dev_get_drvdata(dev);
	unsigned long group_size;

	sscanf(buf, "%lu", &group_size);
	atomic_set(&channel->sys_group_size, group_size);

	return count;
}
static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);

static ssize_t store_group_count(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct channel_info *channel = dev_get_drvdata(dev);
	unsigned long group_count;

	sscanf(buf, "%lu", &group_count);
	atomic_set(&channel->sys_group_count, group_count);

	return count;
}
static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);

static ssize_t show_direction(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct channel_info *channel = dev_get_drvdata(dev);
	int len;

	len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
	return len;
}
static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);

static unsigned long npages(unsigned long bytes)
{
	if (bytes % PAGE_SIZE == 0)
		return bytes / PAGE_SIZE;

	return (bytes / PAGE_SIZE) + 1;
}

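/*
 * Note: npages() above is equivalent to DIV_ROUND_UP(bytes, PAGE_SIZE)
 * from <linux/kernel.h>; the open-coded form is kept as-is.
 */
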
static ssize_t show_mmap_size(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct channel_info *channel = dev_get_drvdata(dev);
	int len;
	unsigned long mmap_size;
	unsigned long group_pages;
	unsigned long header_pages;
	unsigned long total_group_pages;

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);
	total_group_pages = group_pages * channel->group_count;

	mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
	len = sprintf(buf, "%lu\n", mmap_size);
	return len;
}
static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);

static struct device_attribute *poch_class_attrs[] = {
	&dev_attr_block_size,
	&dev_attr_group_size,
	&dev_attr_group_count,
	&dev_attr_dir,
	&dev_attr_mmap_size,
};

static void poch_channel_free_groups(struct channel_info *channel)
{
	unsigned long i;

	for (i = 0; i < channel->group_count; i++) {
		struct poch_group_info *group;
		int order;

		group = &channel->groups[i];
		order = get_order(channel->group_size);

		__free_pages(group->pg, order);
	}
}

static int poch_channel_alloc_groups(struct channel_info *channel)
{
	unsigned long i;
	unsigned long group_pages;
	unsigned long header_pages;

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);

	for (i = 0; i < channel->group_count; i++) {
		struct poch_group_info *group;
		gfp_t gfp_mask;
		int order;

		group = &channel->groups[i];
		order = get_order(channel->group_size);

		/*
		 * __GFP_COMP is required here since we are going to
		 * perform non-linear mapping to userspace. For more
		 * information read the vm_insert_page() function
		 * comments.
		 */
		gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
		group->pg = alloc_pages(gfp_mask, order);
		if (!group->pg) {
			poch_channel_free_groups(channel);
			return -ENOMEM;
		}

		/* FIXME: This is the physical address not the bus
		 * address! This won't work in architectures that
		 * have an IOMMU. Can we use pci_map_single() for
		 * this?
		 */
		group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;

		group->user_offset =
			(header_pages + (i * group_pages)) * PAGE_SIZE;

		printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i,
		       group->user_offset);
	}

	return 0;
}

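/*
 * Illustrative sketch only, compiled out: one way to address the FIXME
 * above would be to obtain a real bus address from the DMA API instead of
 * using the CPU physical address. The helper below is an assumption about
 * how that could look, not part of the driver; the matching unmap on
 * teardown and error handling in the callers would still need to be added.
 */
#if 0
static int poch_group_map(struct pci_dev *pdev, struct poch_group_info *group,
			  size_t group_size)
{
	dma_addr_t bus_addr;

	/* Map the group's pages for DMA and use the returned bus address. */
	bus_addr = pci_map_page(pdev, group->pg, 0, group_size,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, bus_addr))
		return -EIO;

	group->dma_addr = bus_addr;
	return 0;
}
#endif
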
static int channel_latch_attr(struct channel_info *channel)
{
	channel->group_count = atomic_read(&channel->sys_group_count);
	channel->group_size = atomic_read(&channel->sys_group_size);
	channel->block_size = atomic_read(&channel->sys_block_size);

	if (channel->group_count == 0) {
		printk(KERN_ERR PFX "invalid group count %lu",
		       channel->group_count);
		return -EINVAL;
	}

	if (channel->group_size == 0 ||
	    channel->group_size < channel->block_size) {
		printk(KERN_ERR PFX "invalid group size %lu",
		       channel->group_size);
		return -EINVAL;
	}

	if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
		printk(KERN_ERR PFX "invalid block size %lu",
		       channel->block_size);
		return -EINVAL;
	}

	if (channel->group_size % channel->block_size != 0) {
		printk(KERN_ERR PFX
		       "group size should be multiple of block size");
		return -EINVAL;
	}

	return 0;
}

/*
 * Configure DMA group registers
 */
static void channel_dma_init(struct channel_info *channel)
{
	void __iomem *fpga = channel->fpga_iomem;
	u32 group_regs_base;
	u32 group_reg;
	unsigned int page;
	unsigned int group_in_page;
	unsigned long i;
	u32 block_size_reg;
	u32 block_count_reg;
	u32 group_count_reg;
	u32 groups_per_int_reg;
	u32 curr_pci_reg;

	if (channel->chno == CHNO_RX_CHANNEL) {
		group_regs_base = FPGA_RX_GROUP0_START_REG;
		block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
		block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
		group_count_reg = FPGA_RX_GROUP_COUNT_REG;
		groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
		curr_pci_reg = FPGA_RX_CURR_PCI_REG;
	} else {
		group_regs_base = FPGA_TX_GROUP0_START_REG;
		block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
		block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
		group_count_reg = FPGA_TX_GROUP_COUNT_REG;
		groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
		curr_pci_reg = FPGA_TX_CURR_PCI_REG;
	}

	printk(KERN_WARNING "block_size, group_size, group_count\n");

	/*
	 * Block size is represented in no. of 64 bit transfers.
	 */
	iowrite32(channel->block_size / 8, fpga + block_size_reg);
	iowrite32(channel->group_size / channel->block_size,
		  fpga + block_count_reg);
	iowrite32(channel->group_count, fpga + group_count_reg);
	/* FIXME: Hardcoded groups per int. Get it from sysfs? */
	iowrite32(1, fpga + groups_per_int_reg);
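
	/*
	 * Worked example (values chosen only for illustration): with
	 * block_size = 4096, group_size = 65536 and group_count = 8, the
	 * writes above program 512 64-bit transfers per block, 16 blocks
	 * per group, 8 groups, and one interrupt per completed group.
	 */
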
	/* Unlock PCI address? Not defined in the data sheet, but used
	 * in the reference code by Redrapids.
	 */
	iowrite32(0x1, fpga + curr_pci_reg);

	/* The DMA address page register is shared between the RX and
	 * TX channels, so acquire lock.
	 */
	for (i = 0; i < channel->group_count; i++) {
		page = i / 32;
		group_in_page = i % 32;

		group_reg = group_regs_base + (group_in_page * 4);

		spin_lock(channel->iomem_lock);
		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
		iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
		spin_unlock(channel->iomem_lock);
	}

	for (i = 0; i < channel->group_count; i++) {
		page = i / 32;
		group_in_page = i % 32;

		group_reg = group_regs_base + (group_in_page * 4);

		spin_lock(channel->iomem_lock);
		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
		printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
		       ioread32(fpga + group_reg));
		spin_unlock(channel->iomem_lock);
	}
}

static int poch_channel_alloc_header(struct channel_info *channel)
{
	struct poch_cbuf_header *header = channel->header;
	unsigned long group_offset_size;
	unsigned long tot_group_offsets_size;

	/* Allocate memory to hold the header exported to userspace. */
	group_offset_size = sizeof(header->group_offsets[0]);
	tot_group_offsets_size = group_offset_size * channel->group_count;
	channel->header_size = sizeof(*header) + tot_group_offsets_size;
	channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(channel->header_size));
	if (!channel->header_pg)
		return -ENOMEM;

	channel->header = page_address(channel->header_pg);

	return 0;
}

static void poch_channel_free_header(struct channel_info *channel)
{
	int order;

	order = get_order(channel->header_size);
	__free_pages(channel->header_pg, order);
}

static void poch_channel_init_header(struct channel_info *channel)
{
	int i;
	struct poch_group_info *groups;
	s32 *group_offsets;

	channel->header->group_size_bytes = channel->group_size;
	channel->header->group_count = channel->group_count;

	spin_lock_init(&channel->group_offsets_lock);

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	for (i = 0; i < channel->group_count; i++) {
		if (channel->dir == CHANNEL_DIR_RX)
			group_offsets[i] = -1;
		else
			group_offsets[i] = groups[i].user_offset;
	}
}

static void __poch_channel_clear_counters(struct channel_info *channel)
{
	channel->counters.pll_unlock = 0;
	channel->counters.fifo_empty = 0;
	channel->counters.fifo_overflow = 0;
}

static int poch_channel_init(struct channel_info *channel,
			     struct poch_dev *poch_dev)
{
	struct pci_dev *pdev = poch_dev->pci_dev;
	struct device *dev = &pdev->dev;
	unsigned long alloc_size;
	int ret;

	printk(KERN_WARNING "channel_latch_attr\n");

	ret = channel_latch_attr(channel);
	if (ret)
		return ret;

	channel->transfer = 0;

	/* Allocate memory to hold group information. */
	alloc_size = channel->group_count * sizeof(struct poch_group_info);
	channel->groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!channel->groups) {
		dev_err(dev, "error allocating memory for group info\n");
		ret = -ENOMEM;
		goto out;
	}

	printk(KERN_WARNING "poch_channel_alloc_groups\n");

	ret = poch_channel_alloc_groups(channel);
	if (ret) {
		dev_err(dev, "error allocating groups of order %d\n",
			get_order(channel->group_size));
		goto out_free_group_info;
	}

	ret = poch_channel_alloc_header(channel);
	if (ret) {
		dev_err(dev, "error allocating user space header\n");
		goto out_free_groups;
	}

	channel->fpga_iomem = poch_dev->fpga_iomem;
	channel->bridge_iomem = poch_dev->bridge_iomem;
	channel->iomem_lock = &poch_dev->iomem_lock;
	spin_lock_init(&channel->counters_lock);

	__poch_channel_clear_counters(channel);

	printk(KERN_WARNING "poch_channel_init_header\n");

	poch_channel_init_header(channel);

	return 0;

 out_free_groups:
	poch_channel_free_groups(channel);
 out_free_group_info:
	kfree(channel->groups);
 out:
	return ret;
}

static int poch_wait_fpga_prog(void __iomem *bridge)
{
	unsigned long total_wait;
	const unsigned long wait_period = 100;
	/* FIXME: Get the actual timeout */
	const unsigned long prog_timeo = 10000; /* 10 Seconds */
	u32 card_power;

	printk(KERN_WARNING "poch_wait_fpga_prog\n");

	printk(KERN_INFO PFX "programming fpga ...\n");
	total_wait = 0;
	while (1) {
		msleep(wait_period);
		total_wait += wait_period;

		card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
		if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
			printk(KERN_INFO PFX "programming done\n");
			return 0;
		}
		if (total_wait > prog_timeo) {
			printk(KERN_ERR PFX
			       "timed out while programming FPGA\n");
			return -EIO;
		}
	}
}

static void poch_card_power_off(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	u32 card_power;

	iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
	iowrite32(0, bridge + BRIDGE_DMA_GO_REG);

	card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
	iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
		  bridge + BRIDGE_CARD_POWER_REG);
}

enum clk_src {
	CLK_SRC_ON_BOARD,
	CLK_SRC_EXTERNAL,
};

static void poch_card_clock_on(void __iomem *fpga)
{
	/* FIXME: Get this data through sysfs? */
	enum clk_src clk_src = CLK_SRC_ON_BOARD;

	if (clk_src == CLK_SRC_ON_BOARD) {
		iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
			  fpga + FPGA_ADC_CLOCK_CTL_REG);
	} else if (clk_src == CLK_SRC_EXTERNAL) {
		iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
			  fpga + FPGA_ADC_CLOCK_CTL_REG);
	}
}

static int poch_card_power_on(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;

	iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);

	if (poch_wait_fpga_prog(bridge) != 0) {
		poch_card_power_off(poch_dev);
		return -EIO;
	}

	poch_card_clock_on(fpga);

	/* Sync to new clock, reset state machines, set DMA mode. */
	iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
		  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);

	/* FIXME: The time required for sync. needs to be tuned. */
	msleep(10);

	return 0;
}

static void poch_channel_analog_on(struct channel_info *channel)
{
	void __iomem *fpga = channel->fpga_iomem;
	u32 adc_dac_en;

	spin_lock(channel->iomem_lock);
	adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
	switch (channel->chno) {
	case CHNO_RX_CHANNEL:
		iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
			  fpga + FPGA_ADC_DAC_EN_REG);
		break;
	case CHNO_TX_CHANNEL:
		iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
			  fpga + FPGA_ADC_DAC_EN_REG);
		break;
	}
	spin_unlock(channel->iomem_lock);
}

static int poch_open(struct inode *inode, struct file *filp)
{
	struct poch_dev *poch_dev;
	struct channel_info *channel;
	void __iomem *bridge;
	void __iomem *fpga;
	int chno;
	int usage;
	int ret;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
	bridge = poch_dev->bridge_iomem;
	fpga = poch_dev->fpga_iomem;

	chno = iminor(inode) % poch_dev->nchannels;
	channel = &poch_dev->channels[chno];

	if (!atomic_dec_and_test(&channel->free)) {
		atomic_inc(&channel->free);
		return -EBUSY;
	}

	usage = atomic_inc_return(&poch_dev->usage);

	printk(KERN_WARNING "poch_card_power_on\n");

	if (usage == 1) {
		ret = poch_card_power_on(poch_dev);
		if (ret)
			goto out_dec_usage;
	}

	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
	       ioread32(bridge + BRIDGE_REV_REG));
	printk(KERN_INFO "CardBus Interface Revision: %x\n",
	       ioread32(fpga + FPGA_IFACE_REV_REG));

	channel->chno = chno;
	filp->private_data = channel;

	printk(KERN_WARNING "poch_channel_init\n");

	ret = poch_channel_init(channel, poch_dev);
	if (ret)
		goto out_power_off;

	poch_channel_analog_on(channel);

	printk(KERN_WARNING "channel_dma_init\n");

	channel_dma_init(channel);

	printk(KERN_WARNING "poch_channel_analog_on\n");

	printk(KERN_WARNING "setting up DMA\n");

	/* Initialize DMA Controller. */
	iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
	iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	/* Initialize Interrupts. FIXME: Enable temperature handling.
	 * We are enabling both Tx and Rx channel interrupts here. Do
	 * we need to enable interrupts only for the current channel?
	 * Anyway, we won't get the interrupt unless the DMA is
	 * activated.
	 */
	iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
	iowrite32(FPGA_INT_DMA_CORE
		  | FPGA_INT_PLL_UNLOCKED
		  | FPGA_INT_TX_FF_EMPTY
		  | FPGA_INT_RX_FF_EMPTY
		  | FPGA_INT_TX_FF_OVRFLW
		  | FPGA_INT_RX_FF_OVRFLW,
		  fpga + FPGA_INT_MASK_REG);
	iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
		  fpga + FPGA_DMA_INT_MASK_REG);

	if (channel->dir == CHANNEL_DIR_TX) {
		/* Flush TX FIFO and output data from cardbus. */
		iowrite32(FPGA_TX_CTL_FIFO_FLUSH
			  | FPGA_TX_CTL_OUTPUT_CARDBUS,
			  fpga + FPGA_TX_CTL_REG);
	} else {
		/* Flush RX FIFO and output data to cardbus. */
		iowrite32(FPGA_RX_CTL_CONT_CAP
			  | FPGA_RX_CTL_FIFO_FLUSH,
			  fpga + FPGA_RX_CTL_REG);
	}

	atomic_inc(&channel->inited);

	return 0;

 out_power_off:
	poch_card_power_off(poch_dev);
 out_dec_usage:
	atomic_dec(&poch_dev->usage);
	atomic_inc(&channel->free);

	return ret;
}

static int poch_release(struct inode *inode, struct file *filp)
{
	struct channel_info *channel = filp->private_data;
	struct poch_dev *poch_dev;
	int usage;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

	usage = atomic_dec_return(&poch_dev->usage);
	if (usage == 0) {
		printk(KERN_WARNING "poch_card_power_off\n");
		poch_card_power_off(poch_dev);
	}

	atomic_dec(&channel->inited);
	poch_channel_free_header(channel);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	atomic_inc(&channel->free);

	return 0;
}

/*
 * Map the header and the group buffers, to user space.
 */
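/*
 * The layout implied by the code below: npages(header_size) pages of
 * header come first, followed by group_count runs of npages(group_size)
 * pages, one run per group, in group order. The total must match the
 * mmap_size sysfs attribute exactly; the page offset is only logged,
 * not interpreted.
 */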
static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct channel_info *channel = filp->private_data;

	unsigned long start;
	unsigned long size;

	unsigned long group_pages;
	unsigned long header_pages;
	unsigned long total_group_pages;

	int pg_num;
	struct page *pg;

	int i;
	int ret;

	printk(KERN_WARNING "poch_mmap\n");

	printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);
	total_group_pages = group_pages * channel->group_count;

	size = vma->vm_end - vma->vm_start;
	if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
		printk(KERN_WARNING PFX "required %lu bytes\n", size);
		return -EINVAL;
	}

	start = vma->vm_start;

	/* FIXME: Cleanup required on failure? */
	pg = channel->header_pg;
	for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
		printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
		printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
		ret = vm_insert_page(vma, start, pg);
		if (ret) {
			printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
			return ret;
		}
		start += PAGE_SIZE;
	}

	for (i = 0; i < channel->group_count; i++) {
		pg = channel->groups[i].pg;
		for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
			printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
			       pg_num, i, start);
			ret = vm_insert_page(vma, start, pg);
			if (ret) {
				printk(KERN_DEBUG PFX
				       "vm_insert 2 failed at %d\n", pg_num);
				return ret;
			}
			start += PAGE_SIZE;
		}
	}

	return 0;
}

/*
 * Check whether there is some group that the user space has not
 * consumed yet. When the user space consumes a group, it sets it to
 * -1. Consuming could be reading data in case of RX and filling a
 * buffer in case of TX.
 */
static int poch_channel_available(struct channel_info *channel)
{
	int i;

	spin_lock_irq(&channel->group_offsets_lock);

	for (i = 0; i < channel->group_count; i++) {
		if (channel->header->group_offsets[i] != -1) {
			spin_unlock_irq(&channel->group_offsets_lock);
			return 1;
		}
	}

	spin_unlock_irq(&channel->group_offsets_lock);

	return 0;
}

static unsigned int poch_poll(struct file *filp, poll_table *pt)
{
	struct channel_info *channel = filp->private_data;
	unsigned int ret = 0;

	poll_wait(filp, &channel->wq, pt);

	if (poch_channel_available(channel)) {
		if (channel->dir == CHANNEL_DIR_RX)
			ret = POLLIN | POLLRDNORM;
		else
			ret = POLLOUT | POLLWRNORM;
	}

	return ret;
}

static int poch_ioctl(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	struct channel_info *channel = filp->private_data;
	void __iomem *fpga = channel->fpga_iomem;
	void __iomem *bridge = channel->bridge_iomem;
	void __user *argp = (void __user *)arg;
	struct vm_area_struct *vms;
	struct poch_counters counters;
	int ret;

	switch (cmd) {
	case POCH_IOC_TRANSFER_START:
		switch (channel->chno) {
		case CHNO_TX_CHANNEL:
			printk(KERN_INFO PFX "ioctl: Tx start\n");
			iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
			iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);

			/* FIXME: Does it make sense to do a DMA GO
			 * twice, once in Tx and once in Rx?
			 */
			iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
			break;
		case CHNO_RX_CHANNEL:
			printk(KERN_INFO PFX "ioctl: Rx start\n");
			iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
			iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
			break;
		}
		break;
	case POCH_IOC_TRANSFER_STOP:
		switch (channel->chno) {
		case CHNO_TX_CHANNEL:
			printk(KERN_INFO PFX "ioctl: Tx stop\n");
			iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
			iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
			iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
			break;
		case CHNO_RX_CHANNEL:
			printk(KERN_INFO PFX "ioctl: Rx stop\n");
			iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
			iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
			break;
		}
		break;
	case POCH_IOC_GET_COUNTERS:
		if (!access_ok(VERIFY_WRITE, argp,
			       sizeof(struct poch_counters)))
			return -EFAULT;

		spin_lock_irq(&channel->counters_lock);
		counters = channel->counters;
		__poch_channel_clear_counters(channel);
		spin_unlock_irq(&channel->counters_lock);

		ret = copy_to_user(argp, &counters,
				   sizeof(struct poch_counters));
		if (ret)
			return -EFAULT;

		break;
	case POCH_IOC_SYNC_GROUP_FOR_USER:
	case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
		vms = find_vma(current->mm, arg);
		if (!vms)
			/* Address not mapped. */
			return -EINVAL;
		if (vms->vm_file != filp)
			/* Address mapped from different device/file. */
			return -EINVAL;

		flush_cache_range(vms, arg, arg + channel->group_size);
		break;
	}
	return 0;
}

static struct file_operations poch_fops = {
	.owner = THIS_MODULE,
	.open = poch_open,
	.release = poch_release,
	.ioctl = poch_ioctl,
	.poll = poch_poll,
	.mmap = poch_mmap,
};

static void poch_irq_dma(struct channel_info *channel)
{
	long prev_transfer;
	long curr_transfer;
	long groups_done;
	unsigned long i, j;
	s32 *group_offsets;
	u32 curr_group_reg;
	struct poch_group_info *groups;

	if (!atomic_read(&channel->inited))
		return;

	prev_transfer = channel->transfer;

	if (channel->chno == CHNO_RX_CHANNEL)
		curr_group_reg = FPGA_RX_CURR_GROUP_REG;
	else
		curr_group_reg = FPGA_TX_CURR_GROUP_REG;

	curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);

	groups_done = curr_transfer - prev_transfer;
	/* Check wrap over, and handle it. */
	if (groups_done <= 0)
		groups_done += channel->group_count;
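
	/*
	 * Example (illustrative numbers): with group_count = 8,
	 * prev_transfer = 6 and curr_transfer = 2 give groups_done = -4,
	 * which the wrap-over correction above turns into 4.
	 */
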
	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	spin_lock(&channel->group_offsets_lock);

	for (i = 0; i < groups_done; i++) {
		j = (prev_transfer + i) % channel->group_count;
		group_offsets[j] = groups[j].user_offset;
	}

	spin_unlock(&channel->group_offsets_lock);

	channel->transfer = curr_transfer;

	wake_up_interruptible(&channel->wq);
}

static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}

		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}

		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
{
	int i, j;
	int nattrs;
	struct channel_info *channel;
	dev_t devno;

	if (poch_dev->dev == NULL)
		return;

	for (i = 0; i < poch_dev->nchannels; i++) {
		channel = &poch_dev->channels[i];
		devno = poch_first_dev + (id * poch_dev->nchannels) + i;

		if (channel->dev == NULL)
			continue;

		nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
		for (j = 0; j < nattrs; j++)
			device_remove_file(channel->dev, poch_class_attrs[j]);

		device_unregister(channel->dev);
	}

	device_unregister(poch_dev->dev);
}

static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
					     int id)
{
	struct device *dev = &poch_dev->pci_dev->dev;
	int i, j;
	int nattrs;
	int ret;
	dev_t devno;
	struct channel_info *channel;

	poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
				      MKDEV(0, 0), NULL, "poch%d", id);
	if (IS_ERR(poch_dev->dev)) {
		dev_err(dev, "error creating parent class device");
		ret = PTR_ERR(poch_dev->dev);
		poch_dev->dev = NULL;
		return ret;
	}

	for (i = 0; i < poch_dev->nchannels; i++) {
		channel = &poch_dev->channels[i];

		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
		channel->dev = device_create(poch_cls, poch_dev->dev, devno,
					     NULL, "ch%d", i);
		if (IS_ERR(channel->dev)) {
			dev_err(dev, "error creating channel class device");
			ret = PTR_ERR(channel->dev);
			channel->dev = NULL;
			poch_class_dev_unregister(poch_dev, id);
			return ret;
		}

		dev_set_drvdata(channel->dev, channel);
		nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
		for (j = 0; j < nattrs; j++) {
			ret = device_create_file(channel->dev,
						 poch_class_attrs[j]);
			if (ret) {
				dev_err(dev, "error creating attribute file");
				poch_class_dev_unregister(poch_dev, id);
				return ret;
			}
		}
	}

	return 0;
}

static int __devinit poch_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *pci_id)
{
	struct device *dev = &pdev->dev;
	struct poch_dev *poch_dev;
	struct uio_info *uio;
	int ret;
	int id;
	int i;

	poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
	if (!poch_dev) {
		dev_err(dev, "error allocating priv. data memory\n");
		return -ENOMEM;
	}

	poch_dev->pci_dev = pdev;
	uio = &poch_dev->uio;

	pci_set_drvdata(pdev, poch_dev);

	spin_lock_init(&poch_dev->iomem_lock);

	poch_dev->nchannels = POCH_NCHANNELS;
	poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
	poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;

	for (i = 0; i < poch_dev->nchannels; i++) {
		init_waitqueue_head(&poch_dev->channels[i].wq);
		atomic_set(&poch_dev->channels[i].free, 1);
		atomic_set(&poch_dev->channels[i].inited, 0);
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "error enabling device\n");
		goto out_free;
	}

	ret = pci_request_regions(pdev, "poch");
	if (ret) {
		dev_err(dev, "error requesting resources\n");
		goto out_disable;
	}

	uio->mem[0].addr = pci_resource_start(pdev, 1);
	if (!uio->mem[0].addr) {
		dev_err(dev, "invalid BAR1\n");
		ret = -ENODEV;
		goto out_release;
	}

	uio->mem[0].size = pci_resource_len(pdev, 1);
	uio->mem[0].memtype = UIO_MEM_PHYS;

	uio->name = DRV_NAME;
	uio->version = "0.0.1";

	ret = uio_register_device(dev, uio);
	if (ret) {
		dev_err(dev, "error register UIO device: %d\n", ret);
		goto out_release;
	}

	poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
					 pci_resource_len(pdev, 0));
	if (poch_dev->bridge_iomem == NULL) {
		dev_err(dev, "error mapping bridge (bar0) registers\n");
		ret = -ENOMEM;
		goto out_uio_unreg;
	}

	poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
				       pci_resource_len(pdev, 1));
	if (poch_dev->fpga_iomem == NULL) {
		dev_err(dev, "error mapping fpga (bar1) registers\n");
		ret = -ENOMEM;
		goto out_bar0_unmap;
	}

	ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
			  dev_name(dev), poch_dev);
	if (ret) {
		dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
		goto out_bar1_unmap;
	}

	if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
		dev_err(dev, "error allocating memory ids\n");
		ret = -ENOMEM;
		goto out_free_irq;
	}

	idr_get_new(&poch_ids, poch_dev, &id);
	if (id >= MAX_POCH_CARDS) {
		dev_err(dev, "minors exhausted\n");
		ret = -EBUSY;
		goto out_idr_remove;
	}

	cdev_init(&poch_dev->cdev, &poch_fops);
	poch_dev->cdev.owner = THIS_MODULE;
	ret = cdev_add(&poch_dev->cdev,
		       poch_first_dev + (id * poch_dev->nchannels),
		       poch_dev->nchannels);
	if (ret) {
		dev_err(dev, "error register character device\n");
		goto out_idr_remove;
	}

	ret = poch_class_dev_register(poch_dev, id);
	if (ret)
		goto out_cdev_del;

	return 0;

 out_cdev_del:
	cdev_del(&poch_dev->cdev);
 out_idr_remove:
	idr_remove(&poch_ids, id);
 out_free_irq:
	free_irq(pdev->irq, poch_dev);
 out_bar1_unmap:
	iounmap(poch_dev->fpga_iomem);
 out_bar0_unmap:
	iounmap(poch_dev->bridge_iomem);
 out_uio_unreg:
	uio_unregister_device(uio);
 out_release:
	pci_release_regions(pdev);
 out_disable:
	pci_disable_device(pdev);
 out_free:
	kfree(poch_dev);
	return ret;
}

/*
 * FIXME: We are yet to handle the hot unplug case.
 */
static void poch_pci_remove(struct pci_dev *pdev)
{
	struct poch_dev *poch_dev = pci_get_drvdata(pdev);
	struct uio_info *uio = &poch_dev->uio;
	unsigned int minor = MINOR(poch_dev->cdev.dev);
	unsigned int id = minor / poch_dev->nchannels;

	poch_class_dev_unregister(poch_dev, id);
	cdev_del(&poch_dev->cdev);
	idr_remove(&poch_ids, id);
	free_irq(pdev->irq, poch_dev);
	iounmap(poch_dev->fpga_iomem);
	iounmap(poch_dev->bridge_iomem);
	uio_unregister_device(uio);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	iounmap(uio->mem[0].internal_addr);

	kfree(poch_dev);
}

static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
	{ 0, }
};

static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};

static int __init poch_init_module(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&poch_first_dev, 0,
				  MAX_POCH_DEVICES, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "error allocating device no.");
		return ret;
	}

	poch_cls = class_create(THIS_MODULE, "pocketchange");
	if (IS_ERR(poch_cls)) {
		ret = PTR_ERR(poch_cls);
		goto out_unreg_chrdev;
	}

	ret = pci_register_driver(&poch_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "error register PCI device");
		goto out_class_destroy;
	}

	return 0;

 out_class_destroy:
	class_destroy(poch_cls);
 out_unreg_chrdev:
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);

	return ret;
}

static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}

module_init(poch_init_module);
module_exit(poch_exit_module);

MODULE_LICENSE("GPL v2");