/*
 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
 *
 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
 *
 * Licensed under GPL version 2 only.
 */
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
23 #include <linux/sched.h>
27 #include <asm/cacheflush.h>
29 #ifndef PCI_VENDOR_ID_RRAPIDS
30 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
33 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
34 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
37 #define POCH_NCHANNELS 2
39 #define MAX_POCH_CARDS 8
40 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
42 #define DRV_NAME "poch"
43 #define PFX DRV_NAME ": "
46 * BAR0 Bridge Register Definitions
49 #define BRIDGE_REV_REG 0x0
50 #define BRIDGE_INT_MASK_REG 0x4
51 #define BRIDGE_INT_STAT_REG 0x8
53 #define BRIDGE_INT_ACTIVE (0x1 << 31)
54 #define BRIDGE_INT_FPGA (0x1 << 2)
55 #define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
56 #define BRIDGE_INT_TEMP_WARN (0x1 << 0)
58 #define BRIDGE_FPGA_RESET_REG 0xC
60 #define BRIDGE_CARD_POWER_REG 0x10
61 #define BRIDGE_CARD_POWER_EN (0x1 << 0)
62 #define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
64 #define BRIDGE_JTAG_REG 0x14
65 #define BRIDGE_DMA_GO_REG 0x18
66 #define BRIDGE_STAT_0_REG 0x1C
67 #define BRIDGE_STAT_1_REG 0x20
68 #define BRIDGE_STAT_2_REG 0x24
69 #define BRIDGE_STAT_3_REG 0x28
70 #define BRIDGE_TEMP_STAT_REG 0x2C
71 #define BRIDGE_TEMP_THRESH_REG 0x30
72 #define BRIDGE_EEPROM_REVSEL_REG 0x34
73 #define BRIDGE_CIS_STRUCT_REG 0x100
74 #define BRIDGE_BOARDREV_REG 0x124
77 * BAR1 FPGA Register Definitions
80 #define FPGA_IFACE_REV_REG 0x0
81 #define FPGA_RX_BLOCK_SIZE_REG 0x8
82 #define FPGA_TX_BLOCK_SIZE_REG 0xC
83 #define FPGA_RX_BLOCK_COUNT_REG 0x10
84 #define FPGA_TX_BLOCK_COUNT_REG 0x14
85 #define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
86 #define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
87 #define FPGA_RX_GROUP_COUNT_REG 0x20
88 #define FPGA_TX_GROUP_COUNT_REG 0x24
89 #define FPGA_RX_CURR_GROUP_REG 0x28
90 #define FPGA_TX_CURR_GROUP_REG 0x2C
91 #define FPGA_RX_CURR_PCI_REG 0x38
92 #define FPGA_TX_CURR_PCI_REG 0x3C
93 #define FPGA_RX_GROUP0_START_REG 0x40
94 #define FPGA_TX_GROUP0_START_REG 0xC0
95 #define FPGA_DMA_DESC_1_REG 0x140
96 #define FPGA_DMA_DESC_2_REG 0x144
97 #define FPGA_DMA_DESC_3_REG 0x148
98 #define FPGA_DMA_DESC_4_REG 0x14C
100 #define FPGA_DMA_INT_STAT_REG 0x150
101 #define FPGA_DMA_INT_MASK_REG 0x154
102 #define FPGA_DMA_INT_RX (1 << 0)
103 #define FPGA_DMA_INT_TX (1 << 1)
105 #define FPGA_RX_GROUPS_PER_INT_REG 0x158
106 #define FPGA_TX_GROUPS_PER_INT_REG 0x15C
107 #define FPGA_DMA_ADR_PAGE_REG 0x160
108 #define FPGA_FPGA_REV_REG 0x200
110 #define FPGA_ADC_CLOCK_CTL_REG 0x204
111 #define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
112 #define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
113 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0
115 #define FPGA_ADC_DAC_EN_REG 0x208
116 #define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
117 #define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
119 #define FPGA_INT_STAT_REG 0x20C
120 #define FPGA_INT_MASK_REG 0x210
121 #define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
122 #define FPGA_INT_DMA_CORE (0x1 << 8)
123 #define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
124 #define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
125 #define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
126 #define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
127 #define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
128 #define FPGA_INT_RX_ACQ_DONE (0x1)
130 #define FPGA_RX_CTL_REG 0x214
131 #define FPGA_RX_CTL_FIFO_FLUSH (0x1 << 9)
132 #define FPGA_RX_CTL_SYNTH_DATA (0x1 << 8)
133 #define FPGA_RX_CTL_CONT_CAP (0x0 << 1)
134 #define FPGA_RX_CTL_SNAP_CAP (0x1 << 1)
136 #define FPGA_RX_ARM_REG 0x21C
138 #define FPGA_DOM_REG 0x224
139 #define FPGA_DOM_DCM_RESET (0x1 << 5)
140 #define FPGA_DOM_SOFT_RESET (0x1 << 4)
141 #define FPGA_DOM_DUAL_M_SG_DMA (0x0)
142 #define FPGA_DOM_TARGET_ACCESS (0x1)
144 #define FPGA_TX_CTL_REG 0x228
145 #define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
146 #define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
147 #define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
148 #define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
149 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
150 #define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
152 #define FPGA_ENDIAN_MODE_REG 0x22C
153 #define FPGA_RX_FIFO_COUNT_REG 0x28C
154 #define FPGA_TX_ENABLE_REG 0x298
155 #define FPGA_TX_TRIGGER_REG 0x29C
156 #define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
157 #define FPGA_CAP_FIFO_REG 0x300
158 #define FPGA_TX_SNAPSHOT_REG 0x8000
161 * Channel Index Definitions
176 struct poch_group_info
{
179 unsigned long user_offset
;
182 struct channel_info
{
185 atomic_t sys_block_size
;
186 atomic_t sys_group_size
;
187 atomic_t sys_group_count
;
189 enum channel_dir dir
;
191 unsigned long block_size
;
192 unsigned long group_size
;
193 unsigned long group_count
;
195 /* Contains the DMA address and VM offset of each group. */
196 struct poch_group_info
*groups
;
198 /* Contains the header and circular buffer exported to userspace. */
199 spinlock_t group_offsets_lock
;
200 struct poch_cbuf_header
*header
;
201 struct page
*header_pg
;
202 unsigned long header_size
;
204 /* Last group indicated as 'complete' to user space. */
205 unsigned int transfer
;
207 wait_queue_head_t wq
;
210 unsigned int data_available
;
211 unsigned int space_available
;
214 void __iomem
*bridge_iomem
;
215 void __iomem
*fpga_iomem
;
216 spinlock_t
*iomem_lock
;
222 struct poch_counters counters
;
223 spinlock_t counters_lock
;
230 struct pci_dev
*pci_dev
;
231 unsigned int nchannels
;
232 struct channel_info channels
[POCH_NCHANNELS
];
235 /* Counts the no. of channels that have been opened. On first
236 * open, the card is powered on. On last channel close, the
237 * card is powered off.
241 void __iomem
*bridge_iomem
;
242 void __iomem
*fpga_iomem
;
243 spinlock_t iomem_lock
;
248 static dev_t poch_first_dev
;
249 static struct class *poch_cls
;
250 static DEFINE_IDR(poch_ids
);
252 static ssize_t
store_block_size(struct device
*dev
,
253 struct device_attribute
*attr
,
254 const char *buf
, size_t count
)
256 struct channel_info
*channel
= dev_get_drvdata(dev
);
257 unsigned long block_size
;
259 sscanf(buf
, "%lu", &block_size
);
260 atomic_set(&channel
->sys_block_size
, block_size
);
264 static DEVICE_ATTR(block_size
, S_IWUSR
|S_IWGRP
, NULL
, store_block_size
);
266 static ssize_t
store_group_size(struct device
*dev
,
267 struct device_attribute
*attr
,
268 const char *buf
, size_t count
)
270 struct channel_info
*channel
= dev_get_drvdata(dev
);
271 unsigned long group_size
;
273 sscanf(buf
, "%lu", &group_size
);
274 atomic_set(&channel
->sys_group_size
, group_size
);
278 static DEVICE_ATTR(group_size
, S_IWUSR
|S_IWGRP
, NULL
, store_group_size
);
280 static ssize_t
store_group_count(struct device
*dev
,
281 struct device_attribute
*attr
,
282 const char *buf
, size_t count
)
284 struct channel_info
*channel
= dev_get_drvdata(dev
);
285 unsigned long group_count
;
287 sscanf(buf
, "%lu", &group_count
);
288 atomic_set(&channel
->sys_group_count
, group_count
);
292 static DEVICE_ATTR(group_count
, S_IWUSR
|S_IWGRP
, NULL
, store_group_count
);
294 static ssize_t
show_direction(struct device
*dev
,
295 struct device_attribute
*attr
, char *buf
)
297 struct channel_info
*channel
= dev_get_drvdata(dev
);
300 len
= sprintf(buf
, "%s\n", (channel
->dir
? "tx" : "rx"));
303 static DEVICE_ATTR(dir
, S_IRUSR
|S_IRGRP
, show_direction
, NULL
);
305 static unsigned long npages(unsigned long bytes
)
307 if (bytes
% PAGE_SIZE
== 0)
308 return bytes
/ PAGE_SIZE
;
310 return (bytes
/ PAGE_SIZE
) + 1;
313 static ssize_t
show_mmap_size(struct device
*dev
,
314 struct device_attribute
*attr
, char *buf
)
316 struct channel_info
*channel
= dev_get_drvdata(dev
);
318 unsigned long mmap_size
;
319 unsigned long group_pages
;
320 unsigned long header_pages
;
321 unsigned long total_group_pages
;
323 group_pages
= npages(channel
->group_size
);
324 header_pages
= npages(channel
->header_size
);
325 total_group_pages
= group_pages
* channel
->group_count
;
327 mmap_size
= (header_pages
+ total_group_pages
) * PAGE_SIZE
;
328 len
= sprintf(buf
, "%lu\n", mmap_size
);
331 static DEVICE_ATTR(mmap_size
, S_IRUSR
|S_IRGRP
, show_mmap_size
, NULL
);
333 static struct device_attribute
*poch_class_attrs
[] = {
334 &dev_attr_block_size
,
335 &dev_attr_group_size
,
336 &dev_attr_group_count
,
341 static void poch_channel_free_groups(struct channel_info
*channel
)
345 for (i
= 0; i
< channel
->group_count
; i
++) {
346 struct poch_group_info
*group
;
349 group
= &channel
->groups
[i
];
350 order
= get_order(channel
->group_size
);
352 __free_pages(group
->pg
, order
);
356 static int poch_channel_alloc_groups(struct channel_info
*channel
)
359 unsigned long group_pages
;
360 unsigned long header_pages
;
362 group_pages
= npages(channel
->group_size
);
363 header_pages
= npages(channel
->header_size
);
365 for (i
= 0; i
< channel
->group_count
; i
++) {
366 struct poch_group_info
*group
;
370 group
= &channel
->groups
[i
];
371 order
= get_order(channel
->group_size
);
374 * __GFP_COMP is required here since we are going to
375 * perform non-linear mapping to userspace. For more
376 * information read the vm_insert_page() function
380 gfp_mask
= GFP_KERNEL
| GFP_DMA32
| __GFP_ZERO
;
381 group
->pg
= alloc_pages(gfp_mask
, order
);
383 poch_channel_free_groups(channel
);
387 /* FIXME: This is the physical address not the bus
388 * address! This won't work in architectures that
389 * have an IOMMU. Can we use pci_map_single() for
392 group
->dma_addr
= page_to_pfn(group
->pg
) * PAGE_SIZE
;
394 (header_pages
+ (i
* group_pages
)) * PAGE_SIZE
;
396 printk(KERN_INFO PFX
"%ld: user_offset: 0x%lx\n", i
,
403 static int channel_latch_attr(struct channel_info
*channel
)
405 channel
->group_count
= atomic_read(&channel
->sys_group_count
);
406 channel
->group_size
= atomic_read(&channel
->sys_group_size
);
407 channel
->block_size
= atomic_read(&channel
->sys_block_size
);
409 if (channel
->group_count
== 0) {
410 printk(KERN_ERR PFX
"invalid group count %lu",
411 channel
->group_count
);
415 if (channel
->group_size
== 0 ||
416 channel
->group_size
< channel
->block_size
) {
417 printk(KERN_ERR PFX
"invalid group size %lu",
418 channel
->group_size
);
422 if (channel
->block_size
== 0 || (channel
->block_size
% 8) != 0) {
423 printk(KERN_ERR PFX
"invalid block size %lu",
424 channel
->block_size
);
428 if (channel
->group_size
% channel
->block_size
!= 0) {
430 "group size should be multiple of block size");
438 * Configure DMA group registers
440 static void channel_dma_init(struct channel_info
*channel
)
442 void __iomem
*fpga
= channel
->fpga_iomem
;
446 unsigned int group_in_page
;
451 u32 groups_per_int_reg
;
454 if (channel
->chno
== CHNO_RX_CHANNEL
) {
455 group_regs_base
= FPGA_RX_GROUP0_START_REG
;
456 block_size_reg
= FPGA_RX_BLOCK_SIZE_REG
;
457 block_count_reg
= FPGA_RX_BLOCK_COUNT_REG
;
458 group_count_reg
= FPGA_RX_GROUP_COUNT_REG
;
459 groups_per_int_reg
= FPGA_RX_GROUPS_PER_INT_REG
;
460 curr_pci_reg
= FPGA_RX_CURR_PCI_REG
;
462 group_regs_base
= FPGA_TX_GROUP0_START_REG
;
463 block_size_reg
= FPGA_TX_BLOCK_SIZE_REG
;
464 block_count_reg
= FPGA_TX_BLOCK_COUNT_REG
;
465 group_count_reg
= FPGA_TX_GROUP_COUNT_REG
;
466 groups_per_int_reg
= FPGA_TX_GROUPS_PER_INT_REG
;
467 curr_pci_reg
= FPGA_TX_CURR_PCI_REG
;
470 printk(KERN_WARNING
"block_size, group_size, group_count\n");
472 * Block size is represented in no. of 64 bit transfers.
474 iowrite32(channel
->block_size
/ 8, fpga
+ block_size_reg
);
475 iowrite32(channel
->group_size
/ channel
->block_size
,
476 fpga
+ block_count_reg
);
477 iowrite32(channel
->group_count
, fpga
+ group_count_reg
);
478 /* FIXME: Hardcoded groups per int. Get it from sysfs? */
479 iowrite32(1, fpga
+ groups_per_int_reg
);
481 /* Unlock PCI address? Not defined in the data sheet, but used
482 * in the reference code by Redrapids.
484 iowrite32(0x1, fpga
+ curr_pci_reg
);
486 /* The DMA address page register is shared between the RX and
487 * TX channels, so acquire lock.
489 for (i
= 0; i
< channel
->group_count
; i
++) {
491 group_in_page
= i
% 32;
493 group_reg
= group_regs_base
+ (group_in_page
* 4);
495 spin_lock(channel
->iomem_lock
);
496 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
497 iowrite32(channel
->groups
[i
].dma_addr
, fpga
+ group_reg
);
498 spin_unlock(channel
->iomem_lock
);
501 for (i
= 0; i
< channel
->group_count
; i
++) {
503 group_in_page
= i
% 32;
505 group_reg
= group_regs_base
+ (group_in_page
* 4);
507 spin_lock(channel
->iomem_lock
);
508 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
509 printk(KERN_INFO PFX
"%ld: read dma_addr: 0x%x\n", i
,
510 ioread32(fpga
+ group_reg
));
511 spin_unlock(channel
->iomem_lock
);
516 static int poch_channel_alloc_header(struct channel_info
*channel
)
518 struct poch_cbuf_header
*header
= channel
->header
;
519 unsigned long group_offset_size
;
520 unsigned long tot_group_offsets_size
;
522 /* Allocate memory to hold header exported userspace */
523 group_offset_size
= sizeof(header
->group_offsets
[0]);
524 tot_group_offsets_size
= group_offset_size
* channel
->group_count
;
525 channel
->header_size
= sizeof(*header
) + tot_group_offsets_size
;
526 channel
->header_pg
= alloc_pages(GFP_KERNEL
| __GFP_ZERO
,
527 get_order(channel
->header_size
));
528 if (!channel
->header_pg
)
531 channel
->header
= page_address(channel
->header_pg
);
536 static void poch_channel_free_header(struct channel_info
*channel
)
540 order
= get_order(channel
->header_size
);
541 __free_pages(channel
->header_pg
, order
);
544 static void poch_channel_init_header(struct channel_info
*channel
)
547 struct poch_group_info
*groups
;
550 channel
->header
->group_size_bytes
= channel
->group_size
;
551 channel
->header
->group_count
= channel
->group_count
;
553 spin_lock_init(&channel
->group_offsets_lock
);
555 group_offsets
= channel
->header
->group_offsets
;
556 groups
= channel
->groups
;
558 for (i
= 0; i
< channel
->group_count
; i
++) {
559 if (channel
->dir
== CHANNEL_DIR_RX
)
560 group_offsets
[i
] = -1;
562 group_offsets
[i
] = groups
[i
].user_offset
;
566 static void __poch_channel_clear_counters(struct channel_info
*channel
)
568 channel
->counters
.pll_unlock
= 0;
569 channel
->counters
.fifo_empty
= 0;
570 channel
->counters
.fifo_overflow
= 0;
573 static int poch_channel_init(struct channel_info
*channel
,
574 struct poch_dev
*poch_dev
)
576 struct pci_dev
*pdev
= poch_dev
->pci_dev
;
577 struct device
*dev
= &pdev
->dev
;
578 unsigned long alloc_size
;
581 printk(KERN_WARNING
"channel_latch_attr\n");
583 ret
= channel_latch_attr(channel
);
587 channel
->transfer
= 0;
589 /* Allocate memory to hold group information. */
590 alloc_size
= channel
->group_count
* sizeof(struct poch_group_info
);
591 channel
->groups
= kzalloc(alloc_size
, GFP_KERNEL
);
592 if (!channel
->groups
) {
593 dev_err(dev
, "error allocating memory for group info\n");
598 printk(KERN_WARNING
"poch_channel_alloc_groups\n");
600 ret
= poch_channel_alloc_groups(channel
);
602 dev_err(dev
, "error allocating groups of order %d\n",
603 get_order(channel
->group_size
));
604 goto out_free_group_info
;
607 ret
= poch_channel_alloc_header(channel
);
609 dev_err(dev
, "error allocating user space header\n");
610 goto out_free_groups
;
613 channel
->fpga_iomem
= poch_dev
->fpga_iomem
;
614 channel
->bridge_iomem
= poch_dev
->bridge_iomem
;
615 channel
->iomem_lock
= &poch_dev
->iomem_lock
;
616 spin_lock_init(&channel
->counters_lock
);
618 __poch_channel_clear_counters(channel
);
620 printk(KERN_WARNING
"poch_channel_init_header\n");
622 poch_channel_init_header(channel
);
627 poch_channel_free_groups(channel
);
629 kfree(channel
->groups
);
634 static int poch_wait_fpga_prog(void __iomem
*bridge
)
636 unsigned long total_wait
;
637 const unsigned long wait_period
= 100;
638 /* FIXME: Get the actual timeout */
639 const unsigned long prog_timeo
= 10000; /* 10 Seconds */
642 printk(KERN_WARNING
"poch_wait_fpg_prog\n");
644 printk(KERN_INFO PFX
"programming fpga ...\n");
648 total_wait
+= wait_period
;
650 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
651 if (card_power
& BRIDGE_CARD_POWER_PROG_DONE
) {
652 printk(KERN_INFO PFX
"programming done\n");
655 if (total_wait
> prog_timeo
) {
657 "timed out while programming FPGA\n");
663 static void poch_card_power_off(struct poch_dev
*poch_dev
)
665 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
668 iowrite32(0, bridge
+ BRIDGE_INT_MASK_REG
);
669 iowrite32(0, bridge
+ BRIDGE_DMA_GO_REG
);
671 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
672 iowrite32(card_power
& ~BRIDGE_CARD_POWER_EN
,
673 bridge
+ BRIDGE_CARD_POWER_REG
);
681 static void poch_card_clock_on(void __iomem
*fpga
)
683 /* FIXME: Get this data through sysfs? */
684 enum clk_src clk_src
= CLK_SRC_ON_BOARD
;
686 if (clk_src
== CLK_SRC_ON_BOARD
) {
687 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK
| FPGA_ADC_CLOCK_CTL_OSC_EN
,
688 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
689 } else if (clk_src
== CLK_SRC_EXTERNAL
) {
690 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK
,
691 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
695 static int poch_card_power_on(struct poch_dev
*poch_dev
)
697 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
698 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
700 iowrite32(BRIDGE_CARD_POWER_EN
, bridge
+ BRIDGE_CARD_POWER_REG
);
702 if (poch_wait_fpga_prog(bridge
) != 0) {
703 poch_card_power_off(poch_dev
);
707 poch_card_clock_on(fpga
);
709 /* Sync to new clock, reset state machines, set DMA mode. */
710 iowrite32(FPGA_DOM_DCM_RESET
| FPGA_DOM_SOFT_RESET
711 | FPGA_DOM_DUAL_M_SG_DMA
, fpga
+ FPGA_DOM_REG
);
713 /* FIXME: The time required for sync. needs to be tuned. */
719 static void poch_channel_analog_on(struct channel_info
*channel
)
721 void __iomem
*fpga
= channel
->fpga_iomem
;
724 spin_lock(channel
->iomem_lock
);
725 adc_dac_en
= ioread32(fpga
+ FPGA_ADC_DAC_EN_REG
);
726 switch (channel
->chno
) {
727 case CHNO_RX_CHANNEL
:
728 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_ADC_OFF
,
729 fpga
+ FPGA_ADC_DAC_EN_REG
);
731 case CHNO_TX_CHANNEL
:
732 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_DAC_OFF
,
733 fpga
+ FPGA_ADC_DAC_EN_REG
);
736 spin_unlock(channel
->iomem_lock
);
739 static int poch_open(struct inode
*inode
, struct file
*filp
)
741 struct poch_dev
*poch_dev
;
742 struct channel_info
*channel
;
743 void __iomem
*bridge
;
749 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
750 bridge
= poch_dev
->bridge_iomem
;
751 fpga
= poch_dev
->fpga_iomem
;
753 chno
= iminor(inode
) % poch_dev
->nchannels
;
754 channel
= &poch_dev
->channels
[chno
];
756 if (!atomic_dec_and_test(&channel
->free
)) {
757 atomic_inc(&channel
->free
);
762 usage
= atomic_inc_return(&poch_dev
->usage
);
764 printk(KERN_WARNING
"poch_card_power_on\n");
767 ret
= poch_card_power_on(poch_dev
);
772 printk(KERN_INFO
"CardBus Bridge Revision: %x\n",
773 ioread32(bridge
+ BRIDGE_REV_REG
));
774 printk(KERN_INFO
"CardBus Interface Revision: %x\n",
775 ioread32(fpga
+ FPGA_IFACE_REV_REG
));
777 channel
->chno
= chno
;
778 filp
->private_data
= channel
;
780 printk(KERN_WARNING
"poch_channel_init\n");
782 ret
= poch_channel_init(channel
, poch_dev
);
786 poch_channel_analog_on(channel
);
788 printk(KERN_WARNING
"channel_dma_init\n");
790 channel_dma_init(channel
);
792 printk(KERN_WARNING
"poch_channel_analog_on\n");
795 printk(KERN_WARNING
"setting up DMA\n");
797 /* Initialize DMA Controller. */
798 iowrite32(FPGA_CAP_FIFO_REG
, bridge
+ BRIDGE_STAT_2_REG
);
799 iowrite32(FPGA_DMA_DESC_1_REG
, bridge
+ BRIDGE_STAT_3_REG
);
801 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
802 ioread32(fpga
+ FPGA_INT_STAT_REG
);
803 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
805 /* Initialize Interrupts. FIXME: Enable temperature
806 * handling We are enabling both Tx and Rx channel
807 * interrupts here. Do we need to enable interrupts
808 * only for the current channel? Anyways we won't get
809 * the interrupt unless the DMA is activated.
811 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
812 iowrite32(FPGA_INT_DMA_CORE
813 | FPGA_INT_PLL_UNLOCKED
814 | FPGA_INT_TX_FF_EMPTY
815 | FPGA_INT_RX_FF_EMPTY
816 | FPGA_INT_TX_FF_OVRFLW
817 | FPGA_INT_RX_FF_OVRFLW
,
818 fpga
+ FPGA_INT_MASK_REG
);
819 iowrite32(FPGA_DMA_INT_RX
| FPGA_DMA_INT_TX
,
820 fpga
+ FPGA_DMA_INT_MASK_REG
);
823 if (channel
->dir
== CHANNEL_DIR_TX
) {
824 /* Flush TX FIFO and output data from cardbus. */
825 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
826 | FPGA_TX_CTL_OUTPUT_CARDBUS
,
827 fpga
+ FPGA_TX_CTL_REG
);
829 /* Flush RX FIFO and output data to cardbus. */
830 iowrite32(FPGA_RX_CTL_CONT_CAP
831 | FPGA_RX_CTL_FIFO_FLUSH
,
832 fpga
+ FPGA_RX_CTL_REG
);
835 atomic_inc(&channel
->inited
);
841 poch_card_power_off(poch_dev
);
843 atomic_dec(&poch_dev
->usage
);
844 atomic_inc(&channel
->free
);
849 static int poch_release(struct inode
*inode
, struct file
*filp
)
851 struct channel_info
*channel
= filp
->private_data
;
852 struct poch_dev
*poch_dev
;
855 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
857 usage
= atomic_dec_return(&poch_dev
->usage
);
859 printk(KERN_WARNING
"poch_card_power_off\n");
860 poch_card_power_off(poch_dev
);
863 atomic_dec(&channel
->inited
);
864 poch_channel_free_header(channel
);
865 poch_channel_free_groups(channel
);
866 kfree(channel
->groups
);
867 atomic_inc(&channel
->free
);
873 * Map the header and the group buffers, to user space.
875 static int poch_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
877 struct channel_info
*channel
= filp
->private_data
;
882 unsigned long group_pages
;
883 unsigned long header_pages
;
884 unsigned long total_group_pages
;
892 printk(KERN_WARNING
"poch_mmap\n");
895 printk(KERN_WARNING PFX
"page offset: %lu\n", vma
->vm_pgoff
);
899 group_pages
= npages(channel
->group_size
);
900 header_pages
= npages(channel
->header_size
);
901 total_group_pages
= group_pages
* channel
->group_count
;
903 size
= vma
->vm_end
- vma
->vm_start
;
904 if (size
!= (header_pages
+ total_group_pages
) * PAGE_SIZE
) {
905 printk(KERN_WARNING PFX
"required %lu bytes\n", size
);
909 start
= vma
->vm_start
;
911 /* FIXME: Cleanup required on failure? */
912 pg
= channel
->header_pg
;
913 for (pg_num
= 0; pg_num
< header_pages
; pg_num
++, pg
++) {
914 printk(KERN_DEBUG PFX
"page_count: %d\n", page_count(pg
));
915 printk(KERN_DEBUG PFX
"%d: header: 0x%lx\n", pg_num
, start
);
916 ret
= vm_insert_page(vma
, start
, pg
);
918 printk(KERN_DEBUG
"vm_insert 1 failed at %lx\n", start
);
924 for (i
= 0; i
< channel
->group_count
; i
++) {
925 pg
= channel
->groups
[i
].pg
;
926 for (pg_num
= 0; pg_num
< group_pages
; pg_num
++, pg
++) {
927 printk(KERN_DEBUG PFX
"%d: group %d: 0x%lx\n",
929 ret
= vm_insert_page(vma
, start
, pg
);
931 printk(KERN_DEBUG PFX
932 "vm_insert 2 failed at %d\n", pg_num
);
943 * Check whether there is some group that the user space has not
944 * consumed yet. When the user space consumes a group, it sets it to
945 * -1. Cosuming could be reading data in case of RX and filling a
946 * buffer in case of TX.
948 static int poch_channel_available(struct channel_info
*channel
)
952 spin_lock_irq(&channel
->group_offsets_lock
);
954 for (i
= 0; i
< channel
->group_count
; i
++) {
955 if (channel
->header
->group_offsets
[i
] != -1) {
956 spin_unlock_irq(&channel
->group_offsets_lock
);
961 spin_unlock_irq(&channel
->group_offsets_lock
);
966 static unsigned int poch_poll(struct file
*filp
, poll_table
*pt
)
968 struct channel_info
*channel
= filp
->private_data
;
969 unsigned int ret
= 0;
971 poll_wait(filp
, &channel
->wq
, pt
);
973 if (poch_channel_available(channel
)) {
974 if (channel
->dir
== CHANNEL_DIR_RX
)
975 ret
= POLLIN
| POLLRDNORM
;
977 ret
= POLLOUT
| POLLWRNORM
;
983 static int poch_ioctl(struct inode
*inode
, struct file
*filp
,
984 unsigned int cmd
, unsigned long arg
)
986 struct channel_info
*channel
= filp
->private_data
;
987 void __iomem
*fpga
= channel
->fpga_iomem
;
988 void __iomem
*bridge
= channel
->bridge_iomem
;
989 void __user
*argp
= (void __user
*)arg
;
990 struct vm_area_struct
*vms
;
991 struct poch_counters counters
;
995 case POCH_IOC_TRANSFER_START
:
996 switch (channel
->chno
) {
997 case CHNO_TX_CHANNEL
:
998 printk(KERN_INFO PFX
"ioctl: Tx start\n");
999 iowrite32(0x1, fpga
+ FPGA_TX_TRIGGER_REG
);
1000 iowrite32(0x1, fpga
+ FPGA_TX_ENABLE_REG
);
1002 /* FIXME: Does it make sense to do a DMA GO
1003 * twice, once in Tx and once in Rx.
1005 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
1007 case CHNO_RX_CHANNEL
:
1008 printk(KERN_INFO PFX
"ioctl: Rx start\n");
1009 iowrite32(0x1, fpga
+ FPGA_RX_ARM_REG
);
1010 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
1014 case POCH_IOC_TRANSFER_STOP
:
1015 switch (channel
->chno
) {
1016 case CHNO_TX_CHANNEL
:
1017 printk(KERN_INFO PFX
"ioctl: Tx stop\n");
1018 iowrite32(0x0, fpga
+ FPGA_TX_ENABLE_REG
);
1019 iowrite32(0x0, fpga
+ FPGA_TX_TRIGGER_REG
);
1020 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
1022 case CHNO_RX_CHANNEL
:
1023 printk(KERN_INFO PFX
"ioctl: Rx stop\n");
1024 iowrite32(0x0, fpga
+ FPGA_RX_ARM_REG
);
1025 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
1029 case POCH_IOC_GET_COUNTERS
:
1030 if (!access_ok(VERIFY_WRITE
, argp
, sizeof(struct poch_counters
)))
1033 spin_lock_irq(&channel
->counters_lock
);
1034 counters
= channel
->counters
;
1035 __poch_channel_clear_counters(channel
);
1036 spin_unlock_irq(&channel
->counters_lock
);
1038 ret
= copy_to_user(argp
, &counters
,
1039 sizeof(struct poch_counters
));
1044 case POCH_IOC_SYNC_GROUP_FOR_USER
:
1045 case POCH_IOC_SYNC_GROUP_FOR_DEVICE
:
1046 vms
= find_vma(current
->mm
, arg
);
1048 /* Address not mapped. */
1050 if (vms
->vm_file
!= filp
)
1051 /* Address mapped from different device/file. */
1054 flush_cache_range(vms
, arg
, arg
+ channel
->group_size
);
1060 static struct file_operations poch_fops
= {
1061 .owner
= THIS_MODULE
,
1063 .release
= poch_release
,
1064 .ioctl
= poch_ioctl
,
1069 static void poch_irq_dma(struct channel_info
*channel
)
1075 struct poch_group_info
*groups
;
1079 if (!atomic_read(&channel
->inited
))
1082 prev_transfer
= channel
->transfer
;
1084 if (channel
->chno
== CHNO_RX_CHANNEL
)
1085 curr_group_reg
= FPGA_RX_CURR_GROUP_REG
;
1087 curr_group_reg
= FPGA_TX_CURR_GROUP_REG
;
1089 curr_transfer
= ioread32(channel
->fpga_iomem
+ curr_group_reg
);
1091 groups_done
= curr_transfer
- prev_transfer
;
1092 /* Check wrap over, and handle it. */
1093 if (groups_done
<= 0)
1094 groups_done
+= channel
->group_count
;
1096 group_offsets
= channel
->header
->group_offsets
;
1097 groups
= channel
->groups
;
1099 spin_lock(&channel
->group_offsets_lock
);
1101 for (i
= 0; i
< groups_done
; i
++) {
1102 j
= (prev_transfer
+ i
) % channel
->group_count
;
1103 group_offsets
[j
] = groups
[j
].user_offset
;
1106 spin_unlock(&channel
->group_offsets_lock
);
1108 channel
->transfer
= curr_transfer
;
1110 wake_up_interruptible(&channel
->wq
);
1113 static irqreturn_t
poch_irq_handler(int irq
, void *p
)
1115 struct poch_dev
*poch_dev
= p
;
1116 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
1117 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
1118 struct channel_info
*channel_rx
= &poch_dev
->channels
[CHNO_RX_CHANNEL
];
1119 struct channel_info
*channel_tx
= &poch_dev
->channels
[CHNO_TX_CHANNEL
];
1124 bridge_stat
= ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1125 fpga_stat
= ioread32(fpga
+ FPGA_INT_STAT_REG
);
1126 dma_stat
= ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1128 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1129 ioread32(fpga
+ FPGA_INT_STAT_REG
);
1130 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1132 if (bridge_stat
& BRIDGE_INT_FPGA
) {
1133 if (fpga_stat
& FPGA_INT_DMA_CORE
) {
1134 if (dma_stat
& FPGA_DMA_INT_RX
)
1135 poch_irq_dma(channel_rx
);
1136 if (dma_stat
& FPGA_DMA_INT_TX
)
1137 poch_irq_dma(channel_tx
);
1139 if (fpga_stat
& FPGA_INT_PLL_UNLOCKED
) {
1140 channel_tx
->counters
.pll_unlock
++;
1141 channel_rx
->counters
.pll_unlock
++;
1142 if (printk_ratelimit())
1143 printk(KERN_WARNING PFX
"PLL unlocked\n");
1145 if (fpga_stat
& FPGA_INT_TX_FF_EMPTY
)
1146 channel_tx
->counters
.fifo_empty
++;
1147 if (fpga_stat
& FPGA_INT_TX_FF_OVRFLW
)
1148 channel_tx
->counters
.fifo_overflow
++;
1149 if (fpga_stat
& FPGA_INT_RX_FF_EMPTY
)
1150 channel_rx
->counters
.fifo_empty
++;
1151 if (fpga_stat
& FPGA_INT_RX_FF_OVRFLW
)
1152 channel_rx
->counters
.fifo_overflow
++;
1155 * FIXME: These errors should be notified through the
1156 * poll interface as POLLERR.
1159 /* Re-enable interrupts. */
1160 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
1168 static void poch_class_dev_unregister(struct poch_dev
*poch_dev
, int id
)
1172 struct channel_info
*channel
;
1175 if (poch_dev
->dev
== NULL
)
1178 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1179 channel
= &poch_dev
->channels
[i
];
1180 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1185 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1186 for (j
= 0; j
< nattrs
; j
++)
1187 device_remove_file(channel
->dev
, poch_class_attrs
[j
]);
1189 device_unregister(channel
->dev
);
1192 device_unregister(poch_dev
->dev
);
1195 static int __devinit
poch_class_dev_register(struct poch_dev
*poch_dev
,
1198 struct device
*dev
= &poch_dev
->pci_dev
->dev
;
1202 struct channel_info
*channel
;
1205 poch_dev
->dev
= device_create(poch_cls
, &poch_dev
->pci_dev
->dev
,
1206 MKDEV(0, 0), NULL
, "poch%d", id
);
1207 if (IS_ERR(poch_dev
->dev
)) {
1208 dev_err(dev
, "error creating parent class device");
1209 ret
= PTR_ERR(poch_dev
->dev
);
1210 poch_dev
->dev
= NULL
;
1214 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1215 channel
= &poch_dev
->channels
[i
];
1217 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1218 channel
->dev
= device_create(poch_cls
, poch_dev
->dev
, devno
,
1220 if (IS_ERR(channel
->dev
)) {
1221 dev_err(dev
, "error creating channel class device");
1222 ret
= PTR_ERR(channel
->dev
);
1223 channel
->dev
= NULL
;
1224 poch_class_dev_unregister(poch_dev
, id
);
1228 dev_set_drvdata(channel
->dev
, channel
);
1229 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1230 for (j
= 0; j
< nattrs
; j
++) {
1231 ret
= device_create_file(channel
->dev
,
1232 poch_class_attrs
[j
]);
1234 dev_err(dev
, "error creating attribute file");
1235 poch_class_dev_unregister(poch_dev
, id
);
1244 static int __devinit
poch_pci_probe(struct pci_dev
*pdev
,
1245 const struct pci_device_id
*pci_id
)
1247 struct device
*dev
= &pdev
->dev
;
1248 struct poch_dev
*poch_dev
;
1249 struct uio_info
*uio
;
1254 poch_dev
= kzalloc(sizeof(struct poch_dev
), GFP_KERNEL
);
1256 dev_err(dev
, "error allocating priv. data memory\n");
1260 poch_dev
->pci_dev
= pdev
;
1261 uio
= &poch_dev
->uio
;
1263 pci_set_drvdata(pdev
, poch_dev
);
1265 spin_lock_init(&poch_dev
->iomem_lock
);
1267 poch_dev
->nchannels
= POCH_NCHANNELS
;
1268 poch_dev
->channels
[CHNO_RX_CHANNEL
].dir
= CHANNEL_DIR_RX
;
1269 poch_dev
->channels
[CHNO_TX_CHANNEL
].dir
= CHANNEL_DIR_TX
;
1271 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1272 init_waitqueue_head(&poch_dev
->channels
[i
].wq
);
1273 atomic_set(&poch_dev
->channels
[i
].free
, 1);
1274 atomic_set(&poch_dev
->channels
[i
].inited
, 0);
1277 ret
= pci_enable_device(pdev
);
1279 dev_err(dev
, "error enabling device\n");
1283 ret
= pci_request_regions(pdev
, "poch");
1285 dev_err(dev
, "error requesting resources\n");
1289 uio
->mem
[0].addr
= pci_resource_start(pdev
, 1);
1290 if (!uio
->mem
[0].addr
) {
1291 dev_err(dev
, "invalid BAR1\n");
1296 uio
->mem
[0].size
= pci_resource_len(pdev
, 1);
1297 uio
->mem
[0].memtype
= UIO_MEM_PHYS
;
1300 uio
->version
= "0.0.1";
1302 ret
= uio_register_device(dev
, uio
);
1304 dev_err(dev
, "error register UIO device: %d\n", ret
);
1308 poch_dev
->bridge_iomem
= ioremap(pci_resource_start(pdev
, 0),
1309 pci_resource_len(pdev
, 0));
1310 if (poch_dev
->bridge_iomem
== NULL
) {
1311 dev_err(dev
, "error mapping bridge (bar0) registers\n");
1316 poch_dev
->fpga_iomem
= ioremap(pci_resource_start(pdev
, 1),
1317 pci_resource_len(pdev
, 1));
1318 if (poch_dev
->fpga_iomem
== NULL
) {
1319 dev_err(dev
, "error mapping fpga (bar1) registers\n");
1321 goto out_bar0_unmap
;
1324 ret
= request_irq(pdev
->irq
, poch_irq_handler
, IRQF_SHARED
,
1325 dev_name(dev
), poch_dev
);
1327 dev_err(dev
, "error requesting IRQ %u\n", pdev
->irq
);
1329 goto out_bar1_unmap
;
1332 if (!idr_pre_get(&poch_ids
, GFP_KERNEL
)) {
1333 dev_err(dev
, "error allocating memory ids\n");
1338 idr_get_new(&poch_ids
, poch_dev
, &id
);
1339 if (id
>= MAX_POCH_CARDS
) {
1340 dev_err(dev
, "minors exhausted\n");
1345 cdev_init(&poch_dev
->cdev
, &poch_fops
);
1346 poch_dev
->cdev
.owner
= THIS_MODULE
;
1347 ret
= cdev_add(&poch_dev
->cdev
,
1348 poch_first_dev
+ (id
* poch_dev
->nchannels
),
1349 poch_dev
->nchannels
);
1351 dev_err(dev
, "error register character device\n");
1352 goto out_idr_remove
;
1355 ret
= poch_class_dev_register(poch_dev
, id
);
1362 cdev_del(&poch_dev
->cdev
);
1364 idr_remove(&poch_ids
, id
);
1366 free_irq(pdev
->irq
, poch_dev
);
1368 iounmap(poch_dev
->fpga_iomem
);
1370 iounmap(poch_dev
->bridge_iomem
);
1372 uio_unregister_device(uio
);
1374 pci_release_regions(pdev
);
1376 pci_disable_device(pdev
);
1383 * FIXME: We are yet to handle the hot unplug case.
1385 static void poch_pci_remove(struct pci_dev
*pdev
)
1387 struct poch_dev
*poch_dev
= pci_get_drvdata(pdev
);
1388 struct uio_info
*uio
= &poch_dev
->uio
;
1389 unsigned int minor
= MINOR(poch_dev
->cdev
.dev
);
1390 unsigned int id
= minor
/ poch_dev
->nchannels
;
1392 poch_class_dev_unregister(poch_dev
, id
);
1393 cdev_del(&poch_dev
->cdev
);
1394 idr_remove(&poch_ids
, id
);
1395 free_irq(pdev
->irq
, poch_dev
);
1396 iounmap(poch_dev
->fpga_iomem
);
1397 iounmap(poch_dev
->bridge_iomem
);
1398 uio_unregister_device(uio
);
1399 pci_release_regions(pdev
);
1400 pci_disable_device(pdev
);
1401 pci_set_drvdata(pdev
, NULL
);
1402 iounmap(uio
->mem
[0].internal_addr
);
1407 static const struct pci_device_id poch_pci_ids
[] /* __devinitconst */ = {
1408 { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS
,
1409 PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
) },
1413 static struct pci_driver poch_pci_driver
= {
1415 .id_table
= poch_pci_ids
,
1416 .probe
= poch_pci_probe
,
1417 .remove
= poch_pci_remove
,
1420 static int __init
poch_init_module(void)
1424 ret
= alloc_chrdev_region(&poch_first_dev
, 0,
1425 MAX_POCH_DEVICES
, DRV_NAME
);
1427 printk(KERN_ERR PFX
"error allocating device no.");
1431 poch_cls
= class_create(THIS_MODULE
, "pocketchange");
1432 if (IS_ERR(poch_cls
)) {
1433 ret
= PTR_ERR(poch_cls
);
1434 goto out_unreg_chrdev
;
1437 ret
= pci_register_driver(&poch_pci_driver
);
1439 printk(KERN_ERR PFX
"error register PCI device");
1440 goto out_class_destroy
;
1446 class_destroy(poch_cls
);
1449 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1454 static void __exit
poch_exit_module(void)
1456 pci_unregister_driver(&poch_pci_driver
);
1457 class_destroy(poch_cls
);
1458 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1461 module_init(poch_init_module
);
1462 module_exit(poch_exit_module
);
1464 MODULE_LICENSE("GPL v2");