On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] / drivers / staging / b3dfg / b3dfg.c
blob4a43c51c172abdb454e857e42dac52432b37cd35
1 /*
2 * Brontes PCI frame grabber driver
4 * Copyright (C) 2008 3M Company
5 * Contact: Justin Bronder <jsbronder@brontes3d.com>
6 * Original Authors: Daniel Drake <ddrake@brontes3d.com>
7 * Duane Griffin <duaneg@dghda.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/device.h>
25 #include <linux/fs.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/ioctl.h>
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/types.h>
33 #include <linux/cdev.h>
34 #include <linux/list.h>
35 #include <linux/poll.h>
36 #include <linux/wait.h>
37 #include <linux/mm.h>
38 #include <linux/uaccess.h>
39 #include <linux/sched.h>
41 static unsigned int b3dfg_nbuf = 2;
43 module_param_named(buffer_count, b3dfg_nbuf, uint, 0444);
45 MODULE_PARM_DESC(buffer_count, "Number of buffers (min 2, default 2)");
47 MODULE_AUTHOR("Daniel Drake <ddrake@brontes3d.com>");
48 MODULE_DESCRIPTION("Brontes frame grabber driver");
49 MODULE_LICENSE("GPL");
#define DRIVER_NAME "b3dfg"
#define B3DFG_MAX_DEVS 4
#define B3DFG_FRAMES_PER_BUFFER 3

/* Register window: BAR 0, 64KB. */
#define B3DFG_BAR_REGS 0
#define B3DFG_REGS_LENGTH 0x10000

/* ioctl interface. */
#define B3DFG_IOC_MAGIC 0xb3 /* dfg :-) */
#define B3DFG_IOCGFRMSZ _IOR(B3DFG_IOC_MAGIC, 1, int)
#define B3DFG_IOCTNUMBUFS _IO(B3DFG_IOC_MAGIC, 2)
#define B3DFG_IOCTTRANS _IO(B3DFG_IOC_MAGIC, 3)
#define B3DFG_IOCTQUEUEBUF _IO(B3DFG_IOC_MAGIC, 4)
#define B3DFG_IOCTPOLLBUF _IOWR(B3DFG_IOC_MAGIC, 5, struct b3dfg_poll)
#define B3DFG_IOCTWAITBUF _IOWR(B3DFG_IOC_MAGIC, 6, struct b3dfg_wait)
#define B3DFG_IOCGWANDSTAT _IOR(B3DFG_IOC_MAGIC, 7, int)
/* Device register byte offsets. */
enum {
        /* number of 4kb pages per frame */
        B3D_REG_FRM_SIZE = 0x0,

        /* bit 0: set to enable interrupts
         * bit 1: set to enable cable status change interrupts */
        B3D_REG_HW_CTRL = 0x4,

        /* bit 0-1 - 1-based ID of next pending frame transfer (0 = none)
         * bit 2 indicates the previous DMA transfer has completed
         * bit 3 indicates wand cable status change
         * bit 8:15 - counter of number of discarded triplets */
        B3D_REG_DMA_STS = 0x8,

        /* bit 0: wand status (1 = present, 0 = disconnected) */
        B3D_REG_WAND_STS = 0xc,

        /* bus address for DMA transfers. lower 2 bits must be zero because
         * DMA works with 32 bit word size. */
        B3D_REG_EC220_DMA_ADDR = 0x8000,

        /* bit 20:0 - number of 32 bit words to be transferred
         * bit 21:31 - reserved */
        B3D_REG_EC220_TRF_SIZE = 0x8004,

        /* bit 0 - error bit
         * bit 1 - interrupt bit (set to generate interrupt at end of transfer)
         * bit 2 - start bit (set to start transfer)
         * bit 3 - direction (0 = DMA_TO_DEVICE, 1 = DMA_FROM_DEVICE
         * bit 4:31 - reserved */
        B3D_REG_EC220_DMA_STS = 0x8008,
};
/* Lifecycle of a triplet buffer: polled (idle) -> pending -> populated. */
enum b3dfg_buffer_state {
        B3DFG_BUFFER_POLLED = 0,
        B3DFG_BUFFER_PENDING,
        B3DFG_BUFFER_POPULATED,
};
106 struct b3dfg_buffer {
107 unsigned char *frame[B3DFG_FRAMES_PER_BUFFER];
108 struct list_head list;
109 u8 state;
112 struct b3dfg_dev {
114 /* no protection needed: all finalized at initialization time */
115 struct pci_dev *pdev;
116 struct cdev chardev;
117 struct device *dev;
118 void __iomem *regs;
119 unsigned int frame_size;
122 * Protects buffer state, including buffer_queue, triplet_ready,
123 * cur_dma_frame_idx & cur_dma_frame_addr.
125 spinlock_t buffer_lock;
126 struct b3dfg_buffer *buffers;
127 struct list_head buffer_queue;
129 /* Last frame in triplet transferred (-1 if none). */
130 int cur_dma_frame_idx;
132 /* Current frame's address for DMA. */
133 dma_addr_t cur_dma_frame_addr;
136 * Protects cstate_tstamp.
137 * Nests inside buffer_lock.
139 spinlock_t cstate_lock;
140 unsigned long cstate_tstamp;
143 * Protects triplets_dropped.
144 * Nests inside buffers_lock.
146 spinlock_t triplets_dropped_lock;
147 unsigned int triplets_dropped;
149 wait_queue_head_t buffer_waitqueue;
151 unsigned int transmission_enabled:1;
152 unsigned int triplet_ready:1;
155 static u8 b3dfg_devices[B3DFG_MAX_DEVS];
157 static struct class *b3dfg_class;
158 static dev_t b3dfg_devt;
160 static const struct pci_device_id b3dfg_ids[] __devinitdata = {
161 { PCI_DEVICE(0x0b3d, 0x0001) },
162 { },
165 MODULE_DEVICE_TABLE(pci, b3dfg_ids);
/***** user-visible types *****/

/* Argument for B3DFG_IOCTPOLLBUF (non-blocking poll of one buffer). */
struct b3dfg_poll {
        int buffer_idx;
        unsigned int triplets_dropped;
};

/* Argument for B3DFG_IOCTWAITBUF (blocking wait, timeout in ms; 0 = forever). */
struct b3dfg_wait {
        int buffer_idx;
        unsigned int timeout;
        unsigned int triplets_dropped;
};
180 /**** register I/O ****/
182 static u32 b3dfg_read32(struct b3dfg_dev *fgdev, u16 reg)
184 return ioread32(fgdev->regs + reg);
187 static void b3dfg_write32(struct b3dfg_dev *fgdev, u16 reg, u32 value)
189 iowrite32(value, fgdev->regs + reg);
192 /**** buffer management ****/
195 * Program EC220 for transfer of a specific frame.
196 * Called with buffer_lock held.
198 static int setup_frame_transfer(struct b3dfg_dev *fgdev,
199 struct b3dfg_buffer *buf, int frame)
201 unsigned char *frm_addr;
202 dma_addr_t frm_addr_dma;
203 unsigned int frm_size = fgdev->frame_size;
205 frm_addr = buf->frame[frame];
206 frm_addr_dma = pci_map_single(fgdev->pdev, frm_addr,
207 frm_size, PCI_DMA_FROMDEVICE);
208 if (pci_dma_mapping_error(fgdev->pdev, frm_addr_dma))
209 return -ENOMEM;
211 fgdev->cur_dma_frame_addr = frm_addr_dma;
212 fgdev->cur_dma_frame_idx = frame;
214 b3dfg_write32(fgdev, B3D_REG_EC220_DMA_ADDR,
215 cpu_to_le32(frm_addr_dma));
216 b3dfg_write32(fgdev, B3D_REG_EC220_TRF_SIZE,
217 cpu_to_le32(frm_size >> 2));
218 b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0xf);
220 return 0;
223 /* Caller should hold buffer lock */
224 static void dequeue_all_buffers(struct b3dfg_dev *fgdev)
226 int i;
227 for (i = 0; i < b3dfg_nbuf; i++) {
228 struct b3dfg_buffer *buf = &fgdev->buffers[i];
229 buf->state = B3DFG_BUFFER_POLLED;
230 list_del_init(&buf->list);
234 /* queue a buffer to receive data */
235 static int queue_buffer(struct b3dfg_dev *fgdev, int bufidx)
237 struct device *dev = &fgdev->pdev->dev;
238 struct b3dfg_buffer *buf;
239 unsigned long flags;
240 int r = 0;
242 spin_lock_irqsave(&fgdev->buffer_lock, flags);
243 if (bufidx < 0 || bufidx >= b3dfg_nbuf) {
244 dev_dbg(dev, "Invalid buffer index, %d\n", bufidx);
245 r = -ENOENT;
246 goto out;
248 buf = &fgdev->buffers[bufidx];
250 if (unlikely(buf->state == B3DFG_BUFFER_PENDING)) {
251 dev_dbg(dev, "buffer %d is already queued\n", bufidx);
252 r = -EINVAL;
253 goto out;
256 buf->state = B3DFG_BUFFER_PENDING;
257 list_add_tail(&buf->list, &fgdev->buffer_queue);
259 if (fgdev->transmission_enabled && fgdev->triplet_ready) {
260 dev_dbg(dev, "triplet is ready, pushing immediately\n");
261 fgdev->triplet_ready = 0;
262 r = setup_frame_transfer(fgdev, buf, 0);
263 if (r)
264 dev_err(dev, "unable to map DMA buffer\n");
267 out:
268 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
269 return r;
272 /* non-blocking buffer poll. returns 1 if data is present in the buffer,
273 * 0 otherwise */
274 static int poll_buffer(struct b3dfg_dev *fgdev, void __user *arg)
276 struct device *dev = &fgdev->pdev->dev;
277 struct b3dfg_poll p;
278 struct b3dfg_buffer *buf;
279 unsigned long flags;
280 int r = 1;
281 int arg_out = 0;
283 if (copy_from_user(&p, arg, sizeof(p)))
284 return -EFAULT;
286 if (unlikely(!fgdev->transmission_enabled)) {
287 dev_dbg(dev, "cannot poll, transmission disabled\n");
288 return -EINVAL;
291 if (p.buffer_idx < 0 || p.buffer_idx >= b3dfg_nbuf)
292 return -ENOENT;
294 buf = &fgdev->buffers[p.buffer_idx];
296 spin_lock_irqsave(&fgdev->buffer_lock, flags);
298 if (likely(buf->state == B3DFG_BUFFER_POPULATED)) {
299 arg_out = 1;
300 buf->state = B3DFG_BUFFER_POLLED;
302 /* IRQs already disabled by spin_lock_irqsave above. */
303 spin_lock(&fgdev->triplets_dropped_lock);
304 p.triplets_dropped = fgdev->triplets_dropped;
305 fgdev->triplets_dropped = 0;
306 spin_unlock(&fgdev->triplets_dropped_lock);
307 } else {
308 r = 0;
311 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
313 if (arg_out && copy_to_user(arg, &p, sizeof(p)))
314 r = -EFAULT;
316 return r;
319 static unsigned long get_cstate_change(struct b3dfg_dev *fgdev)
321 unsigned long flags, when;
323 spin_lock_irqsave(&fgdev->cstate_lock, flags);
324 when = fgdev->cstate_tstamp;
325 spin_unlock_irqrestore(&fgdev->cstate_lock, flags);
326 return when;
329 static int is_event_ready(struct b3dfg_dev *fgdev, struct b3dfg_buffer *buf,
330 unsigned long when)
332 int result;
333 unsigned long flags;
335 spin_lock_irqsave(&fgdev->buffer_lock, flags);
336 spin_lock(&fgdev->cstate_lock);
337 result = (!fgdev->transmission_enabled ||
338 buf->state == B3DFG_BUFFER_POPULATED ||
339 when != fgdev->cstate_tstamp);
340 spin_unlock(&fgdev->cstate_lock);
341 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
343 return result;
346 /* sleep until a specific buffer becomes populated */
347 static int wait_buffer(struct b3dfg_dev *fgdev, void __user *arg)
349 struct device *dev = &fgdev->pdev->dev;
350 struct b3dfg_wait w;
351 struct b3dfg_buffer *buf;
352 unsigned long flags, when;
353 int r;
355 if (copy_from_user(&w, arg, sizeof(w)))
356 return -EFAULT;
358 if (!fgdev->transmission_enabled) {
359 dev_dbg(dev, "cannot wait, transmission disabled\n");
360 return -EINVAL;
363 if (w.buffer_idx < 0 || w.buffer_idx >= b3dfg_nbuf)
364 return -ENOENT;
366 buf = &fgdev->buffers[w.buffer_idx];
368 spin_lock_irqsave(&fgdev->buffer_lock, flags);
370 if (buf->state == B3DFG_BUFFER_POPULATED) {
371 r = w.timeout;
372 goto out_triplets_dropped;
375 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
377 when = get_cstate_change(fgdev);
378 if (w.timeout > 0) {
379 r = wait_event_interruptible_timeout(fgdev->buffer_waitqueue,
380 is_event_ready(fgdev, buf, when),
381 (w.timeout * HZ) / 1000);
383 if (unlikely(r < 0))
384 goto out;
386 w.timeout = r * 1000 / HZ;
387 } else {
388 r = wait_event_interruptible(fgdev->buffer_waitqueue,
389 is_event_ready(fgdev, buf, when));
391 if (unlikely(r)) {
392 r = -ERESTARTSYS;
393 goto out;
397 /* TODO: Inform the user via field(s) in w? */
398 if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev)) {
399 r = -EINVAL;
400 goto out;
403 spin_lock_irqsave(&fgdev->buffer_lock, flags);
405 if (buf->state != B3DFG_BUFFER_POPULATED) {
406 r = -ETIMEDOUT;
407 goto out_unlock;
410 buf->state = B3DFG_BUFFER_POLLED;
412 out_triplets_dropped:
414 /* IRQs already disabled by spin_lock_irqsave above. */
415 spin_lock(&fgdev->triplets_dropped_lock);
416 w.triplets_dropped = fgdev->triplets_dropped;
417 fgdev->triplets_dropped = 0;
418 spin_unlock(&fgdev->triplets_dropped_lock);
420 out_unlock:
421 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
422 if (copy_to_user(arg, &w, sizeof(w)))
423 r = -EFAULT;
424 out:
425 return r;
428 /* mmap page fault handler */
429 static int b3dfg_vma_fault(struct vm_area_struct *vma,
430 struct vm_fault *vmf)
432 struct b3dfg_dev *fgdev = vma->vm_file->private_data;
433 unsigned long off = vmf->pgoff << PAGE_SHIFT;
434 unsigned int frame_size = fgdev->frame_size;
435 unsigned int buf_size = frame_size * B3DFG_FRAMES_PER_BUFFER;
436 unsigned char *addr;
438 /* determine which buffer the offset lies within */
439 unsigned int buf_idx = off / buf_size;
440 /* and the offset into the buffer */
441 unsigned int buf_off = off % buf_size;
443 /* determine which frame inside the buffer the offset lies in */
444 unsigned int frm_idx = buf_off / frame_size;
445 /* and the offset into the frame */
446 unsigned int frm_off = buf_off % frame_size;
448 if (unlikely(buf_idx >= b3dfg_nbuf))
449 return VM_FAULT_SIGBUS;
451 addr = fgdev->buffers[buf_idx].frame[frm_idx] + frm_off;
452 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
453 virt_to_phys(addr) >> PAGE_SHIFT);
455 return VM_FAULT_NOPAGE;
458 static struct vm_operations_struct b3dfg_vm_ops = {
459 .fault = b3dfg_vma_fault,
462 static int get_wand_status(struct b3dfg_dev *fgdev, int __user *arg)
464 u32 wndstat = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
465 dev_dbg(&fgdev->pdev->dev, "wand status %x\n", wndstat);
466 return __put_user(wndstat & 0x1, arg);
469 static int enable_transmission(struct b3dfg_dev *fgdev)
471 unsigned long flags;
472 struct device *dev = &fgdev->pdev->dev;
474 dev_dbg(dev, "enable transmission\n");
476 /* check the cable is plugged in. */
477 if (!b3dfg_read32(fgdev, B3D_REG_WAND_STS)) {
478 dev_dbg(dev, "cannot start transmission without wand\n");
479 return -EINVAL;
482 spin_lock_irqsave(&fgdev->buffer_lock, flags);
484 /* Handle racing enable_transmission calls. */
485 if (fgdev->transmission_enabled) {
486 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
487 goto out;
490 spin_lock(&fgdev->triplets_dropped_lock);
491 fgdev->triplets_dropped = 0;
492 spin_unlock(&fgdev->triplets_dropped_lock);
494 fgdev->triplet_ready = 0;
495 fgdev->cur_dma_frame_idx = -1;
496 fgdev->transmission_enabled = 1;
498 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
500 /* Enable DMA and cable status interrupts. */
501 b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0x03);
503 out:
504 return 0;
507 static void disable_transmission(struct b3dfg_dev *fgdev)
509 struct device *dev = &fgdev->pdev->dev;
510 unsigned long flags;
511 u32 tmp;
513 dev_dbg(dev, "disable transmission\n");
515 /* guarantee that no more interrupts will be serviced */
516 spin_lock_irqsave(&fgdev->buffer_lock, flags);
517 fgdev->transmission_enabled = 0;
519 b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
521 /* FIXME: temporary debugging only. if the board stops transmitting,
522 * hitting ctrl+c and seeing this message is useful for determining
523 * the state of the board. */
524 tmp = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
525 dev_dbg(dev, "DMA_STS reads %x after TX stopped\n", tmp);
527 dequeue_all_buffers(fgdev);
528 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
530 wake_up_interruptible(&fgdev->buffer_waitqueue);
533 static int set_transmission(struct b3dfg_dev *fgdev, int enabled)
535 int res = 0;
537 if (enabled && !fgdev->transmission_enabled)
538 res = enable_transmission(fgdev);
539 else if (!enabled && fgdev->transmission_enabled)
540 disable_transmission(fgdev);
542 return res;
545 /* Called in interrupt context. */
546 static void handle_cstate_unplug(struct b3dfg_dev *fgdev)
548 /* Disable all interrupts. */
549 b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
551 /* Stop transmission. */
552 spin_lock(&fgdev->buffer_lock);
553 fgdev->transmission_enabled = 0;
555 fgdev->cur_dma_frame_idx = -1;
556 fgdev->triplet_ready = 0;
557 if (fgdev->cur_dma_frame_addr) {
558 pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
559 fgdev->frame_size, PCI_DMA_FROMDEVICE);
560 fgdev->cur_dma_frame_addr = 0;
562 dequeue_all_buffers(fgdev);
563 spin_unlock(&fgdev->buffer_lock);
566 /* Called in interrupt context. */
567 static void handle_cstate_change(struct b3dfg_dev *fgdev)
569 u32 cstate = b3dfg_read32(fgdev, B3D_REG_WAND_STS);
570 unsigned long when;
571 struct device *dev = &fgdev->pdev->dev;
573 dev_dbg(dev, "cable state change: %u\n", cstate);
576 * When the wand is unplugged we reset our state. The hardware will
577 * have done the same internally.
579 * Note we should never see a cable *plugged* event, as interrupts
580 * should only be enabled when transmitting, which requires the cable
581 * to be plugged. If we do see one it probably means the cable has been
582 * unplugged and re-plugged very rapidly. Possibly because it has a
583 * broken wire and is momentarily losing contact.
585 * TODO: At the moment if you plug in the cable then enable transmission
586 * the hardware will raise a couple of spurious interrupts, so
587 * just ignore them for now.
589 * Once the hardware is fixed we should complain and treat it as an
590 * unplug. Or at least track how frequently it is happening and do
591 * so if too many come in.
593 if (cstate) {
594 dev_warn(dev, "ignoring unexpected plug event\n");
595 return;
597 handle_cstate_unplug(fgdev);
600 * Record cable state change timestamp & wake anyone waiting
601 * on a cable state change. Be paranoid about ensuring events
602 * are not missed if we somehow get two interrupts in a jiffy.
604 spin_lock(&fgdev->cstate_lock);
605 when = jiffies_64;
606 if (when <= fgdev->cstate_tstamp)
607 when = fgdev->cstate_tstamp + 1;
608 fgdev->cstate_tstamp = when;
609 wake_up_interruptible(&fgdev->buffer_waitqueue);
610 spin_unlock(&fgdev->cstate_lock);
613 /* Called with buffer_lock held. */
614 static void transfer_complete(struct b3dfg_dev *fgdev)
616 struct b3dfg_buffer *buf;
617 struct device *dev = &fgdev->pdev->dev;
619 pci_unmap_single(fgdev->pdev, fgdev->cur_dma_frame_addr,
620 fgdev->frame_size, PCI_DMA_FROMDEVICE);
621 fgdev->cur_dma_frame_addr = 0;
623 buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
625 dev_dbg(dev, "handle frame completion\n");
626 if (fgdev->cur_dma_frame_idx == B3DFG_FRAMES_PER_BUFFER - 1) {
628 /* last frame of that triplet completed */
629 dev_dbg(dev, "triplet completed\n");
630 buf->state = B3DFG_BUFFER_POPULATED;
631 list_del_init(&buf->list);
632 wake_up_interruptible(&fgdev->buffer_waitqueue);
637 * Called with buffer_lock held.
639 * Note that idx is the (1-based) *next* frame to be transferred, while
640 * cur_dma_frame_idx is the (0-based) *last* frame to have been transferred (or
641 * -1 if none). Thus there should be a difference of 2 between them.
643 static bool setup_next_frame_transfer(struct b3dfg_dev *fgdev, int idx)
645 struct b3dfg_buffer *buf;
646 struct device *dev = &fgdev->pdev->dev;
647 bool need_ack = 1;
649 dev_dbg(dev, "program DMA transfer for next frame: %d\n", idx);
651 buf = list_entry(fgdev->buffer_queue.next, struct b3dfg_buffer, list);
652 if (idx == fgdev->cur_dma_frame_idx + 2) {
653 if (setup_frame_transfer(fgdev, buf, idx - 1))
654 dev_err(dev, "unable to map DMA buffer\n");
655 need_ack = 0;
656 } else {
657 dev_err(dev, "frame mismatch, got %d, expected %d\n",
658 idx, fgdev->cur_dma_frame_idx + 2);
660 /* FIXME: handle dropped triplets here */
663 return need_ack;
666 static irqreturn_t b3dfg_intr(int irq, void *dev_id)
668 struct b3dfg_dev *fgdev = dev_id;
669 struct device *dev = &fgdev->pdev->dev;
670 u32 sts;
671 u8 dropped;
672 bool need_ack = 1;
673 irqreturn_t res = IRQ_HANDLED;
675 sts = b3dfg_read32(fgdev, B3D_REG_DMA_STS);
676 if (unlikely(sts == 0)) {
677 dev_warn(dev, "ignore interrupt, DMA status is 0\n");
678 res = IRQ_NONE;
679 goto out;
682 if (unlikely(!fgdev->transmission_enabled)) {
683 dev_warn(dev, "ignore interrupt, TX disabled\n");
684 res = IRQ_HANDLED;
685 goto out;
688 /* Handle dropped frames, as reported by the hardware. */
689 dropped = (sts >> 8) & 0xff;
690 dev_dbg(dev, "intr: DMA_STS=%08x (drop=%d comp=%d next=%d)\n",
691 sts, dropped, !!(sts & 0x4), sts & 0x3);
692 if (unlikely(dropped > 0)) {
693 spin_lock(&fgdev->triplets_dropped_lock);
694 fgdev->triplets_dropped += dropped;
695 spin_unlock(&fgdev->triplets_dropped_lock);
698 /* Handle a cable state change (i.e. the wand being unplugged). */
699 if (sts & 0x08) {
700 handle_cstate_change(fgdev);
701 goto out;
704 spin_lock(&fgdev->buffer_lock);
705 if (unlikely(list_empty(&fgdev->buffer_queue))) {
707 /* FIXME need more sanity checking here */
708 dev_info(dev, "buffer not ready for next transfer\n");
709 fgdev->triplet_ready = 1;
710 goto out_unlock;
713 /* Has a frame transfer been completed? */
714 if (sts & 0x4) {
715 u32 dma_status = b3dfg_read32(fgdev, B3D_REG_EC220_DMA_STS);
717 /* Check for DMA errors reported by the hardware. */
718 if (unlikely(dma_status & 0x1)) {
719 dev_err(dev, "EC220 error: %08x\n", dma_status);
721 /* FIXME flesh out error handling */
722 goto out_unlock;
725 /* Sanity check, we should have a frame index at this point. */
726 if (unlikely(fgdev->cur_dma_frame_idx == -1)) {
727 dev_err(dev, "completed but no last idx?\n");
729 /* FIXME flesh out error handling */
730 goto out_unlock;
733 transfer_complete(fgdev);
736 /* Is there another frame transfer pending? */
737 if (sts & 0x3)
738 need_ack = setup_next_frame_transfer(fgdev, sts & 0x3);
739 else
740 fgdev->cur_dma_frame_idx = -1;
742 out_unlock:
743 spin_unlock(&fgdev->buffer_lock);
744 out:
745 if (need_ack) {
746 dev_dbg(dev, "acknowledging interrupt\n");
747 b3dfg_write32(fgdev, B3D_REG_EC220_DMA_STS, 0x0b);
749 return res;
752 static int b3dfg_open(struct inode *inode, struct file *filp)
754 struct b3dfg_dev *fgdev =
755 container_of(inode->i_cdev, struct b3dfg_dev, chardev);
757 dev_dbg(&fgdev->pdev->dev, "open\n");
758 filp->private_data = fgdev;
759 return 0;
762 static int b3dfg_release(struct inode *inode, struct file *filp)
764 struct b3dfg_dev *fgdev = filp->private_data;
765 dev_dbg(&fgdev->pdev->dev, "release\n");
766 disable_transmission(fgdev);
767 return 0;
770 static long b3dfg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
772 struct b3dfg_dev *fgdev = filp->private_data;
774 switch (cmd) {
775 case B3DFG_IOCGFRMSZ:
776 return __put_user(fgdev->frame_size, (int __user *) arg);
777 case B3DFG_IOCGWANDSTAT:
778 return get_wand_status(fgdev, (int __user *) arg);
779 case B3DFG_IOCTTRANS:
780 return set_transmission(fgdev, (int) arg);
781 case B3DFG_IOCTQUEUEBUF:
782 return queue_buffer(fgdev, (int) arg);
783 case B3DFG_IOCTPOLLBUF:
784 return poll_buffer(fgdev, (void __user *) arg);
785 case B3DFG_IOCTWAITBUF:
786 return wait_buffer(fgdev, (void __user *) arg);
787 default:
788 dev_dbg(&fgdev->pdev->dev, "unrecognised ioctl %x\n", cmd);
789 return -EINVAL;
793 static unsigned int b3dfg_poll(struct file *filp, poll_table *poll_table)
795 struct b3dfg_dev *fgdev = filp->private_data;
796 unsigned long flags, when;
797 int i;
798 int r = 0;
800 when = get_cstate_change(fgdev);
801 poll_wait(filp, &fgdev->buffer_waitqueue, poll_table);
803 spin_lock_irqsave(&fgdev->buffer_lock, flags);
804 for (i = 0; i < b3dfg_nbuf; i++) {
805 if (fgdev->buffers[i].state == B3DFG_BUFFER_POPULATED) {
806 r = POLLIN | POLLRDNORM;
807 break;
810 spin_unlock_irqrestore(&fgdev->buffer_lock, flags);
812 /* TODO: Confirm this is how we want to communicate the change. */
813 if (!fgdev->transmission_enabled || when != get_cstate_change(fgdev))
814 r = POLLERR;
816 return r;
819 static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
821 struct b3dfg_dev *fgdev = filp->private_data;
822 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
823 unsigned long vsize = vma->vm_end - vma->vm_start;
824 unsigned long bufdatalen = b3dfg_nbuf * fgdev->frame_size * 3;
825 unsigned long psize = bufdatalen - offset;
826 int r = 0;
828 if (vsize <= psize) {
829 vma->vm_flags |= VM_IO | VM_RESERVED | VM_CAN_NONLINEAR |
830 VM_PFNMAP;
831 vma->vm_ops = &b3dfg_vm_ops;
832 } else {
833 r = -EINVAL;
836 return r;
839 static struct file_operations b3dfg_fops = {
840 .owner = THIS_MODULE,
841 .open = b3dfg_open,
842 .release = b3dfg_release,
843 .unlocked_ioctl = b3dfg_ioctl,
844 .poll = b3dfg_poll,
845 .mmap = b3dfg_mmap,
848 static void free_all_frame_buffers(struct b3dfg_dev *fgdev)
850 int i, j;
851 for (i = 0; i < b3dfg_nbuf; i++)
852 for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++)
853 kfree(fgdev->buffers[i].frame[j]);
854 kfree(fgdev->buffers);
857 /* initialize device and any data structures. called before any interrupts
858 * are enabled. */
859 static int b3dfg_init_dev(struct b3dfg_dev *fgdev)
861 int i, j;
862 u32 frm_size = b3dfg_read32(fgdev, B3D_REG_FRM_SIZE);
864 /* Disable interrupts. In abnormal circumstances (e.g. after a crash)
865 * the board may still be transmitting from the previous session. If we
866 * ensure that interrupts are disabled before we later enable them, we
867 * are sure to capture a triplet from the start, rather than starting
868 * from frame 2 or 3. Disabling interrupts causes the FG to throw away
869 * all buffered data and stop buffering more until interrupts are
870 * enabled again.
872 b3dfg_write32(fgdev, B3D_REG_HW_CTRL, 0);
874 fgdev->frame_size = frm_size * 4096;
875 fgdev->buffers = kzalloc(sizeof(struct b3dfg_buffer) * b3dfg_nbuf,
876 GFP_KERNEL);
877 if (!fgdev->buffers)
878 goto err_no_buf;
879 for (i = 0; i < b3dfg_nbuf; i++) {
880 struct b3dfg_buffer *buf = &fgdev->buffers[i];
881 for (j = 0; j < B3DFG_FRAMES_PER_BUFFER; j++) {
882 buf->frame[j] = kmalloc(fgdev->frame_size, GFP_KERNEL);
883 if (!buf->frame[j])
884 goto err_no_mem;
886 INIT_LIST_HEAD(&buf->list);
889 INIT_LIST_HEAD(&fgdev->buffer_queue);
890 init_waitqueue_head(&fgdev->buffer_waitqueue);
891 spin_lock_init(&fgdev->buffer_lock);
892 spin_lock_init(&fgdev->cstate_lock);
893 spin_lock_init(&fgdev->triplets_dropped_lock);
894 return 0;
896 err_no_mem:
897 free_all_frame_buffers(fgdev);
898 err_no_buf:
899 return -ENOMEM;
902 /* find next free minor number, returns -1 if none are availabile */
903 static int get_free_minor(void)
905 int i;
906 for (i = 0; i < B3DFG_MAX_DEVS; i++) {
907 if (b3dfg_devices[i] == 0)
908 return i;
910 return -1;
913 static int __devinit b3dfg_probe(struct pci_dev *pdev,
914 const struct pci_device_id *id)
916 struct b3dfg_dev *fgdev = kzalloc(sizeof(*fgdev), GFP_KERNEL);
917 int r = 0;
918 int minor = get_free_minor();
919 dev_t devno = MKDEV(MAJOR(b3dfg_devt), minor);
920 unsigned long res_len;
921 resource_size_t res_base;
923 if (fgdev == NULL)
924 return -ENOMEM;
926 if (minor < 0) {
927 dev_err(&pdev->dev, "too many devices found!\n");
928 r = -EIO;
929 goto err_free;
932 b3dfg_devices[minor] = 1;
933 dev_info(&pdev->dev, "probe device with IRQ %d\n", pdev->irq);
935 cdev_init(&fgdev->chardev, &b3dfg_fops);
936 fgdev->chardev.owner = THIS_MODULE;
938 r = cdev_add(&fgdev->chardev, devno, 1);
939 if (r) {
940 dev_err(&pdev->dev, "cannot add char device\n");
941 goto err_release_minor;
944 fgdev->dev = device_create(
945 b3dfg_class,
946 &pdev->dev,
947 devno,
948 dev_get_drvdata(&pdev->dev),
949 DRIVER_NAME "%d", minor);
951 if (IS_ERR(fgdev->dev)) {
952 dev_err(&pdev->dev, "cannot create device\n");
953 r = PTR_ERR(fgdev->dev);
954 goto err_del_cdev;
957 r = pci_enable_device(pdev);
958 if (r) {
959 dev_err(&pdev->dev, "cannot enable PCI device\n");
960 goto err_dev_unreg;
963 res_len = pci_resource_len(pdev, B3DFG_BAR_REGS);
964 if (res_len != B3DFG_REGS_LENGTH) {
965 dev_err(&pdev->dev, "invalid register resource size\n");
966 r = -EIO;
967 goto err_disable;
970 if (pci_resource_flags(pdev, B3DFG_BAR_REGS)
971 != (IORESOURCE_MEM | IORESOURCE_SIZEALIGN)) {
972 dev_err(&pdev->dev, "invalid resource flags\n");
973 r = -EIO;
974 goto err_disable;
976 r = pci_request_regions(pdev, DRIVER_NAME);
977 if (r) {
978 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
979 goto err_disable;
982 pci_set_master(pdev);
984 r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
985 if (r) {
986 dev_err(&pdev->dev, "no usable DMA configuration\n");
987 goto err_free_res;
990 res_base = pci_resource_start(pdev, B3DFG_BAR_REGS);
991 fgdev->regs = ioremap_nocache(res_base, res_len);
992 if (!fgdev->regs) {
993 dev_err(&pdev->dev, "regs ioremap failed\n");
994 r = -EIO;
995 goto err_free_res;
998 fgdev->pdev = pdev;
999 pci_set_drvdata(pdev, fgdev);
1000 r = b3dfg_init_dev(fgdev);
1001 if (r < 0) {
1002 dev_err(&pdev->dev, "failed to initalize device\n");
1003 goto err_unmap;
1006 r = request_irq(pdev->irq, b3dfg_intr, IRQF_SHARED, DRIVER_NAME, fgdev);
1007 if (r) {
1008 dev_err(&pdev->dev, "couldn't request irq %d\n", pdev->irq);
1009 goto err_free_bufs;
1012 return 0;
1014 err_free_bufs:
1015 free_all_frame_buffers(fgdev);
1016 err_unmap:
1017 iounmap(fgdev->regs);
1018 err_free_res:
1019 pci_release_regions(pdev);
1020 err_disable:
1021 pci_disable_device(pdev);
1022 err_dev_unreg:
1023 device_destroy(b3dfg_class, devno);
1024 err_del_cdev:
1025 cdev_del(&fgdev->chardev);
1026 err_release_minor:
1027 b3dfg_devices[minor] = 0;
1028 err_free:
1029 kfree(fgdev);
1030 return r;
1033 static void __devexit b3dfg_remove(struct pci_dev *pdev)
1035 struct b3dfg_dev *fgdev = pci_get_drvdata(pdev);
1036 unsigned int minor = MINOR(fgdev->chardev.dev);
1038 dev_dbg(&pdev->dev, "remove\n");
1040 free_irq(pdev->irq, fgdev);
1041 iounmap(fgdev->regs);
1042 pci_release_regions(pdev);
1043 pci_disable_device(pdev);
1044 device_destroy(b3dfg_class, MKDEV(MAJOR(b3dfg_devt), minor));
1045 cdev_del(&fgdev->chardev);
1046 free_all_frame_buffers(fgdev);
1047 kfree(fgdev);
1048 b3dfg_devices[minor] = 0;
1051 static struct pci_driver b3dfg_driver = {
1052 .name = DRIVER_NAME,
1053 .id_table = b3dfg_ids,
1054 .probe = b3dfg_probe,
1055 .remove = __devexit_p(b3dfg_remove),
1058 static int __init b3dfg_module_init(void)
1060 int r;
1062 if (b3dfg_nbuf < 2) {
1063 printk(KERN_ERR DRIVER_NAME
1064 ": buffer_count is out of range (must be >= 2)");
1065 return -EINVAL;
1068 printk(KERN_INFO DRIVER_NAME ": loaded\n");
1070 b3dfg_class = class_create(THIS_MODULE, DRIVER_NAME);
1071 if (IS_ERR(b3dfg_class))
1072 return PTR_ERR(b3dfg_class);
1074 r = alloc_chrdev_region(&b3dfg_devt, 0, B3DFG_MAX_DEVS, DRIVER_NAME);
1075 if (r)
1076 goto err1;
1078 r = pci_register_driver(&b3dfg_driver);
1079 if (r)
1080 goto err2;
1082 return r;
1084 err2:
1085 unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
1086 err1:
1087 class_destroy(b3dfg_class);
1088 return r;
1091 static void __exit b3dfg_module_exit(void)
1093 printk(KERN_INFO DRIVER_NAME ": unloaded\n");
1094 pci_unregister_driver(&b3dfg_driver);
1095 unregister_chrdev_region(b3dfg_devt, B3DFG_MAX_DEVS);
1096 class_destroy(b3dfg_class);
1099 module_init(b3dfg_module_init);
1100 module_exit(b3dfg_module_exit);