/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"
#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;
/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
        __le32 next;
        __le32 async_error_next;
        u32 user_data;
        __le32 pcl_status;
        __le32 remaining_transfer_count;
        __le32 next_data_buffer;
        struct {
                __le32 control;
                __le32 pointer;
        } buffer[13];
};
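/*
 * Size check: 6 leading quadlets (24 bytes) plus 13 buffer descriptors of
 * 8 bytes each (104 bytes) add up to the 128 bytes mentioned above.
 */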
struct packet {
        unsigned int length;
        char data[0];
};

struct packet_buffer {
        char *data;
        size_t capacity;
        long total_packet_count, lost_packet_count;
        atomic_t size;
        struct packet *head, *tail;
        wait_queue_head_t wait;
};
struct pcilynx {
        struct pci_dev *pci_device;
        __iomem char *registers;

        struct pcl *rcv_start_pcl, *rcv_pcl;
        __le32 *rcv_buffer;

        dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

        spinlock_t client_list_lock;
        struct list_head client_list;

        struct miscdevice misc;
        struct list_head link;
        struct kref kref;
};
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
        kref_get(&lynx->kref);

        return lynx;
}

static void
lynx_release(struct kref *kref)
{
        kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
        kref_put(&lynx->kref, lynx_release);
}
struct client {
        struct pcilynx *lynx;
        u32 tcode_mask;
        struct packet_buffer buffer;
        struct list_head link;
};
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
        buffer->data = kmalloc(capacity, GFP_KERNEL);
        if (buffer->data == NULL)
                return -ENOMEM;
        buffer->head = (struct packet *) buffer->data;
        buffer->tail = (struct packet *) buffer->data;
        buffer->capacity = capacity;
        buffer->lost_packet_count = 0;
        atomic_set(&buffer->size, 0);
        init_waitqueue_head(&buffer->wait);

        return 0;
}
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
        kfree(buffer->data);
}
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
        struct packet_buffer *buffer = &client->buffer;
        size_t length;
        char *end;

        if (wait_event_interruptible(buffer->wait,
                                     atomic_read(&buffer->size) > 0) ||
            list_empty(&client->lynx->link))
                return -ERESTARTSYS;

        if (atomic_read(&buffer->size) == 0)
                return -ENODEV;

        /* FIXME: Check length <= user_length. */

        end = buffer->data + buffer->capacity;
        length = buffer->head->length;

        if (&buffer->head->data[length] < end) {
                if (copy_to_user(data, buffer->head->data, length))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->head->data[length];
        } else {
                size_t split = end - buffer->head->data;

                if (copy_to_user(data, buffer->head->data, split))
                        return -EFAULT;
                if (copy_to_user(data + split, buffer->data, length - split))
                        return -EFAULT;
                buffer->head = (struct packet *) &buffer->data[length - split];
        }

        /*
         * Decrease buffer->size as the last thing, since this is what
         * keeps the interrupt from overwriting the packet we are
         * retrieving from the buffer.
         */
        atomic_sub(sizeof(struct packet) + length, &buffer->size);

        return length;
}
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
        char *end;

        buffer->total_packet_count++;

        if (buffer->capacity <
            atomic_read(&buffer->size) + sizeof(struct packet) + length) {
                buffer->lost_packet_count++;
                return;
        }

        end = buffer->data + buffer->capacity;
        buffer->tail->length = length;

        if (&buffer->tail->data[length] < end) {
                memcpy(buffer->tail->data, data, length);
                buffer->tail = (struct packet *) &buffer->tail->data[length];
        } else {
                size_t split = end - buffer->tail->data;

                memcpy(buffer->tail->data, data, split);
                memcpy(buffer->data, data + split, length - split);
                buffer->tail = (struct packet *) &buffer->data[length - split];
        }

        /* Finally, adjust buffer size and wake up userspace reader. */

        atomic_add(sizeof(struct packet) + length, &buffer->size);
        wake_up_interruptible(&buffer->wait);
}
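/*
 * Note on concurrency (an observation on the code above, not a statement
 * from any hardware documentation): packet_buffer_put() runs in interrupt
 * context and advances only ->tail of the ring, packet_buffer_get() runs in
 * process context and advances only ->head, and the atomic ->size counter
 * is updated last on both sides.  That is what lets the producer and the
 * consumer share the ring without a common lock around the copies.
 */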
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
        writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
        return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
        reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}
/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
        int dmachan)
{
        reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
        reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
                  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
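/*
 * The dmachan * 0x20 offsets above rely on the per-channel DMA register
 * blocks (CURRENT_PCL, CHAN_CTRL, ...) being spaced 0x20 bytes apart, so
 * channel N is reached by adding N * 0x20 to the channel-0 register address.
 */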
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
        if (addr > 15) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register address %d out of range\n", addr);
                return -1;
        }
        if (val > 0xff) {
                dev_err(&lynx->pci_device->dev,
                        "PHY register value %d out of range\n", val);
                return -1;
        }
        reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
                  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

        return 0;
}
static int
nosy_open(struct inode *inode, struct file *file)
{
        int minor = iminor(inode);
        struct client *client;
        struct pcilynx *tmp, *lynx = NULL;

        mutex_lock(&card_mutex);
        list_for_each_entry(tmp, &card_list, link)
                if (tmp->misc.minor == minor) {
                        lynx = lynx_get(tmp);
                        break;
                }
        mutex_unlock(&card_mutex);
        if (lynx == NULL)
                return -ENODEV;

        client = kmalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                goto fail;

        client->tcode_mask = ~0;
        client->lynx = lynx;
        INIT_LIST_HEAD(&client->link);

        if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
                goto fail;

        file->private_data = client;

        return nonseekable_open(inode, file);
fail:
        kfree(client);
        lynx_put(lynx);

        return -ENOMEM;
}
static int
nosy_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct pcilynx *lynx = client->lynx;

        spin_lock_irq(&lynx->client_list_lock);
        list_del_init(&client->link);
        spin_unlock_irq(&lynx->client_list_lock);

        packet_buffer_destroy(&client->buffer);
        kfree(client);
        lynx_put(lynx);

        return 0;
}
static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int ret = 0;

        poll_wait(file, &client->buffer.wait, pt);

        if (atomic_read(&client->buffer.size) > 0)
                ret = POLLIN | POLLRDNORM;

        if (list_empty(&client->lynx->link))
                ret |= POLLHUP;

        return ret;
}
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return packet_buffer_get(client, buffer, count);
}
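/*
 * As the FIXME in packet_buffer_get() notes, the stored packet length is not
 * checked against the requested count, so each read() is expected to supply
 * a buffer large enough to hold a complete packet record.
 */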
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;
        spinlock_t *client_list_lock = &client->lynx->client_list_lock;
        struct nosy_stats stats;

        switch (cmd) {
        case NOSY_IOC_GET_STATS:
                spin_lock_irq(client_list_lock);
                stats.total_packet_count = client->buffer.total_packet_count;
                stats.lost_packet_count  = client->buffer.lost_packet_count;
                spin_unlock_irq(client_list_lock);

                if (copy_to_user((void __user *) arg, &stats, sizeof stats))
                        return -EFAULT;
                else
                        return 0;

        case NOSY_IOC_START:
                spin_lock_irq(client_list_lock);
                list_add_tail(&client->link, &client->lynx->client_list);
                spin_unlock_irq(client_list_lock);

                return 0;

        case NOSY_IOC_STOP:
                spin_lock_irq(client_list_lock);
                list_del_init(&client->link);
                spin_unlock_irq(client_list_lock);

                return 0;

        case NOSY_IOC_FILTER:
                spin_lock_irq(client_list_lock);
                client->tcode_mask = arg;
                spin_unlock_irq(client_list_lock);

                return 0;

        default:
                return -EINVAL;
                /* Flush buffer, configure filter. */
        }
}
static const struct file_operations nosy_ops = {
        .owner =          THIS_MODULE,
        .read =           nosy_read,
        .unlocked_ioctl = nosy_ioctl,
        .poll =           nosy_poll,
        .open =           nosy_open,
        .release =        nosy_release,
};
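/*
 * For illustration only: a user-space client would typically drive this
 * interface roughly as sketched below.  The device node name and the exact
 * record layout are assumptions inferred from the misc device name and the
 * read path above, not something this file defines:
 *
 *      int fd = open("/dev/nosy", O_RDONLY);
 *      ioctl(fd, NOSY_IOC_FILTER, ~0);         // pass all tcodes
 *      ioctl(fd, NOSY_IOC_START);              // join the client list
 *      while (running) {
 *              char buf[16 * 1024];
 *              ssize_t len = read(fd, buf, sizeof buf); // one packet record
 *              // buf[0..3]: timestamp, buf[4..len-1]: snooped packet data
 *      }
 *      ioctl(fd, NOSY_IOC_STOP);
 *      close(fd);
 */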
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
static void
packet_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        u32 tcode_mask, tcode, timestamp;
        size_t length;
        struct timespec64 ts64;

        /* FIXME: Also report rcv_speed. */

        length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
        tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;
        lynx->rcv_buffer[0] = (__force __le32)timestamp;

        if (length == PHY_PACKET_SIZE)
                tcode_mask = 1 << TCODE_PHY_PACKET;
        else
                tcode_mask = 1 << tcode;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                if (client->tcode_mask & tcode_mask)
                        packet_buffer_put(&client->buffer,
                                          lynx->rcv_buffer, length + 4);

        spin_unlock(&lynx->client_list_lock);
}
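/*
 * Each record handed to packet_buffer_put() above is the 4-byte microsecond
 * timestamp written into rcv_buffer[0] followed by the length bytes the link
 * layer placed after it, which is why length + 4 is passed.
 */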
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
        struct client *client;
        struct timespec64 ts64;
        u32 timestamp;

        ktime_get_real_ts64(&ts64);
        timestamp = ts64.tv_nsec / NSEC_PER_USEC;

        spin_lock(&lynx->client_list_lock);

        list_for_each_entry(client, &lynx->client_list, link)
                packet_buffer_put(&client->buffer, &timestamp, 4);

        spin_unlock(&lynx->client_list_lock);
}
static irqreturn_t
irq_handler(int irq, void *device)
{
        struct pcilynx *lynx = device;
        u32 pci_int_status, link_int_status;

        pci_int_status = reg_read(lynx, PCI_INT_STATUS);

        if (pci_int_status == ~0)
                /* Card was ejected. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_INT_PEND) == 0)
                /* Not our interrupt, bail out quickly. */
                return IRQ_NONE;

        if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
                link_int_status = reg_read(lynx, LINK_INT_STATUS);
                reg_write(lynx, LINK_INT_STATUS, link_int_status);

                if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
                        bus_reset_irq_handler(lynx);
        }

        /* Clear the PCI_INT_STATUS register only after clearing the
         * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
         * be set again immediately. */

        reg_write(lynx, PCI_INT_STATUS, pci_int_status);

        if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
                packet_irq_handler(lynx);
                run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
        }

        return IRQ_HANDLED;
}
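/*
 * The receive PCL's next pointer is PCL_NEXT_INVALID (see add_card()), so
 * DMA channel 0 halts after every received packet.  The DMA0_HLT branch
 * above hands the packet to the clients and then re-arms the channel by
 * running the start PCL again.
 */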
static void
remove_card(struct pci_dev *dev)
{
        struct pcilynx *lynx = pci_get_drvdata(dev);
        struct client *client;

        mutex_lock(&card_mutex);
        list_del_init(&lynx->link);
        misc_deregister(&lynx->misc);
        mutex_unlock(&card_mutex);

        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

        spin_lock_irq(&lynx->client_list_lock);
        list_for_each_entry(client, &lynx->client_list, link)
                wake_up_interruptible(&client->buffer.wait);
        spin_unlock_irq(&lynx->client_list_lock);

        pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                            lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
        pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                            lynx->rcv_pcl, lynx->rcv_pcl_bus);
        pci_free_consistent(lynx->pci_device, PAGE_SIZE,
                            lynx->rcv_buffer, lynx->rcv_buffer_bus);

        iounmap(lynx->registers);
        pci_disable_device(dev);
        lynx_put(lynx);
}
#define RCV_BUFFER_SIZE (16 * 1024)
static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
        struct pcilynx *lynx;
        u32 p, end;
        int ret, i;

        if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
                dev_err(&dev->dev,
                    "DMA address limits not supported for PCILynx hardware\n");
                return -ENXIO;
        }
        if (pci_enable_device(dev)) {
                dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
                return -ENXIO;
        }
        pci_set_master(dev);

        lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
        if (lynx == NULL) {
                dev_err(&dev->dev, "Failed to allocate control structure\n");
                ret = -ENOMEM;
                goto fail_disable;
        }
        lynx->pci_device = dev;
        pci_set_drvdata(dev, lynx);

        spin_lock_init(&lynx->client_list_lock);
        INIT_LIST_HEAD(&lynx->client_list);
        kref_init(&lynx->kref);

        lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
                                          PCILYNX_MAX_REGISTER);
        if (lynx->registers == NULL) {
                dev_err(&dev->dev, "Failed to map registers\n");
                ret = -ENOMEM;
                goto fail_deallocate_lynx;
        }

        lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
                                sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
        lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
                                sizeof(struct pcl), &lynx->rcv_pcl_bus);
        lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
                                RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
        if (lynx->rcv_start_pcl == NULL ||
            lynx->rcv_pcl == NULL ||
            lynx->rcv_buffer == NULL) {
                dev_err(&dev->dev, "Failed to allocate receive buffer\n");
                ret = -ENOMEM;
                goto fail_deallocate_buffers;
        }
        lynx->rcv_start_pcl->next       = cpu_to_le32(lynx->rcv_pcl_bus);
        lynx->rcv_pcl->next             = cpu_to_le32(PCL_NEXT_INVALID);
        lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);

        lynx->rcv_pcl->buffer[0].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
        lynx->rcv_pcl->buffer[0].pointer =
                        cpu_to_le32(lynx->rcv_buffer_bus + 4);
        p = lynx->rcv_buffer_bus + 2048;
        end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
        for (i = 1; p < end; i++, p += 2048) {
                lynx->rcv_pcl->buffer[i].control =
                        cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
                lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
        }
        lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
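        /*
         * Layout note: the first descriptor skips the first quadlet of the
         * receive buffer (rcv_buffer_bus + 4), which packet_irq_handler()
         * later overwrites with a timestamp, so it covers 2044 bytes; each
         * following descriptor covers a full 2048 bytes.  With the 16 KiB
         * RCV_BUFFER_SIZE this programs 8 of the buffer descriptors in
         * struct pcl.
         */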
        reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
        /* Fix buggy cards with autoboot pin not tied low: */
        reg_write(lynx, DMA0_CHAN_CTRL, 0);
        reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
        /* now, looking for PHY register set */
        if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
                lynx->phyic.reg_1394a = 1;
                PRINT(KERN_INFO, lynx->id,
                      "found 1394a conform PHY (using extended register set)");
                lynx->phyic.vendor = get_phy_vendorid(lynx);
                lynx->phyic.product = get_phy_productid(lynx);
        } else {
                lynx->phyic.reg_1394a = 0;
                PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
        }
#endif
        /* Setup the general receive FIFO max size. */
        reg_write(lynx, FIFO_SIZES, 255);

        reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

        reg_write(lynx, LINK_INT_ENABLE,
                  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
                  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
                  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
                  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
                  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

        /* Disable the L flag in self ID packets. */
        set_phy_reg(lynx, 4, 0);

        /* Put this baby into snoop mode */
        reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

        run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
        if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
                        driver_name, lynx)) {
                dev_err(&dev->dev,
                        "Failed to allocate shared interrupt %d\n", dev->irq);
                ret = -EIO;
                goto fail_deallocate_buffers;
        }

        lynx->misc.parent = &dev->dev;
        lynx->misc.minor = MISC_DYNAMIC_MINOR;
        lynx->misc.name = "nosy";
        lynx->misc.fops = &nosy_ops;

        mutex_lock(&card_mutex);
        ret = misc_register(&lynx->misc);
        if (ret) {
                dev_err(&dev->dev, "Failed to register misc char device\n");
                mutex_unlock(&card_mutex);
                goto fail_free_irq;
        }
        list_add_tail(&lynx->link, &card_list);
        mutex_unlock(&card_mutex);

        dev_info(&dev->dev,
                 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

        return 0;
fail_free_irq:
        reg_write(lynx, PCI_INT_ENABLE, 0);
        free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
        if (lynx->rcv_start_pcl)
                pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                                lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
        if (lynx->rcv_pcl)
                pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
                                lynx->rcv_pcl, lynx->rcv_pcl_bus);
        if (lynx->rcv_buffer)
                pci_free_consistent(lynx->pci_device, PAGE_SIZE,
                                lynx->rcv_buffer, lynx->rcv_buffer_bus);
        iounmap(lynx->registers);

fail_deallocate_lynx:
        kfree(lynx);

fail_disable:
        pci_disable_device(dev);

        return ret;
}
static struct pci_device_id pci_table[] = {
        {
                .vendor =    PCI_VENDOR_ID_TI,
                .device =    PCI_DEVICE_ID_TI_PCILYNX,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        { }     /* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver lynx_pci_driver = {
        .name =     driver_name,
        .id_table = pci_table,
        .probe =    add_card,
        .remove =   remove_card,
};

module_pci_driver(lynx_pci_driver);
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");