// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"
#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;
/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
	__le32 next;
	__le32 async_error_next;
	u32 user_data;
	__le32 pcl_status;
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;
		__le32 pointer;
	} buffer[13];
};

struct packet {
	unsigned int length;
	char data[];
};
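/*
 * Circular capture buffer shared by the interrupt handlers (producer) and the
 * reading client (consumer); see packet_buffer_put() and packet_buffer_get().
 */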
struct packet_buffer {
	char *data;
	size_t capacity;
	long total_packet_count, lost_packet_count;
	atomic_t size;
	struct packet *head, *tail;
	wait_queue_head_t wait;
};
struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;

	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	spinlock_t client_list_lock;
	struct list_head client_list;

	struct miscdevice misc;
	struct list_head link;
	struct kref kref;
};
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}

static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}
struct client {
	struct pcilynx *lynx;
	u32 tcode_mask;
	struct packet_buffer buffer;
	struct list_head link;
};
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
	buffer->data = kmalloc(capacity, GFP_KERNEL);
	if (buffer->data == NULL)
		return -ENOMEM;
	buffer->head = (struct packet *) buffer->data;
	buffer->tail = (struct packet *) buffer->data;
	buffer->capacity = capacity;
	buffer->lost_packet_count = 0;
	atomic_set(&buffer->size, 0);
	init_waitqueue_head(&buffer->wait);

	return 0;
}
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
	struct packet_buffer *buffer = &client->buffer;
	size_t length;
	char *end;

	if (wait_event_interruptible(buffer->wait,
				     atomic_read(&buffer->size) > 0) ||
	    list_empty(&client->lynx->link))
		return -ERESTARTSYS;

	if (atomic_read(&buffer->size) == 0)
		return -ENODEV;

	length = buffer->head->length;

	if (length > user_length)
		return 0;

	end = buffer->data + buffer->capacity;

	if (&buffer->head->data[length] < end) {
		if (copy_to_user(data, buffer->head->data, length))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->head->data[length];
	} else {
		size_t split = end - buffer->head->data;

		if (copy_to_user(data, buffer->head->data, split))
			return -EFAULT;
		if (copy_to_user(data + split, buffer->data, length - split))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->data[length - split];
	}

	/*
	 * Decrease buffer->size as the last thing, since this is what
	 * keeps the interrupt from overwriting the packet we are
	 * retrieving from the buffer.
	 */
	atomic_sub(sizeof(struct packet) + length, &buffer->size);

	return length;
}
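/*
 * Producer side of the circular buffer.  Called from the interrupt handlers
 * with lynx->client_list_lock held; a packet that does not fit is dropped and
 * only counted in lost_packet_count.
 */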
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */

	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}
static void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}

static u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}

static void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}
/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
	int dmachan)
{
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
	if (addr > 15) {
		dev_err(&lynx->pci_device->dev,
			"PHY register address %d out of range\n", addr);
		return -1;
	}
	if (val > 0xff) {
		dev_err(&lynx->pci_device->dev,
			"PHY register value %d out of range\n", val);
		return -1;
	}
	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

	return 0;
}
static int
nosy_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct client *client;
	struct pcilynx *tmp, *lynx = NULL;

	mutex_lock(&card_mutex);
	list_for_each_entry(tmp, &card_list, link)
		if (tmp->misc.minor == minor) {
			lynx = lynx_get(tmp);
			break;
		}
	mutex_unlock(&card_mutex);
	if (lynx == NULL)
		return -ENODEV;

	client = kmalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		goto fail;

	client->tcode_mask = ~0;
	client->lynx = lynx;
	INIT_LIST_HEAD(&client->link);

	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
		goto fail;

	file->private_data = client;

	return stream_open(inode, file);
fail:
	kfree(client);
	lynx_put(lynx);

	return -ENOMEM;
}
static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}
static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &client->buffer.wait, pt);

	if (atomic_read(&client->buffer.size) > 0)
		ret = EPOLLIN | EPOLLRDNORM;

	if (list_empty(&client->lynx->link))
		ret |= EPOLLHUP;

	return ret;
}
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;
	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
	struct nosy_stats stats;
	int ret;

	switch (cmd) {
	case NOSY_IOC_GET_STATS:
		spin_lock_irq(client_list_lock);
		stats.total_packet_count = client->buffer.total_packet_count;
		stats.lost_packet_count  = client->buffer.lost_packet_count;
		spin_unlock_irq(client_list_lock);

		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
			return -EFAULT;
		else
			return 0;

	case NOSY_IOC_START:
		ret = -EBUSY;
		spin_lock_irq(client_list_lock);
		if (list_empty(&client->link)) {
			list_add_tail(&client->link, &client->lynx->client_list);
			ret = 0;
		}
		spin_unlock_irq(client_list_lock);

		return ret;

	case NOSY_IOC_STOP:
		spin_lock_irq(client_list_lock);
		list_del_init(&client->link);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_FILTER:
		spin_lock_irq(client_list_lock);
		client->tcode_mask = arg;
		spin_unlock_irq(client_list_lock);

		return 0;

	default:
		return -EINVAL;
		/* Flush buffer, configure filter. */
	}
}
static const struct file_operations nosy_ops = {
	.owner =		THIS_MODULE,
	.read =			nosy_read,
	.unlocked_ioctl =	nosy_ioctl,
	.poll =			nosy_poll,
	.open =			nosy_open,
	.release =		nosy_release,
};
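/*
 * Illustrative userspace flow, not part of the driver; the ioctl names come
 * from nosy-user.h and the device node is typically /dev/nosy (misc device
 * named "nosy"):
 *
 *	int fd = open("/dev/nosy", O_RDONLY);
 *
 *	ioctl(fd, NOSY_IOC_FILTER, ~0);    // capture all tcodes
 *	ioctl(fd, NOSY_IOC_START);         // join the card's client list
 *	while (read(fd, buf, sizeof buf) > 0)
 *		;                          // each read() returns one packet
 *	ioctl(fd, NOSY_IOC_STOP);
 */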
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode, timestamp;
	size_t length;
	struct timespec64 ts64;

	/* FIXME: Also report rcv_speed. */

	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
	lynx->rcv_buffer[0] = (__force __le32)timestamp;

	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}
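/*
 * A bus reset is reported to clients as a four-byte pseudo packet carrying
 * only the microsecond timestamp.
 */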
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timespec64 ts64;
	u32    timestamp;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &timestamp, 4);

	spin_unlock(&lynx->client_list_lock);
}
static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/* Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately. */

	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}
static void
remove_card(struct pci_dev *dev)
{
	struct pcilynx *lynx = pci_get_drvdata(dev);
	struct client *client;

	mutex_lock(&card_mutex);
	list_del_init(&lynx->link);
	misc_deregister(&lynx->misc);
	mutex_unlock(&card_mutex);

	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

	spin_lock_irq(&lynx->client_list_lock);
	list_for_each_entry(client, &lynx->client_list, link)
		wake_up_interruptible(&client->buffer.wait);
	spin_unlock_irq(&lynx->client_list_lock);

	dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
			  lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
			  lynx->rcv_pcl, lynx->rcv_pcl_bus);
	dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, lynx->rcv_buffer,
			  lynx->rcv_buffer_bus);

	iounmap(lynx->registers);
	pci_disable_device(dev);
	lynx_put(lynx);
}
#define RCV_BUFFER_SIZE (16 * 1024)
static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
	struct pcilynx *lynx;
	u32 p, end;
	int ret, i;

	if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
		dev_err(&dev->dev,
		    "DMA address limits not supported for PCILynx hardware\n");
		return -ENXIO;
	}
	if (pci_enable_device(dev)) {
		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
		return -ENXIO;
	}
	pci_set_master(dev);

	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
	if (lynx == NULL) {
		dev_err(&dev->dev, "Failed to allocate control structure\n");
		ret = -ENOMEM;
		goto fail_disable;
	}
	lynx->pci_device = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->client_list_lock);
	INIT_LIST_HEAD(&lynx->client_list);
	kref_init(&lynx->kref);
	lynx->registers = ioremap(pci_resource_start(dev, 0),
				  PCILYNX_MAX_REGISTER);
	if (lynx->registers == NULL) {
		dev_err(&dev->dev, "Failed to map registers\n");
		ret = -ENOMEM;
		goto fail_deallocate_lynx;
	}

	lynx->rcv_start_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
				sizeof(struct pcl),
				&lynx->rcv_start_pcl_bus,
				GFP_KERNEL);
	lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
				sizeof(struct pcl),
				&lynx->rcv_pcl_bus, GFP_KERNEL);
	lynx->rcv_buffer = dma_alloc_coherent(&lynx->pci_device->dev,
				RCV_BUFFER_SIZE,
				&lynx->rcv_buffer_bus, GFP_KERNEL);
	if (lynx->rcv_start_pcl == NULL ||
	    lynx->rcv_pcl == NULL ||
	    lynx->rcv_buffer == NULL) {
		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
		ret = -ENOMEM;
		goto fail_deallocate_buffers;
	}
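	/*
	 * Chain the receive buffers into the PCL program: buffer[0] covers
	 * 2044 bytes starting at rcv_buffer_bus + 4, leaving the first
	 * quadlet free for the timestamp written by packet_irq_handler();
	 * the remaining 2048-byte chunks cover the rest of RCV_BUFFER_SIZE.
	 */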
	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
	lynx->rcv_pcl->async_error_next	= cpu_to_le32(PCL_NEXT_INVALID);

	lynx->rcv_pcl->buffer[0].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
	lynx->rcv_pcl->buffer[0].pointer =
			cpu_to_le32(lynx->rcv_buffer_bus + 4);
	p = lynx->rcv_buffer_bus + 2048;
	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
	for (i = 1; p < end; i++, p += 2048) {
		lynx->rcv_pcl->buffer[i].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
	}
	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}
#endif

	/* Setup the general receive FIFO max size. */
	reg_write(lynx, FIFO_SIZES, 255);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	reg_write(lynx, LINK_INT_ENABLE,
		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

	/* Disable the L flag in self ID packets. */
	set_phy_reg(lynx, 4, 0);

	/* Put this baby into snoop mode */
	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
			driver_name, lynx)) {
		dev_err(&dev->dev,
			"Failed to allocate shared interrupt %d\n", dev->irq);
		ret = -EIO;
		goto fail_deallocate_buffers;
	}

	lynx->misc.parent = &dev->dev;
	lynx->misc.minor = MISC_DYNAMIC_MINOR;
	lynx->misc.name = "nosy";
	lynx->misc.fops = &nosy_ops;

	mutex_lock(&card_mutex);
	ret = misc_register(&lynx->misc);
	if (ret) {
		dev_err(&dev->dev, "Failed to register misc char device\n");
		mutex_unlock(&card_mutex);
		goto fail_free_irq;
	}
	list_add_tail(&lynx->link, &card_list);
	mutex_unlock(&card_mutex);

	dev_info(&dev->dev,
		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

	return 0;
fail_free_irq:
	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
	if (lynx->rcv_start_pcl)
		dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
				  lynx->rcv_start_pcl,
				  lynx->rcv_start_pcl_bus);
	if (lynx->rcv_pcl)
		dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
				  lynx->rcv_pcl, lynx->rcv_pcl_bus);
	if (lynx->rcv_buffer)
		dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE,
				  lynx->rcv_buffer, lynx->rcv_buffer_bus);
	iounmap(lynx->registers);

fail_deallocate_lynx:
	kfree(lynx);

fail_disable:
	pci_disable_device(dev);

	return ret;
}
static struct pci_device_id pci_table[] = {
	{
		.vendor =	PCI_VENDOR_ID_TI,
		.device =	PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver lynx_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_table,
	.probe =	add_card,
	.remove =	remove_card,
};

module_pci_driver(lynx_pci_driver);
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");