1 // SPDX-License-Identifier: GPL-2.0
3 * Microsemi Switchtec(tm) PCIe Management Driver
4 * Copyright (c) 2017, Microsemi Corporation
7 #include <linux/switchtec.h>
8 #include <linux/switchtec_ioctl.h>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/poll.h>
15 #include <linux/wait.h>
16 #include <linux/io-64-nonatomic-lo-hi.h>
17 #include <linux/nospec.h>
19 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
20 MODULE_VERSION("0.1");
21 MODULE_LICENSE("GPL");
22 MODULE_AUTHOR("Microsemi Corporation");
24 static int max_devices
= 16;
25 module_param(max_devices
, int, 0644);
26 MODULE_PARM_DESC(max_devices
, "max number of switchtec device instances");
28 static bool use_dma_mrpc
= true;
29 module_param(use_dma_mrpc
, bool, 0644);
30 MODULE_PARM_DESC(use_dma_mrpc
,
31 "Enable the use of the DMA MRPC feature");
33 static int nirqs
= 32;
34 module_param(nirqs
, int, 0644);
35 MODULE_PARM_DESC(nirqs
, "number of interrupts to allocate (more may be useful for NTB applications)");
37 static dev_t switchtec_devt
;
38 static DEFINE_IDA(switchtec_minor_ida
);
40 const struct class switchtec_class
= {
43 EXPORT_SYMBOL_GPL(switchtec_class
);
53 struct switchtec_user
{
54 struct switchtec_dev
*stdev
;
56 enum mrpc_state state
;
58 wait_queue_head_t cmd_comp
;
60 struct list_head list
;
68 unsigned char data
[SWITCHTEC_MRPC_PAYLOAD_SIZE
];
73 * The MMIO reads to the device_id register should always return the device ID
74 * of the device, otherwise the firmware is probably stuck or unreachable
75 * due to a firmware reset which clears PCI state including the BARs and Memory
78 static int is_firmware_running(struct switchtec_dev
*stdev
)
80 u32 device
= ioread32(&stdev
->mmio_sys_info
->device_id
);
82 return stdev
->pdev
->device
== device
;
85 static struct switchtec_user
*stuser_create(struct switchtec_dev
*stdev
)
87 struct switchtec_user
*stuser
;
89 stuser
= kzalloc(sizeof(*stuser
), GFP_KERNEL
);
91 return ERR_PTR(-ENOMEM
);
93 get_device(&stdev
->dev
);
94 stuser
->stdev
= stdev
;
95 kref_init(&stuser
->kref
);
96 INIT_LIST_HEAD(&stuser
->list
);
97 init_waitqueue_head(&stuser
->cmd_comp
);
98 stuser
->event_cnt
= atomic_read(&stdev
->event_cnt
);
100 dev_dbg(&stdev
->dev
, "%s: %p\n", __func__
, stuser
);
105 static void stuser_free(struct kref
*kref
)
107 struct switchtec_user
*stuser
;
109 stuser
= container_of(kref
, struct switchtec_user
, kref
);
111 dev_dbg(&stuser
->stdev
->dev
, "%s: %p\n", __func__
, stuser
);
113 put_device(&stuser
->stdev
->dev
);
117 static void stuser_put(struct switchtec_user
*stuser
)
119 kref_put(&stuser
->kref
, stuser_free
);
122 static void stuser_set_state(struct switchtec_user
*stuser
,
123 enum mrpc_state state
)
125 /* requires the mrpc_mutex to already be held when called */
127 static const char * const state_names
[] = {
128 [MRPC_IDLE
] = "IDLE",
129 [MRPC_QUEUED
] = "QUEUED",
130 [MRPC_RUNNING
] = "RUNNING",
131 [MRPC_DONE
] = "DONE",
132 [MRPC_IO_ERROR
] = "IO_ERROR",
135 stuser
->state
= state
;
137 dev_dbg(&stuser
->stdev
->dev
, "stuser state %p -> %s",
138 stuser
, state_names
[state
]);
141 static void mrpc_complete_cmd(struct switchtec_dev
*stdev
);
143 static void flush_wc_buf(struct switchtec_dev
*stdev
)
145 struct ntb_dbmsg_regs __iomem
*mmio_dbmsg
;
148 * odb (outbound doorbell) register is processed by low latency
149 * hardware and w/o side effect
151 mmio_dbmsg
= (void __iomem
*)stdev
->mmio_ntb
+
152 SWITCHTEC_NTB_REG_DBMSG_OFFSET
;
153 ioread32(&mmio_dbmsg
->odb
);
156 static void mrpc_cmd_submit(struct switchtec_dev
*stdev
)
158 /* requires the mrpc_mutex to already be held when called */
160 struct switchtec_user
*stuser
;
162 if (stdev
->mrpc_busy
)
165 if (list_empty(&stdev
->mrpc_queue
))
168 stuser
= list_entry(stdev
->mrpc_queue
.next
, struct switchtec_user
,
171 if (stdev
->dma_mrpc
) {
172 stdev
->dma_mrpc
->status
= SWITCHTEC_MRPC_STATUS_INPROGRESS
;
173 memset(stdev
->dma_mrpc
->data
, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE
);
176 stuser_set_state(stuser
, MRPC_RUNNING
);
177 stdev
->mrpc_busy
= 1;
178 memcpy_toio(&stdev
->mmio_mrpc
->input_data
,
179 stuser
->data
, stuser
->data_len
);
181 iowrite32(stuser
->cmd
, &stdev
->mmio_mrpc
->cmd
);
183 schedule_delayed_work(&stdev
->mrpc_timeout
,
184 msecs_to_jiffies(500));
187 static int mrpc_queue_cmd(struct switchtec_user
*stuser
)
189 /* requires the mrpc_mutex to already be held when called */
191 struct switchtec_dev
*stdev
= stuser
->stdev
;
193 kref_get(&stuser
->kref
);
194 stuser
->read_len
= sizeof(stuser
->data
);
195 stuser_set_state(stuser
, MRPC_QUEUED
);
196 stuser
->cmd_done
= false;
197 list_add_tail(&stuser
->list
, &stdev
->mrpc_queue
);
199 mrpc_cmd_submit(stdev
);
204 static void mrpc_cleanup_cmd(struct switchtec_dev
*stdev
)
206 /* requires the mrpc_mutex to already be held when called */
208 struct switchtec_user
*stuser
= list_entry(stdev
->mrpc_queue
.next
,
209 struct switchtec_user
, list
);
211 stuser
->cmd_done
= true;
212 wake_up_interruptible(&stuser
->cmd_comp
);
213 list_del_init(&stuser
->list
);
215 stdev
->mrpc_busy
= 0;
217 mrpc_cmd_submit(stdev
);
220 static void mrpc_complete_cmd(struct switchtec_dev
*stdev
)
222 /* requires the mrpc_mutex to already be held when called */
224 struct switchtec_user
*stuser
;
226 if (list_empty(&stdev
->mrpc_queue
))
229 stuser
= list_entry(stdev
->mrpc_queue
.next
, struct switchtec_user
,
233 stuser
->status
= stdev
->dma_mrpc
->status
;
235 stuser
->status
= ioread32(&stdev
->mmio_mrpc
->status
);
237 if (stuser
->status
== SWITCHTEC_MRPC_STATUS_INPROGRESS
)
240 stuser_set_state(stuser
, MRPC_DONE
);
241 stuser
->return_code
= 0;
243 if (stuser
->status
!= SWITCHTEC_MRPC_STATUS_DONE
&&
244 stuser
->status
!= SWITCHTEC_MRPC_STATUS_ERROR
)
248 stuser
->return_code
= stdev
->dma_mrpc
->rtn_code
;
250 stuser
->return_code
= ioread32(&stdev
->mmio_mrpc
->ret_value
);
251 if (stuser
->return_code
!= 0)
255 memcpy(stuser
->data
, &stdev
->dma_mrpc
->data
,
258 memcpy_fromio(stuser
->data
, &stdev
->mmio_mrpc
->output_data
,
261 mrpc_cleanup_cmd(stdev
);
264 static void mrpc_event_work(struct work_struct
*work
)
266 struct switchtec_dev
*stdev
;
268 stdev
= container_of(work
, struct switchtec_dev
, mrpc_work
);
270 dev_dbg(&stdev
->dev
, "%s\n", __func__
);
272 mutex_lock(&stdev
->mrpc_mutex
);
273 cancel_delayed_work(&stdev
->mrpc_timeout
);
274 mrpc_complete_cmd(stdev
);
275 mutex_unlock(&stdev
->mrpc_mutex
);
278 static void mrpc_error_complete_cmd(struct switchtec_dev
*stdev
)
280 /* requires the mrpc_mutex to already be held when called */
282 struct switchtec_user
*stuser
;
284 if (list_empty(&stdev
->mrpc_queue
))
287 stuser
= list_entry(stdev
->mrpc_queue
.next
,
288 struct switchtec_user
, list
);
290 stuser_set_state(stuser
, MRPC_IO_ERROR
);
292 mrpc_cleanup_cmd(stdev
);
295 static void mrpc_timeout_work(struct work_struct
*work
)
297 struct switchtec_dev
*stdev
;
300 stdev
= container_of(work
, struct switchtec_dev
, mrpc_timeout
.work
);
302 dev_dbg(&stdev
->dev
, "%s\n", __func__
);
304 mutex_lock(&stdev
->mrpc_mutex
);
306 if (!is_firmware_running(stdev
)) {
307 mrpc_error_complete_cmd(stdev
);
312 status
= stdev
->dma_mrpc
->status
;
314 status
= ioread32(&stdev
->mmio_mrpc
->status
);
315 if (status
== SWITCHTEC_MRPC_STATUS_INPROGRESS
) {
316 schedule_delayed_work(&stdev
->mrpc_timeout
,
317 msecs_to_jiffies(500));
321 mrpc_complete_cmd(stdev
);
323 mutex_unlock(&stdev
->mrpc_mutex
);
326 static ssize_t
device_version_show(struct device
*dev
,
327 struct device_attribute
*attr
, char *buf
)
329 struct switchtec_dev
*stdev
= to_stdev(dev
);
332 ver
= ioread32(&stdev
->mmio_sys_info
->device_version
);
334 return sysfs_emit(buf
, "%x\n", ver
);
336 static DEVICE_ATTR_RO(device_version
);
338 static ssize_t
fw_version_show(struct device
*dev
,
339 struct device_attribute
*attr
, char *buf
)
341 struct switchtec_dev
*stdev
= to_stdev(dev
);
344 ver
= ioread32(&stdev
->mmio_sys_info
->firmware_version
);
346 return sysfs_emit(buf
, "%08x\n", ver
);
348 static DEVICE_ATTR_RO(fw_version
);
350 static ssize_t
io_string_show(char *buf
, void __iomem
*attr
, size_t len
)
354 memcpy_fromio(buf
, attr
, len
);
358 for (i
= len
- 1; i
> 0; i
--) {
368 #define DEVICE_ATTR_SYS_INFO_STR(field) \
369 static ssize_t field ## _show(struct device *dev, \
370 struct device_attribute *attr, char *buf) \
372 struct switchtec_dev *stdev = to_stdev(dev); \
373 struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
374 if (stdev->gen == SWITCHTEC_GEN3) \
375 return io_string_show(buf, &si->gen3.field, \
376 sizeof(si->gen3.field)); \
377 else if (stdev->gen >= SWITCHTEC_GEN4) \
378 return io_string_show(buf, &si->gen4.field, \
379 sizeof(si->gen4.field)); \
381 return -EOPNOTSUPP; \
384 static DEVICE_ATTR_RO(field)
386 DEVICE_ATTR_SYS_INFO_STR(vendor_id
);
387 DEVICE_ATTR_SYS_INFO_STR(product_id
);
388 DEVICE_ATTR_SYS_INFO_STR(product_revision
);
390 static ssize_t
component_vendor_show(struct device
*dev
,
391 struct device_attribute
*attr
, char *buf
)
393 struct switchtec_dev
*stdev
= to_stdev(dev
);
394 struct sys_info_regs __iomem
*si
= stdev
->mmio_sys_info
;
396 /* component_vendor field not supported after gen3 */
397 if (stdev
->gen
!= SWITCHTEC_GEN3
)
398 return sysfs_emit(buf
, "none\n");
400 return io_string_show(buf
, &si
->gen3
.component_vendor
,
401 sizeof(si
->gen3
.component_vendor
));
403 static DEVICE_ATTR_RO(component_vendor
);
405 static ssize_t
component_id_show(struct device
*dev
,
406 struct device_attribute
*attr
, char *buf
)
408 struct switchtec_dev
*stdev
= to_stdev(dev
);
409 int id
= ioread16(&stdev
->mmio_sys_info
->gen3
.component_id
);
411 /* component_id field not supported after gen3 */
412 if (stdev
->gen
!= SWITCHTEC_GEN3
)
413 return sysfs_emit(buf
, "none\n");
415 return sysfs_emit(buf
, "PM%04X\n", id
);
417 static DEVICE_ATTR_RO(component_id
);
419 static ssize_t
component_revision_show(struct device
*dev
,
420 struct device_attribute
*attr
, char *buf
)
422 struct switchtec_dev
*stdev
= to_stdev(dev
);
423 int rev
= ioread8(&stdev
->mmio_sys_info
->gen3
.component_revision
);
425 /* component_revision field not supported after gen3 */
426 if (stdev
->gen
!= SWITCHTEC_GEN3
)
427 return sysfs_emit(buf
, "255\n");
429 return sysfs_emit(buf
, "%d\n", rev
);
431 static DEVICE_ATTR_RO(component_revision
);
433 static ssize_t
partition_show(struct device
*dev
,
434 struct device_attribute
*attr
, char *buf
)
436 struct switchtec_dev
*stdev
= to_stdev(dev
);
438 return sysfs_emit(buf
, "%d\n", stdev
->partition
);
440 static DEVICE_ATTR_RO(partition
);
442 static ssize_t
partition_count_show(struct device
*dev
,
443 struct device_attribute
*attr
, char *buf
)
445 struct switchtec_dev
*stdev
= to_stdev(dev
);
447 return sysfs_emit(buf
, "%d\n", stdev
->partition_count
);
449 static DEVICE_ATTR_RO(partition_count
);
451 static struct attribute
*switchtec_device_attrs
[] = {
452 &dev_attr_device_version
.attr
,
453 &dev_attr_fw_version
.attr
,
454 &dev_attr_vendor_id
.attr
,
455 &dev_attr_product_id
.attr
,
456 &dev_attr_product_revision
.attr
,
457 &dev_attr_component_vendor
.attr
,
458 &dev_attr_component_id
.attr
,
459 &dev_attr_component_revision
.attr
,
460 &dev_attr_partition
.attr
,
461 &dev_attr_partition_count
.attr
,
465 ATTRIBUTE_GROUPS(switchtec_device
);
467 static int switchtec_dev_open(struct inode
*inode
, struct file
*filp
)
469 struct switchtec_dev
*stdev
;
470 struct switchtec_user
*stuser
;
472 stdev
= container_of(inode
->i_cdev
, struct switchtec_dev
, cdev
);
474 stuser
= stuser_create(stdev
);
476 return PTR_ERR(stuser
);
478 filp
->private_data
= stuser
;
479 stream_open(inode
, filp
);
481 dev_dbg(&stdev
->dev
, "%s: %p\n", __func__
, stuser
);
486 static int switchtec_dev_release(struct inode
*inode
, struct file
*filp
)
488 struct switchtec_user
*stuser
= filp
->private_data
;
495 static int lock_mutex_and_test_alive(struct switchtec_dev
*stdev
)
497 if (mutex_lock_interruptible(&stdev
->mrpc_mutex
))
501 mutex_unlock(&stdev
->mrpc_mutex
);
508 static ssize_t
switchtec_dev_write(struct file
*filp
, const char __user
*data
,
509 size_t size
, loff_t
*off
)
511 struct switchtec_user
*stuser
= filp
->private_data
;
512 struct switchtec_dev
*stdev
= stuser
->stdev
;
515 if (size
< sizeof(stuser
->cmd
) ||
516 size
> sizeof(stuser
->cmd
) + sizeof(stuser
->data
))
519 stuser
->data_len
= size
- sizeof(stuser
->cmd
);
521 rc
= lock_mutex_and_test_alive(stdev
);
525 if (stuser
->state
!= MRPC_IDLE
) {
530 rc
= copy_from_user(&stuser
->cmd
, data
, sizeof(stuser
->cmd
));
535 if (((MRPC_CMD_ID(stuser
->cmd
) == MRPC_GAS_WRITE
) ||
536 (MRPC_CMD_ID(stuser
->cmd
) == MRPC_GAS_READ
)) &&
537 !capable(CAP_SYS_ADMIN
)) {
542 data
+= sizeof(stuser
->cmd
);
543 rc
= copy_from_user(&stuser
->data
, data
, size
- sizeof(stuser
->cmd
));
549 rc
= mrpc_queue_cmd(stuser
);
552 mutex_unlock(&stdev
->mrpc_mutex
);
560 static ssize_t
switchtec_dev_read(struct file
*filp
, char __user
*data
,
561 size_t size
, loff_t
*off
)
563 struct switchtec_user
*stuser
= filp
->private_data
;
564 struct switchtec_dev
*stdev
= stuser
->stdev
;
567 if (size
< sizeof(stuser
->cmd
) ||
568 size
> sizeof(stuser
->cmd
) + sizeof(stuser
->data
))
571 rc
= lock_mutex_and_test_alive(stdev
);
575 if (stuser
->state
== MRPC_IDLE
) {
576 mutex_unlock(&stdev
->mrpc_mutex
);
580 stuser
->read_len
= size
- sizeof(stuser
->return_code
);
582 mutex_unlock(&stdev
->mrpc_mutex
);
584 if (filp
->f_flags
& O_NONBLOCK
) {
585 if (!stuser
->cmd_done
)
588 rc
= wait_event_interruptible(stuser
->cmd_comp
,
594 rc
= lock_mutex_and_test_alive(stdev
);
598 if (stuser
->state
== MRPC_IO_ERROR
) {
599 mutex_unlock(&stdev
->mrpc_mutex
);
603 if (stuser
->state
!= MRPC_DONE
) {
604 mutex_unlock(&stdev
->mrpc_mutex
);
608 rc
= copy_to_user(data
, &stuser
->return_code
,
609 sizeof(stuser
->return_code
));
611 mutex_unlock(&stdev
->mrpc_mutex
);
615 data
+= sizeof(stuser
->return_code
);
616 rc
= copy_to_user(data
, &stuser
->data
,
617 size
- sizeof(stuser
->return_code
));
619 mutex_unlock(&stdev
->mrpc_mutex
);
623 stuser_set_state(stuser
, MRPC_IDLE
);
625 mutex_unlock(&stdev
->mrpc_mutex
);
627 if (stuser
->status
== SWITCHTEC_MRPC_STATUS_DONE
||
628 stuser
->status
== SWITCHTEC_MRPC_STATUS_ERROR
)
630 else if (stuser
->status
== SWITCHTEC_MRPC_STATUS_INTERRUPTED
)
636 static __poll_t
switchtec_dev_poll(struct file
*filp
, poll_table
*wait
)
638 struct switchtec_user
*stuser
= filp
->private_data
;
639 struct switchtec_dev
*stdev
= stuser
->stdev
;
642 poll_wait(filp
, &stuser
->cmd_comp
, wait
);
643 poll_wait(filp
, &stdev
->event_wq
, wait
);
645 if (lock_mutex_and_test_alive(stdev
))
646 return EPOLLIN
| EPOLLRDHUP
| EPOLLOUT
| EPOLLERR
| EPOLLHUP
;
648 mutex_unlock(&stdev
->mrpc_mutex
);
650 if (stuser
->cmd_done
)
651 ret
|= EPOLLIN
| EPOLLRDNORM
;
653 if (stuser
->event_cnt
!= atomic_read(&stdev
->event_cnt
))
654 ret
|= EPOLLPRI
| EPOLLRDBAND
;
659 static int ioctl_flash_info(struct switchtec_dev
*stdev
,
660 struct switchtec_ioctl_flash_info __user
*uinfo
)
662 struct switchtec_ioctl_flash_info info
= {0};
663 struct flash_info_regs __iomem
*fi
= stdev
->mmio_flash_info
;
665 if (stdev
->gen
== SWITCHTEC_GEN3
) {
666 info
.flash_length
= ioread32(&fi
->gen3
.flash_length
);
667 info
.num_partitions
= SWITCHTEC_NUM_PARTITIONS_GEN3
;
668 } else if (stdev
->gen
>= SWITCHTEC_GEN4
) {
669 info
.flash_length
= ioread32(&fi
->gen4
.flash_length
);
670 info
.num_partitions
= SWITCHTEC_NUM_PARTITIONS_GEN4
;
675 if (copy_to_user(uinfo
, &info
, sizeof(info
)))
681 static void set_fw_info_part(struct switchtec_ioctl_flash_part_info
*info
,
682 struct partition_info __iomem
*pi
)
684 info
->address
= ioread32(&pi
->address
);
685 info
->length
= ioread32(&pi
->length
);
688 static int flash_part_info_gen3(struct switchtec_dev
*stdev
,
689 struct switchtec_ioctl_flash_part_info
*info
)
691 struct flash_info_regs_gen3 __iomem
*fi
=
692 &stdev
->mmio_flash_info
->gen3
;
693 struct sys_info_regs_gen3 __iomem
*si
= &stdev
->mmio_sys_info
->gen3
;
694 u32 active_addr
= -1;
696 switch (info
->flash_partition
) {
697 case SWITCHTEC_IOCTL_PART_CFG0
:
698 active_addr
= ioread32(&fi
->active_cfg
);
699 set_fw_info_part(info
, &fi
->cfg0
);
700 if (ioread16(&si
->cfg_running
) == SWITCHTEC_GEN3_CFG0_RUNNING
)
701 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
703 case SWITCHTEC_IOCTL_PART_CFG1
:
704 active_addr
= ioread32(&fi
->active_cfg
);
705 set_fw_info_part(info
, &fi
->cfg1
);
706 if (ioread16(&si
->cfg_running
) == SWITCHTEC_GEN3_CFG1_RUNNING
)
707 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
709 case SWITCHTEC_IOCTL_PART_IMG0
:
710 active_addr
= ioread32(&fi
->active_img
);
711 set_fw_info_part(info
, &fi
->img0
);
712 if (ioread16(&si
->img_running
) == SWITCHTEC_GEN3_IMG0_RUNNING
)
713 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
715 case SWITCHTEC_IOCTL_PART_IMG1
:
716 active_addr
= ioread32(&fi
->active_img
);
717 set_fw_info_part(info
, &fi
->img1
);
718 if (ioread16(&si
->img_running
) == SWITCHTEC_GEN3_IMG1_RUNNING
)
719 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
721 case SWITCHTEC_IOCTL_PART_NVLOG
:
722 set_fw_info_part(info
, &fi
->nvlog
);
724 case SWITCHTEC_IOCTL_PART_VENDOR0
:
725 set_fw_info_part(info
, &fi
->vendor
[0]);
727 case SWITCHTEC_IOCTL_PART_VENDOR1
:
728 set_fw_info_part(info
, &fi
->vendor
[1]);
730 case SWITCHTEC_IOCTL_PART_VENDOR2
:
731 set_fw_info_part(info
, &fi
->vendor
[2]);
733 case SWITCHTEC_IOCTL_PART_VENDOR3
:
734 set_fw_info_part(info
, &fi
->vendor
[3]);
736 case SWITCHTEC_IOCTL_PART_VENDOR4
:
737 set_fw_info_part(info
, &fi
->vendor
[4]);
739 case SWITCHTEC_IOCTL_PART_VENDOR5
:
740 set_fw_info_part(info
, &fi
->vendor
[5]);
742 case SWITCHTEC_IOCTL_PART_VENDOR6
:
743 set_fw_info_part(info
, &fi
->vendor
[6]);
745 case SWITCHTEC_IOCTL_PART_VENDOR7
:
746 set_fw_info_part(info
, &fi
->vendor
[7]);
752 if (info
->address
== active_addr
)
753 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
758 static int flash_part_info_gen4(struct switchtec_dev
*stdev
,
759 struct switchtec_ioctl_flash_part_info
*info
)
761 struct flash_info_regs_gen4 __iomem
*fi
= &stdev
->mmio_flash_info
->gen4
;
762 struct sys_info_regs_gen4 __iomem
*si
= &stdev
->mmio_sys_info
->gen4
;
763 struct active_partition_info_gen4 __iomem
*af
= &fi
->active_flag
;
765 switch (info
->flash_partition
) {
766 case SWITCHTEC_IOCTL_PART_MAP_0
:
767 set_fw_info_part(info
, &fi
->map0
);
769 case SWITCHTEC_IOCTL_PART_MAP_1
:
770 set_fw_info_part(info
, &fi
->map1
);
772 case SWITCHTEC_IOCTL_PART_KEY_0
:
773 set_fw_info_part(info
, &fi
->key0
);
774 if (ioread8(&af
->key
) == SWITCHTEC_GEN4_KEY0_ACTIVE
)
775 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
776 if (ioread16(&si
->key_running
) == SWITCHTEC_GEN4_KEY0_RUNNING
)
777 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
779 case SWITCHTEC_IOCTL_PART_KEY_1
:
780 set_fw_info_part(info
, &fi
->key1
);
781 if (ioread8(&af
->key
) == SWITCHTEC_GEN4_KEY1_ACTIVE
)
782 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
783 if (ioread16(&si
->key_running
) == SWITCHTEC_GEN4_KEY1_RUNNING
)
784 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
786 case SWITCHTEC_IOCTL_PART_BL2_0
:
787 set_fw_info_part(info
, &fi
->bl2_0
);
788 if (ioread8(&af
->bl2
) == SWITCHTEC_GEN4_BL2_0_ACTIVE
)
789 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
790 if (ioread16(&si
->bl2_running
) == SWITCHTEC_GEN4_BL2_0_RUNNING
)
791 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
793 case SWITCHTEC_IOCTL_PART_BL2_1
:
794 set_fw_info_part(info
, &fi
->bl2_1
);
795 if (ioread8(&af
->bl2
) == SWITCHTEC_GEN4_BL2_1_ACTIVE
)
796 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
797 if (ioread16(&si
->bl2_running
) == SWITCHTEC_GEN4_BL2_1_RUNNING
)
798 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
800 case SWITCHTEC_IOCTL_PART_CFG0
:
801 set_fw_info_part(info
, &fi
->cfg0
);
802 if (ioread8(&af
->cfg
) == SWITCHTEC_GEN4_CFG0_ACTIVE
)
803 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
804 if (ioread16(&si
->cfg_running
) == SWITCHTEC_GEN4_CFG0_RUNNING
)
805 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
807 case SWITCHTEC_IOCTL_PART_CFG1
:
808 set_fw_info_part(info
, &fi
->cfg1
);
809 if (ioread8(&af
->cfg
) == SWITCHTEC_GEN4_CFG1_ACTIVE
)
810 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
811 if (ioread16(&si
->cfg_running
) == SWITCHTEC_GEN4_CFG1_RUNNING
)
812 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
814 case SWITCHTEC_IOCTL_PART_IMG0
:
815 set_fw_info_part(info
, &fi
->img0
);
816 if (ioread8(&af
->img
) == SWITCHTEC_GEN4_IMG0_ACTIVE
)
817 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
818 if (ioread16(&si
->img_running
) == SWITCHTEC_GEN4_IMG0_RUNNING
)
819 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
821 case SWITCHTEC_IOCTL_PART_IMG1
:
822 set_fw_info_part(info
, &fi
->img1
);
823 if (ioread8(&af
->img
) == SWITCHTEC_GEN4_IMG1_ACTIVE
)
824 info
->active
|= SWITCHTEC_IOCTL_PART_ACTIVE
;
825 if (ioread16(&si
->img_running
) == SWITCHTEC_GEN4_IMG1_RUNNING
)
826 info
->active
|= SWITCHTEC_IOCTL_PART_RUNNING
;
828 case SWITCHTEC_IOCTL_PART_NVLOG
:
829 set_fw_info_part(info
, &fi
->nvlog
);
831 case SWITCHTEC_IOCTL_PART_VENDOR0
:
832 set_fw_info_part(info
, &fi
->vendor
[0]);
834 case SWITCHTEC_IOCTL_PART_VENDOR1
:
835 set_fw_info_part(info
, &fi
->vendor
[1]);
837 case SWITCHTEC_IOCTL_PART_VENDOR2
:
838 set_fw_info_part(info
, &fi
->vendor
[2]);
840 case SWITCHTEC_IOCTL_PART_VENDOR3
:
841 set_fw_info_part(info
, &fi
->vendor
[3]);
843 case SWITCHTEC_IOCTL_PART_VENDOR4
:
844 set_fw_info_part(info
, &fi
->vendor
[4]);
846 case SWITCHTEC_IOCTL_PART_VENDOR5
:
847 set_fw_info_part(info
, &fi
->vendor
[5]);
849 case SWITCHTEC_IOCTL_PART_VENDOR6
:
850 set_fw_info_part(info
, &fi
->vendor
[6]);
852 case SWITCHTEC_IOCTL_PART_VENDOR7
:
853 set_fw_info_part(info
, &fi
->vendor
[7]);
862 static int ioctl_flash_part_info(struct switchtec_dev
*stdev
,
863 struct switchtec_ioctl_flash_part_info __user
*uinfo
)
866 struct switchtec_ioctl_flash_part_info info
= {0};
868 if (copy_from_user(&info
, uinfo
, sizeof(info
)))
871 if (stdev
->gen
== SWITCHTEC_GEN3
) {
872 ret
= flash_part_info_gen3(stdev
, &info
);
875 } else if (stdev
->gen
>= SWITCHTEC_GEN4
) {
876 ret
= flash_part_info_gen4(stdev
, &info
);
883 if (copy_to_user(uinfo
, &info
, sizeof(info
)))
889 static int ioctl_event_summary(struct switchtec_dev
*stdev
,
890 struct switchtec_user
*stuser
,
891 struct switchtec_ioctl_event_summary __user
*usum
,
894 struct switchtec_ioctl_event_summary
*s
;
899 s
= kzalloc(sizeof(*s
), GFP_KERNEL
);
903 s
->global
= ioread32(&stdev
->mmio_sw_event
->global_summary
);
904 s
->part_bitmap
= ioread64(&stdev
->mmio_sw_event
->part_event_bitmap
);
905 s
->local_part
= ioread32(&stdev
->mmio_part_cfg
->part_event_summary
);
907 for (i
= 0; i
< stdev
->partition_count
; i
++) {
908 reg
= ioread32(&stdev
->mmio_part_cfg_all
[i
].part_event_summary
);
912 for (i
= 0; i
< stdev
->pff_csr_count
; i
++) {
913 reg
= ioread32(&stdev
->mmio_pff_csr
[i
].pff_event_summary
);
917 if (copy_to_user(usum
, s
, size
)) {
922 stuser
->event_cnt
= atomic_read(&stdev
->event_cnt
);
929 static u32 __iomem
*global_ev_reg(struct switchtec_dev
*stdev
,
930 size_t offset
, int index
)
932 return (void __iomem
*)stdev
->mmio_sw_event
+ offset
;
935 static u32 __iomem
*part_ev_reg(struct switchtec_dev
*stdev
,
936 size_t offset
, int index
)
938 return (void __iomem
*)&stdev
->mmio_part_cfg_all
[index
] + offset
;
941 static u32 __iomem
*pff_ev_reg(struct switchtec_dev
*stdev
,
942 size_t offset
, int index
)
944 return (void __iomem
*)&stdev
->mmio_pff_csr
[index
] + offset
;
947 #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
948 #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
949 #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
951 static const struct event_reg
{
953 u32 __iomem
*(*map_reg
)(struct switchtec_dev
*stdev
,
954 size_t offset
, int index
);
956 EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR
, stack_error_event_hdr
),
957 EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR
, ppu_error_event_hdr
),
958 EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR
, isp_error_event_hdr
),
959 EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET
, sys_reset_event_hdr
),
960 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC
, fw_exception_hdr
),
961 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI
, fw_nmi_hdr
),
962 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL
, fw_non_fatal_hdr
),
963 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL
, fw_fatal_hdr
),
964 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP
, twi_mrpc_comp_hdr
),
965 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC
,
966 twi_mrpc_comp_async_hdr
),
967 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP
, cli_mrpc_comp_hdr
),
968 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC
,
969 cli_mrpc_comp_async_hdr
),
970 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT
, gpio_interrupt_hdr
),
971 EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS
, gfms_event_hdr
),
972 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET
, part_reset_hdr
),
973 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP
, mrpc_comp_hdr
),
974 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC
, mrpc_comp_async_hdr
),
975 EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP
, dyn_binding_hdr
),
976 EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY
,
977 intercomm_notify_hdr
),
978 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P
, aer_in_p2p_hdr
),
979 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP
, aer_in_vep_hdr
),
980 EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC
, dpc_hdr
),
981 EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS
, cts_hdr
),
982 EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC
, uec_hdr
),
983 EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG
, hotplug_hdr
),
984 EV_PFF(SWITCHTEC_IOCTL_EVENT_IER
, ier_hdr
),
985 EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH
, threshold_hdr
),
986 EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT
, power_mgmt_hdr
),
987 EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING
, tlp_throttling_hdr
),
988 EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED
, force_speed_hdr
),
989 EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT
, credit_timeout_hdr
),
990 EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE
, link_state_hdr
),
993 static u32 __iomem
*event_hdr_addr(struct switchtec_dev
*stdev
,
994 int event_id
, int index
)
998 if (event_id
< 0 || event_id
>= SWITCHTEC_IOCTL_MAX_EVENTS
)
999 return (u32 __iomem
*)ERR_PTR(-EINVAL
);
1001 off
= event_regs
[event_id
].offset
;
1003 if (event_regs
[event_id
].map_reg
== part_ev_reg
) {
1004 if (index
== SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX
)
1005 index
= stdev
->partition
;
1006 else if (index
< 0 || index
>= stdev
->partition_count
)
1007 return (u32 __iomem
*)ERR_PTR(-EINVAL
);
1008 } else if (event_regs
[event_id
].map_reg
== pff_ev_reg
) {
1009 if (index
< 0 || index
>= stdev
->pff_csr_count
)
1010 return (u32 __iomem
*)ERR_PTR(-EINVAL
);
1013 return event_regs
[event_id
].map_reg(stdev
, off
, index
);
1016 static int event_ctl(struct switchtec_dev
*stdev
,
1017 struct switchtec_ioctl_event_ctl
*ctl
)
1023 reg
= event_hdr_addr(stdev
, ctl
->event_id
, ctl
->index
);
1025 return PTR_ERR(reg
);
1027 hdr
= ioread32(reg
);
1028 if (hdr
& SWITCHTEC_EVENT_NOT_SUPP
)
1031 for (i
= 0; i
< ARRAY_SIZE(ctl
->data
); i
++)
1032 ctl
->data
[i
] = ioread32(®
[i
+ 1]);
1034 ctl
->occurred
= hdr
& SWITCHTEC_EVENT_OCCURRED
;
1035 ctl
->count
= (hdr
>> 5) & 0xFF;
1037 if (!(ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR
))
1038 hdr
&= ~SWITCHTEC_EVENT_CLEAR
;
1039 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL
)
1040 hdr
|= SWITCHTEC_EVENT_EN_IRQ
;
1041 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL
)
1042 hdr
&= ~SWITCHTEC_EVENT_EN_IRQ
;
1043 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG
)
1044 hdr
|= SWITCHTEC_EVENT_EN_LOG
;
1045 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG
)
1046 hdr
&= ~SWITCHTEC_EVENT_EN_LOG
;
1047 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI
)
1048 hdr
|= SWITCHTEC_EVENT_EN_CLI
;
1049 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI
)
1050 hdr
&= ~SWITCHTEC_EVENT_EN_CLI
;
1051 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL
)
1052 hdr
|= SWITCHTEC_EVENT_FATAL
;
1053 if (ctl
->flags
& SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL
)
1054 hdr
&= ~SWITCHTEC_EVENT_FATAL
;
1057 iowrite32(hdr
, reg
);
1060 if (hdr
& SWITCHTEC_EVENT_EN_IRQ
)
1061 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL
;
1062 if (hdr
& SWITCHTEC_EVENT_EN_LOG
)
1063 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG
;
1064 if (hdr
& SWITCHTEC_EVENT_EN_CLI
)
1065 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI
;
1066 if (hdr
& SWITCHTEC_EVENT_FATAL
)
1067 ctl
->flags
|= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL
;
1072 static int ioctl_event_ctl(struct switchtec_dev
*stdev
,
1073 struct switchtec_ioctl_event_ctl __user
*uctl
)
1077 unsigned int event_flags
;
1078 struct switchtec_ioctl_event_ctl ctl
;
1080 if (copy_from_user(&ctl
, uctl
, sizeof(ctl
)))
1083 if (ctl
.event_id
>= SWITCHTEC_IOCTL_MAX_EVENTS
)
1086 if (ctl
.flags
& SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED
)
1089 if (ctl
.index
== SWITCHTEC_IOCTL_EVENT_IDX_ALL
) {
1090 if (event_regs
[ctl
.event_id
].map_reg
== global_ev_reg
)
1092 else if (event_regs
[ctl
.event_id
].map_reg
== part_ev_reg
)
1093 nr_idxs
= stdev
->partition_count
;
1094 else if (event_regs
[ctl
.event_id
].map_reg
== pff_ev_reg
)
1095 nr_idxs
= stdev
->pff_csr_count
;
1099 event_flags
= ctl
.flags
;
1100 for (ctl
.index
= 0; ctl
.index
< nr_idxs
; ctl
.index
++) {
1101 ctl
.flags
= event_flags
;
1102 ret
= event_ctl(stdev
, &ctl
);
1103 if (ret
< 0 && ret
!= -EOPNOTSUPP
)
1107 ret
= event_ctl(stdev
, &ctl
);
1112 if (copy_to_user(uctl
, &ctl
, sizeof(ctl
)))
1118 static int ioctl_pff_to_port(struct switchtec_dev
*stdev
,
1119 struct switchtec_ioctl_pff_port __user
*up
)
1123 struct part_cfg_regs __iomem
*pcfg
;
1124 struct switchtec_ioctl_pff_port p
;
1126 if (copy_from_user(&p
, up
, sizeof(p
)))
1130 for (part
= 0; part
< stdev
->partition_count
; part
++) {
1131 pcfg
= &stdev
->mmio_part_cfg_all
[part
];
1134 reg
= ioread32(&pcfg
->usp_pff_inst_id
);
1140 reg
= ioread32(&pcfg
->vep_pff_inst_id
) & 0xFF;
1142 p
.port
= SWITCHTEC_IOCTL_PFF_VEP
;
1146 for (i
= 0; i
< ARRAY_SIZE(pcfg
->dsp_pff_inst_id
); i
++) {
1147 reg
= ioread32(&pcfg
->dsp_pff_inst_id
[i
]);
1159 if (copy_to_user(up
, &p
, sizeof(p
)))
1165 static int ioctl_port_to_pff(struct switchtec_dev
*stdev
,
1166 struct switchtec_ioctl_pff_port __user
*up
)
1168 struct switchtec_ioctl_pff_port p
;
1169 struct part_cfg_regs __iomem
*pcfg
;
1171 if (copy_from_user(&p
, up
, sizeof(p
)))
1174 if (p
.partition
== SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX
)
1175 pcfg
= stdev
->mmio_part_cfg
;
1176 else if (p
.partition
< stdev
->partition_count
)
1177 pcfg
= &stdev
->mmio_part_cfg_all
[p
.partition
];
1183 p
.pff
= ioread32(&pcfg
->usp_pff_inst_id
);
1185 case SWITCHTEC_IOCTL_PFF_VEP
:
1186 p
.pff
= ioread32(&pcfg
->vep_pff_inst_id
) & 0xFF;
1189 if (p
.port
> ARRAY_SIZE(pcfg
->dsp_pff_inst_id
))
1191 p
.port
= array_index_nospec(p
.port
,
1192 ARRAY_SIZE(pcfg
->dsp_pff_inst_id
) + 1);
1193 p
.pff
= ioread32(&pcfg
->dsp_pff_inst_id
[p
.port
- 1]);
1197 if (copy_to_user(up
, &p
, sizeof(p
)))
/*
 * Top-level ioctl dispatcher for the management character device.
 *
 * Takes the MRPC mutex and verifies the device is still alive before
 * dispatching (lock_mutex_and_test_alive()), so every handler runs with
 * the mutex held and a live device; the mutex is dropped on the way out.
 */
static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		/* Legacy summary: same handler, smaller (older) structure */
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}
/* File operations for the switchtec management character device */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	/* 32-bit compat ioctls: the ABI uses only pointer arguments */
	.compat_ioctl = compat_ptr_ioctl,
};
1259 static void link_event_work(struct work_struct
*work
)
1261 struct switchtec_dev
*stdev
;
1263 stdev
= container_of(work
, struct switchtec_dev
, link_event_work
);
1265 if (stdev
->link_notifier
)
1266 stdev
->link_notifier(stdev
);
/*
 * Scan every PFF link-state header; bits [12:5] hold a hardware event
 * count per port.  If any count changed since the last scan, cache the
 * new counts and schedule link_event_work to notify listeners.
 */
static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		/* Event count lives in bits [12:5] of the header */
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}
/*
 * Clear any stale link-state event and enable the IRQ for it on every
 * detected PFF, so link changes raise switchtec_event_isr().
 */
static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}
/*
 * Program the DMA MRPC output buffer address and then enable the
 * feature.  The flush between the two writes pushes the write-combined
 * address out before the enable bit is set — keep this ordering.
 */
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
/* Final device release callback: frees the switchtec_dev allocated in
 * stdev_create() once the last reference is dropped. */
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}
/*
 * Tear down all software users of a dying device: stop DMA, cancel the
 * MRPC timeout, mark the device dead under the MRPC mutex, and wake
 * every waiter so blocked readers/pollers see the device is gone.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		stuser->cmd_done = true;
		wake_up_interruptible(&stuser->cmd_comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
/*
 * Allocate and initialize a switchtec_dev for @pdev: NUMA-local
 * allocation, MRPC queue/work/timeout setup, struct device and cdev
 * initialization, and a dynamic minor from switchtec_minor_ida.
 *
 * Returns the new device or an ERR_PTR.  On failure after
 * device_initialize(), cleanup goes through put_device() so
 * stdev_release() does the kfree.
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* Allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pci_dev_get(pdev);
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = &switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	pci_dev_put(stdev->pdev);
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
/*
 * Acknowledge and mask one pending event instance.
 *
 * Reads the event header via the per-type map_reg() accessor; if the
 * event is supported, occurred, and has its IRQ enabled, the IRQ-enable
 * and occurred bits are cleared so it stops interrupting.
 *
 * Returns 1 if an event was handled, 0 otherwise.
 */
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
		return 0;

	/* Only count events that actually fired with their IRQ enabled */
	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}
/*
 * Mask every pending instance of event @eid: per-partition events are
 * masked across all partitions, PFF events across all local PFFs, and
 * global events have a single instance (idx 0).
 *
 * Returns the number of event instances that were handled.
 */
static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			/* Skip PFFs not belonging to the local partition */
			if (!stdev->pff_local[idx])
				continue;
			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}
/*
 * Main event interrupt handler.
 *
 * First checks for an MRPC completion (handled out of line by
 * mrpc_work), then polls link state, then masks/acknowledges every
 * other event type.  If any event fired, the event counter is bumped
 * and pollers on event_wq are woken.
 */
static irqreturn_t
switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write the header back to acknowledge the event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		/* Link state and MRPC completion are handled above */
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
/*
 * Interrupt handler for the dedicated DMA MRPC completion vector:
 * re-arm (clear + enable) the MRPC completion event and defer the
 * actual completion handling to mrpc_work.
 */
static irqreturn_t
switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	return IRQ_HANDLED;
}
/*
 * Allocate interrupt vectors and hook up the event ISR (vector number
 * read from vep_vector_number) and, when DMA MRPC is in use, the DMA
 * completion ISR (vector number read from the MRPC dma_vector
 * register).  Hardware-supplied vector numbers are range-checked
 * against the number of vectors actually allocated.
 *
 * Returns 0 on success or a negative errno.
 */
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	/* Request at least 4 vectors so the fixed event vectors fit */
	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);
	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}
/*
 * Count the PFF CSR banks present (vendor ID reads back as Microsemi
 * until the end of the populated region) and mark which PFF instances
 * belong to the local partition (USP, VEP, and each DSP).
 */
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	/* Only the low byte of the register holds the VEP instance */
	reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < stdev->pff_csr_count)
			stdev->pff_local[reg] = 1;
	}
}
/*
 * PCI-level bring-up: enable the device, set a 64-bit DMA mask, map
 * BAR 0 (the MRPC region is mapped write-combined, the rest of the GAS
 * normally), locate the generation-specific partition ID, derive the
 * per-partition register pointers, and optionally allocate the DMA
 * MRPC completion buffer when the hardware advertises support.
 *
 * All mappings use devm_* so they are released automatically on
 * driver detach.  Returns 0 on success or a negative errno.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	/* The MRPC region is mapped write-combined for burst writes */
	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	/* Bias the pointer so GAS offsets can be applied directly */
	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	/* Partition ID register location differs by hardware generation */
	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else if (stdev->gen >= SWITCHTEC_GEN4)
		part_id = &stdev->mmio_sys_info->gen4.partition_id;
	else
		return -EOPNOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	/* dma_ver == 0 means the hardware lacks the DMA MRPC feature */
	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}
/*
 * Undo the DMA MRPC setup: disable the feature, flush the
 * write-combining buffer, clear the programmed DMA address, and free
 * the coherent buffer.  The disable-before-free ordering prevents the
 * hardware from DMAing into freed memory.
 */
static void switchtec_exit_pci(struct switchtec_dev *stdev)
{
	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
		stdev->dma_mrpc = NULL;
	}
}
/*
 * PCI probe: create the device, initialize PCI resources and
 * interrupts, enable MRPC-completion and link-state events (and DMA
 * MRPC when available), then register the character device.
 *
 * If the device presents the NTB class code, the NTB driver is
 * requested asynchronously so it can bind as well.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_exit_pci;
	}

	/* Clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_exit_pci:
	switchtec_exit_pci(stdev);
err_put:
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
/*
 * PCI remove: unregister the character device, return the minor
 * number, wake/kill all users, release DMA resources, and drop the
 * device and pci_dev references taken at create time.
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	switchtec_exit_pci(stdev);
	pci_dev_put(stdev->pdev);
	put_device(&stdev->dev);
}
/*
 * Each switch model expands to two ID-table entries: one for the
 * management endpoint (memory class) and one for the NTB endpoint
 * (bridge class), both matched by exact class code.
 */
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}
/* Supported devices, grouped by hardware generation (Gen3/Gen4/Gen5);
 * driver_data carries the generation for switchtec_pci_probe(). */
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  /* PFX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  /* PFX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  /* PFX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  /* PFX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  /* PFX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  /* PFX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  /* PSX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  /* PSX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  /* PSX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  /* PSX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  /* PSX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  /* PSX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  /* PAX 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  /* PAX 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  /* PAX 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  /* PAX 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  /* PAX 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  /* PAX 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  /* PFXL 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  /* PFXL 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  /* PFXL 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  /* PFXL 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  /* PFXL 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  /* PFXL 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  /* PFXI 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  /* PFXI 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  /* PFXI 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  /* PFXI 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  /* PFXI 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  /* PFXI 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  /* PFX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  /* PFX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  /* PFX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  /* PFX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  /* PFX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  /* PFX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  /* PSX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  /* PSX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  /* PSX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  /* PSX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  /* PSX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  /* PSX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  /* PAX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  /* PAX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  /* PAX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  /* PAX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  /* PAX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  /* PAX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4),  /* PFXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4),  /* PFXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4),  /* PFXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4),  /* PSXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4),  /* PSXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4),  /* PSXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4),  /* PAXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4),  /* PAXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4),  /* PAXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5),  /* PFX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5),  /* PFX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5),  /* PFX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5),  /* PFX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5),  /* PFX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5),  /* PFX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5),  /* PSX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5),  /* PSX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5),  /* PSX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5),  /* PSX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5),  /* PSX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5),  /* PSX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5),  /* PAX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5),  /* PAX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5),  /* PAX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5),  /* PAX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5),  /* PAX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5),  /* PAX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5),  /* PFXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5),  /* PFXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5),  /* PFXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5),  /* PFXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5),  /* PFXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5),  /* PFXA 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5),  /* PSXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5),  /* PSXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5),  /* PSXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5),  /* PSXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5),  /* PSXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5),  /* PSXA 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5),  /* PAXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5),  /* PAXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5),  /* PAXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5),  /* PAXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5),  /* PAXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5),  /* PAXA 28XG5 */
	{0}  /* sentinel: terminates the table */
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver registration structure */
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
/*
 * Module init: reserve a char-device major with max_devices minors,
 * register the device class, then register the PCI driver.  Each step
 * is unwound in reverse order on failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	rc = class_register(&switchtec_class);
	if (rc)
		goto err_create_class;

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_unregister(&switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
/* Module exit: unwind switchtec_init() in reverse order, then drop the
 * minor-number IDA. */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_unregister(&switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);