// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static int nirqs = 32;
module_param(nirqs, int, 0644);
MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	wait_queue_head_t cmd_comp;
	struct kref kref;
	struct list_head list;

	bool cmd_done;
	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_waitqueue_head(&stuser->cmd_comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}
static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	stuser->cmd_done = false;
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	stuser->cmd_done = true;
	wake_up_interruptible(&stuser->cmd_comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}
static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);
static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else if (stdev->gen == SWITCHTEC_GEN4) \
		return io_string_show(buf, &si->gen4.field, \
				      sizeof(si->gen4.field)); \
	else \
		return -EOPNOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
static ssize_t component_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;

	/* component_vendor field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "none\n");

	return io_string_show(buf, &si->gen3.component_vendor,
			      sizeof(si->gen3.component_vendor));
}
static DEVICE_ATTR_RO(component_vendor);
static ssize_t component_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);

	/* component_id field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "none\n");

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);
static ssize_t component_revision_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);

	/* component_revision field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sprintf(buf, "255\n");

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);
static ssize_t partition_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);
static ssize_t partition_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
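
/*
 * Illustrative userspace sketch (not part of the driver): the attributes
 * registered above appear under /sys/class/switchtec/switchtec<N>/, so a
 * management tool can read them with plain file I/O, e.g.:
 *
 *	char ver[64] = {};
 *	int fd = open("/sys/class/switchtec/switchtec0/fw_version", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, ver, sizeof(ver) - 1) > 0)
 *		printf("firmware: %s", ver);
 *
 * The device index ("switchtec0") is an assumption for the example; the
 * actual index depends on probe order on the running system.
 */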
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	stream_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
	    !capable(CAP_SYS_ADMIN)) {
		rc = -EPERM;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!stuser->cmd_done)
			return -EAGAIN;
	} else {
		rc = wait_event_interruptible(stuser->cmd_comp,
					      stuser->cmd_done);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
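
/*
 * Illustrative userspace sketch (not part of the driver): per the write()
 * and read() handlers above, an MRPC command is issued by writing a 32-bit
 * command ID followed by its input payload, then reading back a 32-bit
 * return code followed by the output payload. Roughly:
 *
 *	u32 cmd = ...;                      // MRPC command number
 *	u8 buf[4 + 1024];
 *
 *	memcpy(buf, &cmd, 4);
 *	memcpy(buf + 4, input, input_len);
 *	write(fd, buf, 4 + input_len);
 *
 *	read(fd, buf, 4 + output_len);      // buf[0..3] = return code
 *
 * Buffer sizes and the command number here are placeholders; the real
 * command definitions live in the switchtec user-space tooling.
 */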
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->cmd_comp, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->cmd_done)
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	if (stdev->gen == SWITCHTEC_GEN3) {
		info.flash_length = ioread32(&fi->gen3.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
	} else if (stdev->gen == SWITCHTEC_GEN4) {
		info.flash_length = ioread32(&fi->gen4.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
	} else {
		return -EOPNOTSUPP;
	}

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
static int flash_part_info_gen3(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}
static int flash_part_info_gen4(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_MAP_0:
		set_fw_info_part(info, &fi->map0);
		break;
	case SWITCHTEC_IOCTL_PART_MAP_1:
		set_fw_info_part(info, &fi->map1);
		break;
	case SWITCHTEC_IOCTL_PART_KEY_0:
		set_fw_info_part(info, &fi->key0);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_KEY_1:
		set_fw_info_part(info, &fi->key1);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_0:
		set_fw_info_part(info, &fi->bl2_0);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_1:
		set_fw_info_part(info, &fi->bl2_1);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG0:
		set_fw_info_part(info, &fi->cfg0);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		set_fw_info_part(info, &fi->cfg1);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		set_fw_info_part(info, &fi->img0);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		set_fw_info_part(info, &fi->img1);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
				 struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	int ret;
	struct switchtec_ioctl_flash_part_info info = {0};

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	if (stdev->gen == SWITCHTEC_GEN3) {
		ret = flash_part_info_gen3(stdev, &info);
		if (ret)
			return ret;
	} else if (stdev->gen == SWITCHTEC_GEN4) {
		ret = flash_part_info_gen4(stdev, &info);
		if (ret)
			return ret;
	} else {
		return -EOPNOTSUPP;
	}

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int ioctl_event_summary(struct switchtec_dev *stdev,
			       struct switchtec_user *stuser,
			       struct switchtec_ioctl_event_summary __user *usum,
			       size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < stdev->pff_csr_count; i++) {
		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
	       intercomm_notify_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return (u32 __iomem *)ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return (u32 __iomem *)ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return (u32 __iomem *)ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	iowrite32(hdr, reg);

	return 0;
}
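
/*
 * Illustrative userspace sketch (not part of the driver): the event control
 * path above is reached through the SWITCHTEC_IOCTL_EVENT_CTL ioctl handled
 * below, e.g. to arm an event class for poll() notification:
 *
 *	struct switchtec_ioctl_event_ctl ctl = {
 *		.event_id = SWITCHTEC_IOCTL_EVENT_LINK_STATE,
 *		.index = SWITCHTEC_IOCTL_EVENT_IDX_ALL,
 *		.flags = SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL,
 *	};
 *
 *	ioctl(fd, SWITCHTEC_IOCTL_EVENT_CTL, &ctl);
 *
 * The event and flag chosen here are just one example; any entry of
 * event_regs[] can be targeted the same way.
 */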
static int ioctl_event_ctl(struct switchtec_dev *stdev,
			   struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);

	return rc;
}
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
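
/*
 * Illustrative userspace sketch (not part of the driver): with the char
 * device registered through switchtec_fops, the flash geometry exposed by
 * ioctl_flash_info() can be queried using the definitions from
 * <linux/switchtec_ioctl.h>:
 *
 *	struct switchtec_ioctl_flash_info info;
 *	int fd = open("/dev/switchtec0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, SWITCHTEC_IOCTL_FLASH_INFO, &info) == 0)
 *		printf("flash: %u bytes, %u partitions\n",
 *		       info.flash_length, info.num_partitions);
 *
 * "/dev/switchtec0" is an assumed device node name; the index is assigned
 * at probe time.
 */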
static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}
static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}
static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		stuser->cmd_done = true;
		wake_up_interruptible(&stuser->cmd_comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}
static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	ret = IRQ_HANDLED;
	return ret;
}
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);
	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < stdev->pff_csr_count)
			stdev->pff_local[reg] = 1;
	}
}
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else if (stdev->gen == SWITCHTEC_GEN4)
		part_id = &stdev->mmio_sys_info->gen4.partition_id;
	else
		return -EOPNOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor = PCI_VENDOR_ID_MICROSEMI, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor = PCI_VENDOR_ID_MICROSEMI, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  //PFX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  //PFX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  //PFX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  //PFX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  //PFX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  //PFX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  //PSX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  //PSX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  //PSX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  //PSX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  //PSX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  //PSX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  //PAX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  //PAX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  //PAX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  //PAX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  //PAX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  //PAX 28XG4
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);