// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/panic_notifier.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <uapi/linux/misc/bcm_vk.h>

#include "bcm_vk.h"

#define PCI_DEVICE_ID_VALKYRIE	0x5e87
#define PCI_DEVICE_ID_VIPER	0x5e88

static DEFINE_IDA(bcm_vk_ida);
enum soc_idx {
	VK_IDX_INVALID = 0,
	VALKYRIE_A0,
	VALKYRIE_B0,
	VIPER,
	VK_IDX_MAX
};

#define IMG_PER_TYPE_MAX	2 /* max number of images per boot type */

struct load_image_entry {
	const u32 image_type;
	const char *image_name[IMG_PER_TYPE_MAX];
};
#define NUM_BOOT_STAGES	2
/* default firmware images names */
static const struct load_image_entry image_tab[][NUM_BOOT_STAGES] = {
	[VALKYRIE_A0] = {
		{VK_IMAGE_TYPE_BOOT1, {"vk_a0-boot1.bin", "vk-boot1.bin"}},
		{VK_IMAGE_TYPE_BOOT2, {"vk_a0-boot2.bin", "vk-boot2.bin"}}
	},
	[VALKYRIE_B0] = {
		{VK_IMAGE_TYPE_BOOT1, {"vk_b0-boot1.bin", "vk-boot1.bin"}},
		{VK_IMAGE_TYPE_BOOT2, {"vk_b0-boot2.bin", "vk-boot2.bin"}}
	},
	[VIPER] = {
		{VK_IMAGE_TYPE_BOOT1, {"vp-boot1.bin", ""}},
		{VK_IMAGE_TYPE_BOOT2, {"vp-boot2.bin", ""}}
	},
};
/* Location of memory base addresses of interest in BAR1 */
/* Load Boot1 to start of ITCM */
#define BAR1_CODEPUSH_BASE_BOOT1	0x100000

/* Allow minimum 1s for Load Image timeout responses */
#define LOAD_IMAGE_TIMEOUT_MS		(1 * MSEC_PER_SEC)

/* Image startup timeouts */
#define BOOT1_STARTUP_TIMEOUT_MS	(5 * MSEC_PER_SEC)
#define BOOT2_STARTUP_TIMEOUT_MS	(10 * MSEC_PER_SEC)

/* 1ms wait for checking the transfer complete status */
#define TXFR_COMPLETE_TIMEOUT_MS	1

/* MSIX usages */
#define VK_MSIX_MSGQ_MAX		3
#define VK_MSIX_NOTF_MAX		1
#define VK_MSIX_TTY_MAX			BCM_VK_NUM_TTY
#define VK_MSIX_IRQ_MAX			(VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX + \
					 VK_MSIX_TTY_MAX)
#define VK_MSIX_IRQ_MIN_REQ		(VK_MSIX_MSGQ_MAX + VK_MSIX_NOTF_MAX)

/* Number of bits set in DMA mask */
#define BCM_VK_DMA_BITS			64

/* Ucode boot wait time */
#define BCM_VK_UCODE_BOOT_US		(100 * USEC_PER_MSEC)
/* 50% margin */
#define BCM_VK_UCODE_BOOT_MAX_US	((BCM_VK_UCODE_BOOT_US * 3) >> 1)

/* deinit time for the card os after receiving doorbell */
#define BCM_VK_DEINIT_TIME_MS		(2 * MSEC_PER_SEC)
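
/*
 * MSI-X vector layout implied by the defines above: vectors 0-2 serve
 * the message queues, vector 3 carries peer notifications, and any
 * remaining vectors (up to BCM_VK_NUM_TTY of them) feed the TTY
 * channels.  The driver can operate with only the first
 * VK_MSIX_IRQ_MIN_REQ vectors; the TTY interrupts are then simply left
 * unwired.
 */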
/*
 * module parameters
 */
static bool auto_load = true;
module_param(auto_load, bool, 0444);
MODULE_PARM_DESC(auto_load,
		 "Load images automatically at PCIe probe time.\n");
static uint nr_scratch_pages = VK_BAR1_SCRATCH_DEF_NR_PAGES;
module_param(nr_scratch_pages, uint, 0444);
MODULE_PARM_DESC(nr_scratch_pages,
		 "Number of pre-allocated DMA-able coherent pages.\n");
static uint nr_ib_sgl_blk = BCM_VK_DEF_IB_SGL_BLK_LEN;
module_param(nr_ib_sgl_blk, uint, 0444);
MODULE_PARM_DESC(nr_ib_sgl_blk,
		 "Number of in-band msg blks for short SGL.\n");
/*
 * alerts that could be generated from peer
 */
const struct bcm_vk_entry bcm_vk_peer_err[BCM_VK_PEER_ERR_NUM] = {
	{ERR_LOG_UECC, ERR_LOG_UECC, "uecc"},
	{ERR_LOG_SSIM_BUSY, ERR_LOG_SSIM_BUSY, "ssim_busy"},
	{ERR_LOG_AFBC_BUSY, ERR_LOG_AFBC_BUSY, "afbc_busy"},
	{ERR_LOG_HIGH_TEMP_ERR, ERR_LOG_HIGH_TEMP_ERR, "high_temp"},
	{ERR_LOG_WDOG_TIMEOUT, ERR_LOG_WDOG_TIMEOUT, "wdog_timeout"},
	{ERR_LOG_SYS_FAULT, ERR_LOG_SYS_FAULT, "sys_fault"},
	{ERR_LOG_RAMDUMP, ERR_LOG_RAMDUMP, "ramdump"},
	{ERR_LOG_COP_WDOG_TIMEOUT, ERR_LOG_COP_WDOG_TIMEOUT,
	 "cop_wdog_timeout"},
	{ERR_LOG_MEM_ALLOC_FAIL, ERR_LOG_MEM_ALLOC_FAIL, "malloc_fail warn"},
	{ERR_LOG_LOW_TEMP_WARN, ERR_LOG_LOW_TEMP_WARN, "low_temp warn"},
	{ERR_LOG_ECC, ERR_LOG_ECC, "ecc"},
	{ERR_LOG_IPC_DWN, ERR_LOG_IPC_DWN, "ipc_down"},
};
/* alerts detected by the host */
const struct bcm_vk_entry bcm_vk_host_err[BCM_VK_HOST_ERR_NUM] = {
	{ERR_LOG_HOST_PCIE_DWN, ERR_LOG_HOST_PCIE_DWN, "PCIe_down"},
	{ERR_LOG_HOST_HB_FAIL, ERR_LOG_HOST_HB_FAIL, "hb_fail"},
	{ERR_LOG_HOST_INTF_V_FAIL, ERR_LOG_HOST_INTF_V_FAIL, "intf_ver_fail"},
};
irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
{
	struct bcm_vk *vk = dev_id;

	if (!bcm_vk_drv_access_ok(vk)) {
		dev_err(&vk->pdev->dev,
			"Interrupt %d received when msgq not inited\n", irq);
		goto skip_schedule_work;
	}

	/* if notification is not pending, set bit and schedule work */
	if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
		queue_work(vk->wq_thread, &vk->wq_work);

skip_schedule_work:
	return IRQ_HANDLED;
}
static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	u32 reg;
	u16 major, minor;
	int ret = 0;

	/* read interface register */
	reg = vkread32(vk, BAR_0, BAR_INTF_VER);
	major = (reg >> BAR_INTF_VER_MAJOR_SHIFT) & BAR_INTF_VER_MASK;
	minor = reg & BAR_INTF_VER_MASK;

	/*
	 * if the major number is 0, it is pre-release and is allowed to
	 * continue; else, check the versions accordingly
	 */
	if (!major) {
		dev_warn(dev, "Pre-release major.minor=%d.%d - drv %d.%d\n",
			 major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
	} else if (major != SEMANTIC_MAJOR) {
		dev_err(dev,
			"Intf major.minor=%d.%d rejected - drv %d.%d\n",
			major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
		bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
		ret = -EPFNOSUPPORT;
	} else {
		dev_dbg(dev,
			"Intf major.minor=%d.%d passed - drv %d.%d\n",
			major, minor, SEMANTIC_MAJOR, SEMANTIC_MINOR);
	}
	return ret;
}
static void bcm_vk_log_notf(struct bcm_vk *vk,
			    struct bcm_vk_alert *alert,
			    struct bcm_vk_entry const *entry_tab,
			    const u32 table_size)
{
	u32 i;
	u32 masked_val, latched_val;
	struct bcm_vk_entry const *entry;
	u32 reg;
	u16 ecc_mem_err, uecc_mem_err;
	struct device *dev = &vk->pdev->dev;

	for (i = 0; i < table_size; i++) {
		entry = &entry_tab[i];
		masked_val = entry->mask & alert->notfs;
		latched_val = entry->mask & alert->flags;

		if (masked_val == ERR_LOG_UECC) {
			/*
			 * if the count differs from the stored one and
			 * exceeds the threshold, log it.
			 */
			reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
			BCM_VK_EXTRACT_FIELD(uecc_mem_err, reg,
					     BCM_VK_MEM_ERR_FIELD_MASK,
					     BCM_VK_UECC_MEM_ERR_SHIFT);
			if ((uecc_mem_err != vk->alert_cnts.uecc) &&
			    (uecc_mem_err >= BCM_VK_UECC_THRESHOLD))
				dev_info(dev,
					 "ALERT! %s.%d uecc RAISED - ErrCnt %d\n",
					 DRV_MODULE_NAME, vk->devid,
					 uecc_mem_err);
			vk->alert_cnts.uecc = uecc_mem_err;
		} else if (masked_val == ERR_LOG_ECC) {
			reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
			BCM_VK_EXTRACT_FIELD(ecc_mem_err, reg,
					     BCM_VK_MEM_ERR_FIELD_MASK,
					     BCM_VK_ECC_MEM_ERR_SHIFT);
			if ((ecc_mem_err != vk->alert_cnts.ecc) &&
			    (ecc_mem_err >= BCM_VK_ECC_THRESHOLD))
				dev_info(dev, "ALERT! %s.%d ecc RAISED - ErrCnt %d\n",
					 DRV_MODULE_NAME, vk->devid,
					 ecc_mem_err);
			vk->alert_cnts.ecc = ecc_mem_err;
		} else if (masked_val != latched_val) {
			/* print a log as info */
			dev_info(dev, "ALERT! %s.%d %s %s\n",
				 DRV_MODULE_NAME, vk->devid, entry->str,
				 masked_val ? "RAISED" : "CLEARED");
		}
	}
}
static void bcm_vk_dump_peer_log(struct bcm_vk *vk)
{
	struct bcm_vk_peer_log log;
	struct bcm_vk_peer_log *log_info = &vk->peerlog_info;
	char loc_buf[BCM_VK_PEER_LOG_LINE_MAX];
	int cnt;
	struct device *dev = &vk->pdev->dev;
	unsigned int data_offset;

	memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));

	dev_dbg(dev, "Peer PANIC: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
		log.buf_size, log.mask, log.rd_idx, log.wr_idx);

	if (!log_info->buf_size) {
		dev_err(dev, "Peer log dump disabled - skipped!\n");
		return;
	}

	/* perform range checking for rd/wr idx */
	if ((log.rd_idx > log_info->mask) ||
	    (log.wr_idx > log_info->mask) ||
	    (log.buf_size != log_info->buf_size) ||
	    (log.mask != log_info->mask)) {
		dev_err(dev,
			"Corrupted Ptrs: Size 0x%x(0x%x) Mask 0x%x(0x%x) [Rd Wr] = [%d %d], skip log dump.\n",
			log_info->buf_size, log.buf_size,
			log_info->mask, log.mask,
			log.rd_idx, log.wr_idx);
		return;
	}

	cnt = 0;
	data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
	loc_buf[BCM_VK_PEER_LOG_LINE_MAX - 1] = '\0';
	while (log.rd_idx != log.wr_idx) {
		loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx);

		if ((loc_buf[cnt] == '\0') ||
		    (cnt == (BCM_VK_PEER_LOG_LINE_MAX - 1))) {
			dev_err(dev, "%s", loc_buf);
			cnt = 0;
		} else {
			cnt++;
		}
		log.rd_idx = (log.rd_idx + 1) & log.mask;
	}
	/* update rd idx at the end */
	vkwrite32(vk, log.rd_idx, BAR_2,
		  vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));
}
void bcm_vk_handle_notf(struct bcm_vk *vk)
{
	u32 reg;
	struct bcm_vk_alert alert;
	bool intf_down;
	unsigned long flags;

	/* handle peer alerts and then locally detected ones */
	reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG);
	intf_down = BCM_VK_INTF_IS_DOWN(reg);
	if (!intf_down) {
		vk->peer_alert.notfs = reg;
		bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err,
				ARRAY_SIZE(bcm_vk_peer_err));
		vk->peer_alert.flags = vk->peer_alert.notfs;
	} else {
		/* turn off access */
		bcm_vk_blk_drv_access(vk);
	}

	/* check and make copy of alert with lock and then free lock */
	spin_lock_irqsave(&vk->host_alert_lock, flags);
	if (intf_down)
		vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;

	alert = vk->host_alert;
	vk->host_alert.flags = vk->host_alert.notfs;
	spin_unlock_irqrestore(&vk->host_alert_lock, flags);

	/* call display with copy */
	bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
			ARRAY_SIZE(bcm_vk_host_err));

	/*
	 * If it is a sys fault or heartbeat timeout, extract the log msg
	 * from the card so that we know what the last fault was
	 */
	if (!intf_down &&
	    ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
	     (vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
		bcm_vk_dump_peer_log(vk);
}
static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
			      u64 offset, u32 mask, u32 value,
			      unsigned long timeout_ms)
{
	struct device *dev = &vk->pdev->dev;
	unsigned long start_time;
	unsigned long timeout;
	u32 rd_val, boot_status;

	start_time = jiffies;
	timeout = start_time + msecs_to_jiffies(timeout_ms);

	do {
		rd_val = vkread32(vk, bar, offset);
		dev_dbg(dev, "BAR%d Offset=0x%llx: 0x%x\n",
			bar, offset, rd_val);

		/* check for any boot err condition */
		boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
		if (boot_status & BOOT_ERR_MASK) {
			dev_err(dev, "Boot Err 0x%x, progress 0x%x after %d ms\n",
				(boot_status & BOOT_ERR_MASK) >> BOOT_ERR_SHIFT,
				boot_status & BOOT_PROG_MASK,
				jiffies_to_msecs(jiffies - start_time));
			return -EFAULT;
		}

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		cpu_relax();
		cond_resched();
	} while ((rd_val & mask) != value);

	return 0;
}
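
/*
 * BAR2 layout as consumed by the two readers below: the BAR0 spare
 * register BAR_CARD_STATIC_INFO holds an offset into BAR2 where
 * struct bcm_vk_card_info lives; the peer log header and its data
 * buffer follow immediately, and the process monitoring block (count,
 * entry size, entries) sits after the peer log buffer.
 */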
static void bcm_vk_get_card_info(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	u32 offset;
	int i;
	u8 *dst;
	struct bcm_vk_card_info *info = &vk->card_info;

	/* first read the offset from spare register */
	offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
	offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);

	/* based on the offset, read info to internal card info structure */
	dst = (u8 *)info;
	for (i = 0; i < sizeof(*info); i++)
		*dst++ = vkread8(vk, BAR_2, offset++);

#define CARD_INFO_LOG_FMT "version   : %x\n" \
			  "os_tag    : %s\n" \
			  "cmpt_tag  : %s\n" \
			  "cpu_freq  : %d MHz\n" \
			  "cpu_scale : %d full, %d lowest\n" \
			  "ddr_freq  : %d MHz\n" \
			  "ddr_size  : %d MB\n" \
			  "video_freq: %d MHz\n"
	dev_dbg(dev, CARD_INFO_LOG_FMT, info->version, info->os_tag,
		info->cmpt_tag, info->cpu_freq_mhz, info->cpu_scale[0],
		info->cpu_scale[MAX_OPP - 1], info->ddr_freq_mhz,
		info->ddr_size_MB, info->video_core_freq_mhz);

	/*
	 * get the peer log pointer, only need the offset, and get record
	 * of the log buffer information which would be used for checking
	 * before dump, in case the BAR2 memory has been corrupted.
	 */
	vk->peerlog_off = offset;
	memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off,
		      sizeof(vk->peerlog_info));

	/*
	 * Do a range checking and if out of bound, the record will be zeroed
	 * which guarantees that nothing would be dumped.  In other words,
	 * peer dump is disabled.
	 */
	if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
	    (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
	    (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
	    (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
		dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
			vk->peerlog_info.buf_size,
			vk->peerlog_info.mask,
			vk->peerlog_info.rd_idx,
			vk->peerlog_info.wr_idx);
		memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
	} else {
		dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
			vk->peerlog_info.buf_size,
			vk->peerlog_info.mask,
			vk->peerlog_info.rd_idx,
			vk->peerlog_info.wr_idx);
	}
}
static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info;
	u32 num, entry_size, offset, buf_size;
	u8 *dst;

	/* calculate offset which is based on peerlog offset */
	buf_size = vkread32(vk, BAR_2,
			    vk->peerlog_off
			    + offsetof(struct bcm_vk_peer_log, buf_size));
	offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
		 + buf_size;

	/* first read the num and entry size */
	num = vkread32(vk, BAR_2, offset);
	entry_size = vkread32(vk, BAR_2, offset + sizeof(num));

	/* check for max allowed */
	if (num > BCM_VK_PROC_MON_MAX) {
		dev_err(dev, "Processing monitoring entry %d exceeds max %d\n",
			num, BCM_VK_PROC_MON_MAX);
		return;
	}
	mon->num = num;
	mon->entry_size = entry_size;

	vk->proc_mon_off = offset;

	/* read it once; that will capture the static info */
	dst = (u8 *)&mon->entries[0];
	offset += sizeof(num) + sizeof(entry_size);
	memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
}
static int bcm_vk_sync_card_info(struct bcm_vk *vk)
{
	u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY);

	/* check for marker, but allow diags mode to skip sync */
	if (!bcm_vk_msgq_marker_valid(vk))
		return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);

	/*
	 * Write down scratch addr which is used for DMA.  For the
	 * signed part, BAR1 is accessible only after boot2 has come up.
	 */
	if (vk->tdma_addr) {
		vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
			  VK_BAR1_SCRATCH_OFF_HI);
		vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
			  VK_BAR1_SCRATCH_OFF_LO);
		vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
			  VK_BAR1_SCRATCH_SZ_ADDR);
	}

	/* get static card info, only need to read once */
	bcm_vk_get_card_info(vk);

	/* get the proc mon info once */
	bcm_vk_get_proc_mon_info(vk);

	return 0;
}
void bcm_vk_blk_drv_access(struct bcm_vk *vk)
{
	int i;

	/*
	 * kill all the apps except for the process that is resetting.
	 * If not called during reset, reset_pid will be 0, and all will be
	 * killed.
	 */
	spin_lock(&vk->ctx_lock);

	/* set msgq_inited to 0 so that all rd/wr will be blocked */
	atomic_set(&vk->msgq_inited, 0);

	for (i = 0; i < VK_PID_HT_SZ; i++) {
		struct bcm_vk_ctx *ctx;

		list_for_each_entry(ctx, &vk->pid_ht[i].head, node) {
			if (ctx->pid != vk->reset_pid) {
				dev_dbg(&vk->pdev->dev,
					"Send kill signal to pid %d\n",
					ctx->pid);
				kill_pid(find_vpid(ctx->pid), SIGKILL, 1);
			}
		}
	}
	bcm_vk_tty_terminate_tty_user(vk);
	spin_unlock(&vk->ctx_lock);
}
static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
			      dma_addr_t host_buf_addr, u32 buf_size)
{
	/* update the dma address to the card */
	vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
		  VK_BAR1_DMA_BUF_OFF_HI);
	vkwrite32(vk, (u32)host_buf_addr, BAR_1,
		  VK_BAR1_DMA_BUF_OFF_LO);
	vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
}
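
/*
 * Image download comes in two flavors:
 * - boot1 is pushed through BAR1 into ITCM: request SRAM open, stage
 *   the whole file in a coherent buffer, memcpy_toio() it to
 *   BAR1_CODEPUSH_BASE_BOOT1 and ring the codepush doorbell once.
 * - boot2 is streamed through a DMA buffer published with
 *   bcm_vk_buf_notify(): each codepush write announces a chunk of up
 *   to max_buf bytes, and the host waits for the card to clear the
 *   codepush bits (or to raise FW_LOADER_ACK_RCVD_ALL_DATA) before
 *   copying the next chunk.
 */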
static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type,
				     const char *filename)
{
	struct device *dev = &vk->pdev->dev;
	const struct firmware *fw = NULL;
	void *bufp = NULL;
	size_t max_buf, offset;
	int ret;
	u64 offset_codepush;
	u32 codepush;
	u32 value;
	dma_addr_t boot_dma_addr;
	bool is_stdalone;

	if (load_type == VK_IMAGE_TYPE_BOOT1) {
		/*
		 * After POR, enable VK soft BOOTSRC so bootrom does not clear
		 * the pushed image (the TCM memories).
		 */
		value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
		value |= BOOTSRC_SOFT_ENABLE;
		vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);

		codepush = CODEPUSH_BOOTSTART + CODEPUSH_BOOT1_ENTRY;
		offset_codepush = BAR_CODEPUSH_SBL;

		/* Write a 1 to request SRAM open bit */
		vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush);

		/* Wait for VK to respond */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN,
				  SRAM_OPEN, LOAD_IMAGE_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "boot1 wait SRAM err - ret(%d)\n", ret);
			goto err_buf_out;
		}

		max_buf = SZ_256K;
		bufp = dma_alloc_coherent(dev,
					  max_buf,
					  &boot_dma_addr, GFP_KERNEL);
		if (!bufp) {
			dev_err(dev, "Error allocating 0x%zx\n", max_buf);
			ret = -ENOMEM;
			goto err_buf_out;
		}
	} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
		codepush = CODEPUSH_BOOT2_ENTRY;
		offset_codepush = BAR_CODEPUSH_SBI;

		/* Wait for VK to respond */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN,
				  DDR_OPEN, LOAD_IMAGE_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "boot2 wait DDR open error - ret(%d)\n",
				ret);
			goto err_buf_out;
		}

		max_buf = SZ_4M;
		bufp = dma_alloc_coherent(dev,
					  max_buf,
					  &boot_dma_addr, GFP_KERNEL);
		if (!bufp) {
			dev_err(dev, "Error allocating 0x%zx\n", max_buf);
			ret = -ENOMEM;
			goto err_buf_out;
		}

		bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf);
	} else {
		dev_err(dev, "Error invalid image type 0x%x\n", load_type);
		ret = -EINVAL;
		goto err_buf_out;
	}

	offset = 0;
	ret = request_partial_firmware_into_buf(&fw, filename, dev,
						bufp, max_buf, offset);
	if (ret) {
		dev_err(dev, "Error %d requesting firmware file: %s\n",
			ret, filename);
		goto err_firmware_out;
	}
	dev_dbg(dev, "size=0x%zx\n", fw->size);
	if (load_type == VK_IMAGE_TYPE_BOOT1)
		memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1,
			    bufp,
			    fw->size);

	dev_dbg(dev, "Signaling 0x%x to 0x%llx\n", codepush, offset_codepush);
	vkwrite32(vk, codepush, BAR_0, offset_codepush);

	if (load_type == VK_IMAGE_TYPE_BOOT1) {
		u32 boot_status;

		/* wait until done */
		ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
				  BOOT1_RUNNING,
				  BOOT1_RUNNING,
				  BOOT1_STARTUP_TIMEOUT_MS);

		boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
		is_stdalone = !BCM_VK_INTF_IS_DOWN(boot_status) &&
			      (boot_status & BOOT_STDALONE_RUNNING);
		if (ret && !is_stdalone) {
			dev_err(dev,
				"Timeout %ld ms waiting for boot1 to come up - ret(%d)\n",
				BOOT1_STARTUP_TIMEOUT_MS, ret);
			goto err_firmware_out;
		} else if (is_stdalone) {
			u32 reg;

			reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS);
			if ((reg & BOOT1_STDALONE_PROGRESS_MASK) ==
			    BOOT1_STDALONE_SUCCESS) {
				dev_info(dev, "Boot1 standalone success\n");
				ret = 0;
			} else {
				dev_err(dev, "Timeout %ld ms - Boot1 standalone failure\n",
					BOOT1_STARTUP_TIMEOUT_MS);
				ret = -EINVAL;
				goto err_firmware_out;
			}
		}
	} else if (load_type == VK_IMAGE_TYPE_BOOT2) {
		unsigned long timeout;

		timeout = jiffies + msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);

		/* To send more data to VK than max_buf allowed at a time */
		do {
			/*
			 * Check for ack from card.  When the ack is received,
			 * it means all the data is received by the card.
			 * Exit the loop after the ack is received.
			 */
			ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
					  FW_LOADER_ACK_RCVD_ALL_DATA,
					  FW_LOADER_ACK_RCVD_ALL_DATA,
					  TXFR_COMPLETE_TIMEOUT_MS);
			if (ret == 0) {
				dev_dbg(dev, "Exit boot2 download\n");
				break;
			} else if (ret == -EFAULT) {
				dev_err(dev, "Error detected during ACK waiting");
				goto err_firmware_out;
			}

			/* exit the loop, if there is no response from card */
			if (time_after(jiffies, timeout)) {
				dev_err(dev, "Error. No reply from card\n");
				ret = -ETIMEDOUT;
				goto err_firmware_out;
			}

			/* Wait for VK to open BAR space to copy new data */
			ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
					  codepush, 0,
					  TXFR_COMPLETE_TIMEOUT_MS);
			if (ret == 0) {
				offset += max_buf;
				ret = request_partial_firmware_into_buf
						(&fw,
						 filename,
						 dev, bufp,
						 max_buf,
						 offset);
				if (ret) {
					dev_err(dev,
						"Error %d requesting firmware file: %s offset: 0x%zx\n",
						ret, filename, offset);
					goto err_firmware_out;
				}
				dev_dbg(dev, "size=0x%zx\n", fw->size);
				dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
					codepush, offset_codepush);
				vkwrite32(vk, codepush, BAR_0, offset_codepush);
				/* reload timeout after every codepush */
				timeout = jiffies +
					  msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
			} else if (ret == -EFAULT) {
				dev_err(dev, "Error detected waiting for transfer\n");
				goto err_firmware_out;
			}
		} while (1);

		/* wait for fw status bits to indicate app ready */
		ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
				  VK_FWSTS_READY,
				  VK_FWSTS_READY,
				  BOOT2_STARTUP_TIMEOUT_MS);
		if (ret < 0) {
			dev_err(dev, "Boot2 not ready - ret(%d)\n", ret);
			goto err_firmware_out;
		}

		is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
			      BOOT_STDALONE_RUNNING;
		if (!is_stdalone) {
			ret = bcm_vk_intf_ver_chk(vk);
			if (ret) {
				dev_err(dev, "failure in intf version check\n");
				goto err_firmware_out;
			}

			/*
			 * Next, initialize the Message Q if we are loading
			 * boot2.  Do a force sync.
			 */
			ret = bcm_vk_sync_msgq(vk, true);
			if (ret) {
				dev_err(dev, "Boot2 Error reading comm msg Q info\n");
				ret = -EIO;
				goto err_firmware_out;
			}

			/* sync & channel other info */
			ret = bcm_vk_sync_card_info(vk);
			if (ret) {
				dev_err(dev, "Syncing Card Info failure\n");
				goto err_firmware_out;
			}
		}
	}

err_firmware_out:
	release_firmware(fw);

err_buf_out:
	if (bufp)
		dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);

	return ret;
}
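
/*
 * Derive which image the card can accept next purely from
 * BAR_BOOT_STATUS: SRAM open (with the PCIe interface up) means the
 * ROM is waiting for boot1, BOOT1_RUNNING means boot2 is next, and
 * anything else returns 0 so callers reject the load.
 */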
static u32 bcm_vk_next_boot_image(struct bcm_vk *vk)
{
	u32 boot_status;
	u32 fw_status;
	u32 load_type = 0;  /* default for unknown */

	boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
	fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);

	if (!BCM_VK_INTF_IS_DOWN(boot_status) && (boot_status & SRAM_OPEN))
		load_type = VK_IMAGE_TYPE_BOOT1;
	else if (boot_status == BOOT1_RUNNING)
		load_type = VK_IMAGE_TYPE_BOOT2;

	/* Log status so that we know different stages */
	dev_info(&vk->pdev->dev,
		 "boot-status value for next image: 0x%x : fw-status 0x%x\n",
		 boot_status, fw_status);

	return load_type;
}
static enum soc_idx get_soc_idx(struct bcm_vk *vk)
{
	struct pci_dev *pdev = vk->pdev;
	enum soc_idx idx = VK_IDX_INVALID;
	u32 rev;
	static enum soc_idx const vk_soc_tab[] = { VALKYRIE_A0, VALKYRIE_B0 };

	switch (pdev->device) {
	case PCI_DEVICE_ID_VALKYRIE:
		/* get the chip id to decide sub-class */
		rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID));
		if (rev < ARRAY_SIZE(vk_soc_tab)) {
			idx = vk_soc_tab[rev];
		} else {
			/* Default to A0 firmware for all other chip revs */
			idx = VALKYRIE_A0;
			dev_warn(&pdev->dev,
				 "Rev %d not in image lookup table, default to idx=%d\n",
				 rev, idx);
		}
		break;

	case PCI_DEVICE_ID_VIPER:
		idx = VIPER;
		break;

	default:
		dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
	}
	return idx;
}
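
/*
 * Probe the candidate firmware names of an entry in order and return
 * the first one that exists.  Existence is tested cheaply by
 * requesting only sizeof(dummy) bytes of the file into a scratch
 * variable instead of loading the whole image.
 */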
static const char *get_load_fw_name(struct bcm_vk *vk,
				    const struct load_image_entry *entry)
{
	const struct firmware *fw;
	struct device *dev = &vk->pdev->dev;
	int ret;
	unsigned long dummy;
	int i;

	for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
		ret = request_partial_firmware_into_buf(&fw,
							entry->image_name[i],
							dev, &dummy,
							sizeof(dummy),
							0);
		release_firmware(fw);
		if (!ret)
			return entry->image_name[i];
	}
	return NULL;
}
int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
{
	int i, ret = -1;
	enum soc_idx idx;
	struct device *dev = &vk->pdev->dev;
	u32 curr_type;
	const char *curr_name;

	idx = get_soc_idx(vk);
	if (idx == VK_IDX_INVALID)
		goto auto_load_all_exit;

	/* log a message to know the relative loading order */
	dev_dbg(dev, "Load All for device %d\n", vk->devid);

	for (i = 0; i < NUM_BOOT_STAGES; i++) {
		curr_type = image_tab[idx][i].image_type;
		if (bcm_vk_next_boot_image(vk) == curr_type) {
			curr_name = get_load_fw_name(vk, &image_tab[idx][i]);
			if (!curr_name) {
				dev_err(dev, "No suitable firmware exists for type %d",
					curr_type);
				ret = -ENOENT;
				goto auto_load_all_exit;
			}
			ret = bcm_vk_load_image_by_type(vk, curr_type,
							curr_name);
			dev_info(dev, "Auto load %s, ret %d\n",
				 curr_name, ret);

			if (ret) {
				dev_err(dev, "Error loading default %s\n",
					curr_name);
				goto auto_load_all_exit;
			}
		}
	}

auto_load_all_exit:
	return ret;
}
static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
{
	if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
		return -EPERM;

	set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
	queue_work(vk->wq_thread, &vk->wq_work);

	return 0;
}
/*
 * deferred work queue for draining and auto download.
 */
static void bcm_vk_wq_handler(struct work_struct *work)
{
	struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
	struct device *dev = &vk->pdev->dev;
	s32 ret;

	/* check wq offload bit map to perform various operations */
	if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
		/* clear bit right the way for notification */
		clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
		bcm_vk_handle_notf(vk);
	}
	if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
		bcm_vk_auto_load_all_images(vk);

		/*
		 * at the end of operation, clear AUTO bit and pending
		 * bit
		 */
		clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
		clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
	}

	/* next, try to drain */
	ret = bcm_to_h_msg_dequeue(vk);

	if (ret == 0)
		dev_dbg(dev, "Spurious trigger for workqueue\n");
	else if (ret < 0)
		bcm_vk_blk_drv_access(vk);
}
static long bcm_vk_load_image(struct bcm_vk *vk,
			      const struct vk_image __user *arg)
{
	struct device *dev = &vk->pdev->dev;
	const char *image_name;
	struct vk_image image;
	u32 next_loadable;
	enum soc_idx idx;
	int image_idx;
	long ret = -EPERM;

	if (copy_from_user(&image, arg, sizeof(image)))
		return -EACCES;

	if ((image.type != VK_IMAGE_TYPE_BOOT1) &&
	    (image.type != VK_IMAGE_TYPE_BOOT2)) {
		dev_err(dev, "invalid image.type %u\n", image.type);
		return ret;
	}

	next_loadable = bcm_vk_next_boot_image(vk);
	if (next_loadable != image.type) {
		dev_err(dev, "Next expected image %u, Loading %u\n",
			next_loadable, image.type);
		return ret;
	}

	/*
	 * check if something is pending download already.  This could only
	 * happen for now when the driver is being loaded, or if someone has
	 * issued another download command in another shell.
	 */
	if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
		dev_err(dev, "Download operation already pending.\n");
		return ret;
	}

	image_name = image.filename;
	if (image_name[0] == '\0') {
		/* Use default image name if NULL */
		idx = get_soc_idx(vk);
		if (idx == VK_IDX_INVALID)
			goto bcm_vk_load_exit;

		/* Image idx starts with boot1 */
		image_idx = image.type - VK_IMAGE_TYPE_BOOT1;
		image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]);
		if (!image_name) {
			dev_err(dev, "No suitable image found for type %d",
				image.type);
			ret = -ENOENT;
			goto bcm_vk_load_exit;
		}
	} else {
		/* Ensure filename is NULL terminated */
		image.filename[sizeof(image.filename) - 1] = '\0';
	}
	ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
	dev_info(dev, "Load %s, ret %ld\n", image_name, ret);

bcm_vk_load_exit:
	clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);

	return ret;
}
static int bcm_vk_reset_successful(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	u32 fw_status, reset_reason;
	int ret = -EAGAIN;

	/*
	 * Reset could be triggered when the card is in one of several
	 * states:
	 *   i)   in bootROM
	 *   ii)  after boot1
	 *   iii) boot2 running
	 *
	 * i) & ii) - no status bits will be updated.  If vkboot1
	 * runs automatically after reset, it will update the reason
	 * to be unknown reason
	 * iii) - reboot reason match + deinit done.
	 */
	fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS);
	/* immediate exit if interface goes down */
	if (BCM_VK_INTF_IS_DOWN(fw_status)) {
		dev_err(dev, "PCIe Intf Down!\n");
		goto reset_exit;
	}

	reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK);
	if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) ||
	    (reset_reason == VK_FWSTS_RESET_UNKNOWN))
		ret = 0;

	/*
	 * if some of the deinit bits are set, but the done bit is not,
	 * this is a failure if triggered while boot2 is running
	 */
	if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) &&
	    !(fw_status & VK_FWSTS_RESET_DONE))
		ret = -EAGAIN;

reset_exit:
	dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret);

	return ret;
}
static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val)
{
	vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE);
}
static int bcm_vk_trigger_reset(struct bcm_vk *vk)
{
	u32 i;
	u32 value, boot_status;
	bool is_stdalone, is_boot2;
	static const u32 bar0_reg_clr_list[] = { BAR_OS_UPTIME,
						 BAR_INTF_VER,
						 BAR_CARD_VOLTAGE,
						 BAR_CARD_TEMPERATURE,
						 BAR_CARD_PWR_AND_THRE };

	/* clean up before pressing the door bell */
	bcm_vk_drain_msg_on_reset(vk);
	vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY);
	/* make tag '\0' terminated */
	vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);

	for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) {
		vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
		vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
	}
	for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++)
		vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));

	memset(&vk->card_info, 0, sizeof(vk->card_info));
	memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
	memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info));
	memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts));

	/*
	 * When a boot request fails, the CODE_PUSH_OFFSET stays persistent,
	 * allowing us to debug the failure.  When we call reset, we should
	 * clear CODE_PUSH_OFFSET so the ROM does not execute boot again (and
	 * fail again) and instead waits for a new codepush.  And, if the
	 * previous boot has encountered an error, we need to clear the entry
	 * values.
	 */
	boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
	if (boot_status & BOOT_ERR_MASK) {
		dev_info(&vk->pdev->dev,
			 "Card in boot error 0x%x, clear CODEPUSH val\n",
			 boot_status);
		value = 0;
	} else {
		value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
		value &= CODEPUSH_MASK;
	}
	vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);

	/* special reset handling */
	is_stdalone = boot_status & BOOT_STDALONE_RUNNING;
	is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING;
	if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
		/*
		 * if the card is in ramdump mode, it is hitting an error.
		 * Don't reset the reboot reason as it will contain valid info
		 * that is important - simply use special reset
		 */
		vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
		return VK_BAR0_RESET_RAMPDUMP;
	} else if (is_stdalone && !is_boot2) {
		dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
		bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
		return VK_BAR0_RESET_DB_HARD;
	}

	/* reset fw_status with proper reason, and press db */
	vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
	bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);

	/* clear other necessary registers and alert records */
	for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++)
		vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
	memset(&vk->host_alert, 0, sizeof(vk->host_alert));
	memset(&vk->peer_alert, 0, sizeof(vk->peer_alert));
	/* clear 4096 bits of bitmap */
	bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);

	return 0;
}
static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
{
	struct device *dev = &vk->pdev->dev;
	struct vk_reset reset;
	long ret = 0;
	u32 ramdump_reset;
	int special_reset;

	if (copy_from_user(&reset, arg, sizeof(struct vk_reset)))
		return -EFAULT;

	/* check if any download is in-progress, if so return error */
	if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
		dev_err(dev, "Download operation pending - skip reset.\n");
		return -EPERM;
	}

	ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP;
	dev_info(dev, "Issue Reset %s\n",
		 ramdump_reset ? "in ramdump mode" : "");

	/*
	 * The following is the sequence of reset:
	 * - send card level graceful shut down
	 * - wait enough time for VK to handle its business, stopping DMA etc
	 * - kill host apps
	 * - Trigger interrupt with DB
	 */
	bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);

	spin_lock(&vk->ctx_lock);
	if (!vk->reset_pid) {
		vk->reset_pid = task_pid_nr(current);
	} else {
		dev_err(dev, "Reset already launched by process pid %d\n",
			vk->reset_pid);
		ret = -EACCES;
	}
	spin_unlock(&vk->ctx_lock);
	if (ret)
		goto err_exit;

	bcm_vk_blk_drv_access(vk);
	special_reset = bcm_vk_trigger_reset(vk);

	/*
	 * Wait enough time for card os to deinit
	 * and populate the reset reason.
	 */
	msleep(BCM_VK_DEINIT_TIME_MS);

	if (special_reset) {
		/* if it is special ramdump reset, return the type to user */
		reset.arg2 = special_reset;
		if (copy_to_user(arg, &reset, sizeof(reset)))
			ret = -EFAULT;
	} else {
		ret = bcm_vk_reset_successful(vk);
	}

err_exit:
	clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
	return ret;
}
*file
, struct vm_area_struct
*vma
)
1206 struct bcm_vk_ctx
*ctx
= file
->private_data
;
1207 struct bcm_vk
*vk
= container_of(ctx
->miscdev
, struct bcm_vk
, miscdev
);
1208 unsigned long pg_size
;
1210 /* only BAR2 is mmap possible, which is bar num 4 due to 64bit */
1211 #define VK_MMAPABLE_BAR 4
1213 pg_size
= ((pci_resource_len(vk
->pdev
, VK_MMAPABLE_BAR
) - 1)
1215 if (vma
->vm_pgoff
+ vma_pages(vma
) > pg_size
)
1218 vma
->vm_pgoff
+= (pci_resource_start(vk
->pdev
, VK_MMAPABLE_BAR
)
1220 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1222 return io_remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
1223 vma
->vm_end
- vma
->vm_start
,
1227 static long bcm_vk_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
1230 struct bcm_vk_ctx
*ctx
= file
->private_data
;
1231 struct bcm_vk
*vk
= container_of(ctx
->miscdev
, struct bcm_vk
, miscdev
);
1232 void __user
*argp
= (void __user
*)arg
;
1234 dev_dbg(&vk
->pdev
->dev
,
1235 "ioctl, cmd=0x%02x, arg=0x%02lx\n",
1238 mutex_lock(&vk
->mutex
);
1241 case VK_IOCTL_LOAD_IMAGE
:
1242 ret
= bcm_vk_load_image(vk
, argp
);
1245 case VK_IOCTL_RESET
:
1246 ret
= bcm_vk_reset(vk
, argp
);
1253 mutex_unlock(&vk
->mutex
);
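
/*
 * Userspace drives the two ioctls above through the misc node.  A
 * minimal, hypothetical sketch (struct layouts and ioctl numbers come
 * from uapi/linux/misc/bcm_vk.h; error handling omitted):
 *
 *	int fd = open("/dev/" DRV_MODULE_NAME ".0", O_RDWR);
 *
 *	struct vk_image image = { .type = VK_IMAGE_TYPE_BOOT1 };
 *	// an empty filename selects the default image table entry
 *	ioctl(fd, VK_IOCTL_LOAD_IMAGE, &image);
 *
 *	struct vk_reset reset = { 0 };
 *	ioctl(fd, VK_IOCTL_RESET, &reset);
 *	// on a special (e.g. ramdump) reset, reset.arg2 reports its type
 */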
static const struct file_operations bcm_vk_fops = {
	.owner = THIS_MODULE,
	.open = bcm_vk_open,
	.read = bcm_vk_read,
	.write = bcm_vk_write,
	.poll = bcm_vk_poll,
	.release = bcm_vk_release,
	.mmap = bcm_vk_mmap,
	.unlocked_ioctl = bcm_vk_ioctl,
};
static int bcm_vk_on_panic(struct notifier_block *nb,
			   unsigned long e, void *p)
{
	struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb);

	bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);

	return 0;
}
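
/*
 * Probe sequence: allocate the kref-managed bcm_vk, enable the PCI
 * device and regions, set the DMA mask, optionally allocate scratch
 * pages, allocate MSI-X vectors, map the BARs, hook the msgq/notf/tty
 * IRQ handlers, register the misc device, create the workqueue, init
 * the message queues, register the panic notifier, bring up the ttys,
 * then (optionally) trigger the auto download.  The error labels at the
 * bottom unwind in exactly the reverse order, so each goto releases
 * everything acquired before the failure point.
 */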
static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	int i;
	int id;
	int irq;
	char name[20];
	struct bcm_vk *vk;
	struct device *dev = &pdev->dev;
	struct miscdevice *misc_device;
	u32 boot_status;

	/* allocate vk structure which is tied to kref for freeing */
	vk = kzalloc(sizeof(*vk), GFP_KERNEL);
	if (!vk)
		return -ENOMEM;

	kref_init(&vk->kref);
	if (nr_ib_sgl_blk > BCM_VK_IB_SGL_BLK_MAX) {
		dev_warn(dev, "Inband SGL blk %d limited to max %d\n",
			 nr_ib_sgl_blk, BCM_VK_IB_SGL_BLK_MAX);
		nr_ib_sgl_blk = BCM_VK_IB_SGL_BLK_MAX;
	}
	vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE;
	mutex_init(&vk->mutex);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		goto err_free_exit;
	}
	vk->pdev = pci_dev_get(pdev);

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	/* make sure DMA is good */
	err = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(BCM_VK_DMA_BITS));
	if (err) {
		dev_err(dev, "failed to set DMA mask\n");
		goto err_disable_pdev;
	}

	/* The tdma is a scratch area for some DMA testings. */
	if (nr_scratch_pages) {
		vk->tdma_vaddr = dma_alloc_coherent
					(dev,
					 nr_scratch_pages * PAGE_SIZE,
					 &vk->tdma_addr, GFP_KERNEL);
		if (!vk->tdma_vaddr) {
			err = -ENOMEM;
			goto err_disable_pdev;
		}
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vk);

	irq = pci_alloc_irq_vectors(pdev,
				    VK_MSIX_IRQ_MIN_REQ,
				    VK_MSIX_IRQ_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (irq < VK_MSIX_IRQ_MIN_REQ) {
		dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n",
			VK_MSIX_IRQ_MIN_REQ, irq);
		err = (irq >= 0) ? -EINVAL : irq;
		goto err_disable_pdev;
	}

	if (irq != VK_MSIX_IRQ_MAX)
		dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n",
			 irq, VK_MSIX_IRQ_MAX);

	for (i = 0; i < MAX_BAR; i++) {
		/* multiply by 2 for 64 bit BAR mapping */
		vk->bar[i] = pci_ioremap_bar(pdev, i * 2);
		if (!vk->bar[i]) {
			dev_err(dev, "failed to remap BAR%d\n", i);
			err = -ENOMEM;
			goto err_iounmap;
		}
	}

	for (vk->num_irqs = 0;
	     vk->num_irqs < VK_MSIX_MSGQ_MAX;
	     vk->num_irqs++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
				       bcm_vk_msgq_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, vk);
		if (err) {
			dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
				pdev->irq + vk->num_irqs, vk->num_irqs + 1);
			goto err_irq;
		}
	}
	/* one irq for notification from VK */
	err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
			       bcm_vk_notf_irqhandler,
			       IRQF_SHARED, DRV_MODULE_NAME, vk);
	if (err) {
		dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
			pdev->irq + vk->num_irqs, vk->num_irqs + 1);
		goto err_irq;
	}
	vk->num_irqs++;

	for (i = 0;
	     (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
	     i++, vk->num_irqs++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
				       bcm_vk_tty_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, vk);
		if (err) {
			dev_err(dev, "failed request tty IRQ %d for MSIX %d\n",
				pdev->irq + vk->num_irqs, vk->num_irqs + 1);
			goto err_irq;
		}
		bcm_vk_tty_set_irq_enabled(vk, i);
	}

	id = ida_alloc(&bcm_vk_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "unable to get id\n");
		goto err_irq;
	}

	vk->devid = id;
	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	misc_device = &vk->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}
	misc_device->fops = &bcm_vk_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "failed to register device\n");
		goto err_kfree_name;
	}

	INIT_WORK(&vk->wq_work, bcm_vk_wq_handler);

	/* create dedicated workqueue */
	vk->wq_thread = create_singlethread_workqueue(name);
	if (!vk->wq_thread) {
		dev_err(dev, "Fail to create workqueue thread\n");
		err = -ENOMEM;
		goto err_misc_deregister;
	}

	err = bcm_vk_msg_init(vk);
	if (err) {
		dev_err(dev, "failed to init msg queue info\n");
		goto err_destroy_workqueue;
	}

	/* sync other info */
	bcm_vk_sync_card_info(vk);

	/* register for panic notifier */
	vk->panic_nb.notifier_call = bcm_vk_on_panic;
	err = atomic_notifier_chain_register(&panic_notifier_list,
					     &vk->panic_nb);
	if (err) {
		dev_err(dev, "Fail to register panic notifier\n");
		goto err_destroy_workqueue;
	}

	snprintf(name, sizeof(name), KBUILD_MODNAME ".%d_ttyVK", id);
	err = bcm_vk_tty_init(vk, name);
	if (err)
		goto err_unregister_panic_notifier;

	/*
	 * let's trigger an auto download.  We don't want to do it serially
	 * here because at probing time, it is not supposed to block for a
	 * long time.
	 */
	boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
	if (auto_load) {
		if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) {
			err = bcm_vk_trigger_autoload(vk);
			if (err)
				goto err_bcm_vk_tty_exit;
		} else {
			dev_err(dev,
				"Auto-load skipped - BROM not in proper state (0x%x)\n",
				boot_status);
		}
	}

	/* enable hb */
	bcm_vk_hb_init(vk);

	dev_dbg(dev, "BCM-VK:%u created\n", id);

	return 0;

err_bcm_vk_tty_exit:
	bcm_vk_tty_exit(vk);

err_unregister_panic_notifier:
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &vk->panic_nb);

err_destroy_workqueue:
	destroy_workqueue(vk->wq_thread);

err_misc_deregister:
	misc_deregister(misc_device);

err_kfree_name:
	kfree(misc_device->name);
	misc_device->name = NULL;

err_ida_remove:
	ida_free(&bcm_vk_ida, id);

err_irq:
	for (i = 0; i < vk->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), vk);

	pci_disable_msix(pdev);
	pci_disable_msi(pdev);

err_iounmap:
	for (i = 0; i < MAX_BAR; i++) {
		if (vk->bar[i])
			pci_iounmap(pdev, vk->bar[i]);
	}
	pci_release_regions(pdev);

err_disable_pdev:
	if (vk->tdma_vaddr)
		dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
				  vk->tdma_vaddr, vk->tdma_addr);

	pci_free_irq_vectors(pdev);
	pci_disable_device(pdev);
	pci_dev_put(pdev);

err_free_exit:
	kfree(vk);

	return err;
}
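
/*
 * Final teardown of the bcm_vk structure, invoked through kref_put()
 * once the last reference is dropped; bcm_vk_remove() releases the
 * probe-time reference, while other holders (e.g. open file contexts)
 * keep the structure alive until they are done.
 */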
void bcm_vk_release_data(struct kref *kref)
{
	struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
	struct pci_dev *pdev = vk->pdev;

	dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk);
	pci_dev_put(pdev);
	kfree(vk);
}
static void bcm_vk_remove(struct pci_dev *pdev)
{
	int i;
	struct bcm_vk *vk = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &vk->miscdev;

	bcm_vk_hb_deinit(vk);

	/*
	 * Trigger a reset to card and wait enough time for UCODE to rerun,
	 * which re-initializes the card into its default state.
	 * This ensures when driver is re-enumerated it will start from
	 * a completely clean state.
	 */
	bcm_vk_trigger_reset(vk);
	usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US);

	/* unregister panic notifier */
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &vk->panic_nb);

	bcm_vk_msg_remove(vk);
	bcm_vk_tty_exit(vk);

	if (vk->tdma_vaddr)
		dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
				  vk->tdma_vaddr, vk->tdma_addr);

	/* remove if name is set which means misc dev registered */
	if (misc_device->name) {
		misc_deregister(misc_device);
		kfree(misc_device->name);
		ida_free(&bcm_vk_ida, vk->devid);
	}
	for (i = 0; i < vk->num_irqs; i++)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);

	pci_disable_msix(pdev);
	pci_disable_msi(pdev);

	cancel_work_sync(&vk->wq_work);
	destroy_workqueue(vk->wq_thread);
	bcm_vk_tty_wq_exit(vk);

	for (i = 0; i < MAX_BAR; i++) {
		if (vk->bar[i])
			pci_iounmap(pdev, vk->bar[i]);
	}

	dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid);

	pci_release_regions(pdev);
	pci_free_irq_vectors(pdev);
	pci_disable_device(pdev);

	kref_put(&vk->kref, bcm_vk_release_data);
}
static void bcm_vk_shutdown(struct pci_dev *pdev)
{
	struct bcm_vk *vk = pci_get_drvdata(pdev);
	u32 reg, boot_stat;

	reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
	boot_stat = reg & BOOT_STATE_MASK;

	if (boot_stat == BOOT1_RUNNING) {
		/* simply trigger a reset interrupt to park it */
		bcm_vk_trigger_reset(vk);
	} else if (boot_stat == BROM_NOT_RUN) {
		int err;
		u16 lnksta;

		/*
		 * The boot status only reflects the boot condition since the
		 * last reset.  As ucode will run only once to configure pcie,
		 * if multiple resets happen, we lose track of whether ucode
		 * has run or not.  Here, read the current link speed and use
		 * that to sync up the bootstatus properly so that on
		 * reboot-back-up, it has the proper state to start with
		 * autoload.
		 */
		err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
		if (!err &&
		    (lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) {
			reg |= BROM_STATUS_COMPLETE;
			vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);
		}
	}
}
static const struct pci_device_id bcm_vk_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VALKYRIE), },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_VIPER), },
	{ }
};
MODULE_DEVICE_TABLE(pci, bcm_vk_ids);

static struct pci_driver pci_driver = {
	.name     = DRV_MODULE_NAME,
	.id_table = bcm_vk_ids,
	.probe    = bcm_vk_probe,
	.remove   = bcm_vk_remove,
	.shutdown = bcm_vk_shutdown,
};
module_pci_driver(pci_driver);

MODULE_DESCRIPTION("Broadcom VK Host Driver");
MODULE_AUTHOR("Scott Branden <scott.branden@broadcom.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");