// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */
16 BFA_TRC_FILE(CNA
, IOC_CT
);
18 #define bfa_ioc_ct_sync_pos(__ioc) \
19 ((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
20 #define BFA_IOC_SYNC_REQD_SH 16
21 #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
22 #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
23 #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
24 #define bfa_ioc_ct_sync_reqd_pos(__ioc) \
25 (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
28 * forward declarations
30 static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s
*ioc
);
31 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s
*ioc
);
32 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s
*ioc
);
33 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s
*ioc
);
34 static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s
*ioc
);
35 static void bfa_ioc_ct_sync_join(struct bfa_ioc_s
*ioc
);
36 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s
*ioc
);
37 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s
*ioc
);
38 static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s
*ioc
);
39 static void bfa_ioc_ct_set_cur_ioc_fwstate(
40 struct bfa_ioc_s
*ioc
, enum bfi_ioc_state fwstate
);
41 static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s
*ioc
);
42 static void bfa_ioc_ct_set_alt_ioc_fwstate(
43 struct bfa_ioc_s
*ioc
, enum bfi_ioc_state fwstate
);
44 static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s
*ioc
);
46 static struct bfa_ioc_hwif_s hwif_ct
;
47 static struct bfa_ioc_hwif_s hwif_ct2
;
50 * Return true if firmware of current driver matches the running firmware.
53 bfa_ioc_ct_firmware_lock(struct bfa_ioc_s
*ioc
)
55 enum bfi_ioc_state ioc_fwstate
;
57 struct bfi_ioc_image_hdr_s fwhdr
;
59 bfa_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
60 usecnt
= readl(ioc
->ioc_regs
.ioc_usage_reg
);
63 * If usage count is 0, always return TRUE.
66 writel(1, ioc
->ioc_regs
.ioc_usage_reg
);
67 readl(ioc
->ioc_regs
.ioc_usage_sem_reg
);
68 writel(1, ioc
->ioc_regs
.ioc_usage_sem_reg
);
69 writel(0, ioc
->ioc_regs
.ioc_fail_sync
);
74 ioc_fwstate
= readl(ioc
->ioc_regs
.ioc_fwstate
);
75 bfa_trc(ioc
, ioc_fwstate
);
78 * Use count cannot be non-zero and chip in uninitialized state.
80 WARN_ON(ioc_fwstate
== BFI_IOC_UNINIT
);
83 * Check if another driver with a different firmware is active
85 bfa_ioc_fwver_get(ioc
, &fwhdr
);
86 if (!bfa_ioc_fwver_cmp(ioc
, &fwhdr
)) {
87 readl(ioc
->ioc_regs
.ioc_usage_sem_reg
);
88 writel(1, ioc
->ioc_regs
.ioc_usage_sem_reg
);
94 * Same firmware version. Increment the reference count.
97 writel(usecnt
, ioc
->ioc_regs
.ioc_usage_reg
);
98 readl(ioc
->ioc_regs
.ioc_usage_sem_reg
);
99 writel(1, ioc
->ioc_regs
.ioc_usage_sem_reg
);
100 bfa_trc(ioc
, usecnt
);
105 bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s
*ioc
)
110 * decrement usage count
112 bfa_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
113 usecnt
= readl(ioc
->ioc_regs
.ioc_usage_reg
);
114 WARN_ON(usecnt
<= 0);
117 writel(usecnt
, ioc
->ioc_regs
.ioc_usage_reg
);
118 bfa_trc(ioc
, usecnt
);
120 readl(ioc
->ioc_regs
.ioc_usage_sem_reg
);
121 writel(1, ioc
->ioc_regs
.ioc_usage_sem_reg
);
125 * Notify other functions on HB failure.
128 bfa_ioc_ct_notify_fail(struct bfa_ioc_s
*ioc
)
130 if (bfa_ioc_is_cna(ioc
)) {
131 writel(__FW_INIT_HALT_P
, ioc
->ioc_regs
.ll_halt
);
132 writel(__FW_INIT_HALT_P
, ioc
->ioc_regs
.alt_ll_halt
);
133 /* Wait for halt to take effect */
134 readl(ioc
->ioc_regs
.ll_halt
);
135 readl(ioc
->ioc_regs
.alt_ll_halt
);
137 writel(~0U, ioc
->ioc_regs
.err_set
);
138 readl(ioc
->ioc_regs
.err_set
);
143 * Host to LPU mailbox message addresses
145 static struct { u32 hfn_mbox
, lpu_mbox
, hfn_pgn
; } ct_fnreg
[] = {
146 { HOSTFN0_LPU_MBOX0_0
, LPU_HOSTFN0_MBOX0_0
, HOST_PAGE_NUM_FN0
},
147 { HOSTFN1_LPU_MBOX0_8
, LPU_HOSTFN1_MBOX0_8
, HOST_PAGE_NUM_FN1
},
148 { HOSTFN2_LPU_MBOX0_0
, LPU_HOSTFN2_MBOX0_0
, HOST_PAGE_NUM_FN2
},
149 { HOSTFN3_LPU_MBOX0_8
, LPU_HOSTFN3_MBOX0_8
, HOST_PAGE_NUM_FN3
}
153 * Host <-> LPU mailbox command/status registers - port 0
155 static struct { u32 hfn
, lpu
; } ct_p0reg
[] = {
156 { HOSTFN0_LPU0_CMD_STAT
, LPU0_HOSTFN0_CMD_STAT
},
157 { HOSTFN1_LPU0_CMD_STAT
, LPU0_HOSTFN1_CMD_STAT
},
158 { HOSTFN2_LPU0_CMD_STAT
, LPU0_HOSTFN2_CMD_STAT
},
159 { HOSTFN3_LPU0_CMD_STAT
, LPU0_HOSTFN3_CMD_STAT
}
163 * Host <-> LPU mailbox command/status registers - port 1
165 static struct { u32 hfn
, lpu
; } ct_p1reg
[] = {
166 { HOSTFN0_LPU1_CMD_STAT
, LPU1_HOSTFN0_CMD_STAT
},
167 { HOSTFN1_LPU1_CMD_STAT
, LPU1_HOSTFN1_CMD_STAT
},
168 { HOSTFN2_LPU1_CMD_STAT
, LPU1_HOSTFN2_CMD_STAT
},
169 { HOSTFN3_LPU1_CMD_STAT
, LPU1_HOSTFN3_CMD_STAT
}
172 static struct { uint32_t hfn_mbox
, lpu_mbox
, hfn_pgn
, hfn
, lpu
, lpu_read
; }
174 { CT2_HOSTFN_LPU0_MBOX0
, CT2_LPU0_HOSTFN_MBOX0
, CT2_HOSTFN_PAGE_NUM
,
175 CT2_HOSTFN_LPU0_CMD_STAT
, CT2_LPU0_HOSTFN_CMD_STAT
,
176 CT2_HOSTFN_LPU0_READ_STAT
},
177 { CT2_HOSTFN_LPU1_MBOX0
, CT2_LPU1_HOSTFN_MBOX0
, CT2_HOSTFN_PAGE_NUM
,
178 CT2_HOSTFN_LPU1_CMD_STAT
, CT2_LPU1_HOSTFN_CMD_STAT
,
179 CT2_HOSTFN_LPU1_READ_STAT
},
183 bfa_ioc_ct_reg_init(struct bfa_ioc_s
*ioc
)
186 int pcifn
= bfa_ioc_pcifn(ioc
);
188 rb
= bfa_ioc_bar0(ioc
);
190 ioc
->ioc_regs
.hfn_mbox
= rb
+ ct_fnreg
[pcifn
].hfn_mbox
;
191 ioc
->ioc_regs
.lpu_mbox
= rb
+ ct_fnreg
[pcifn
].lpu_mbox
;
192 ioc
->ioc_regs
.host_page_num_fn
= rb
+ ct_fnreg
[pcifn
].hfn_pgn
;
194 if (ioc
->port_id
== 0) {
195 ioc
->ioc_regs
.heartbeat
= rb
+ BFA_IOC0_HBEAT_REG
;
196 ioc
->ioc_regs
.ioc_fwstate
= rb
+ BFA_IOC0_STATE_REG
;
197 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ BFA_IOC1_STATE_REG
;
198 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct_p0reg
[pcifn
].hfn
;
199 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct_p0reg
[pcifn
].lpu
;
200 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P0
;
201 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P1
;
203 ioc
->ioc_regs
.heartbeat
= (rb
+ BFA_IOC1_HBEAT_REG
);
204 ioc
->ioc_regs
.ioc_fwstate
= (rb
+ BFA_IOC1_STATE_REG
);
205 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ BFA_IOC0_STATE_REG
;
206 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct_p1reg
[pcifn
].hfn
;
207 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct_p1reg
[pcifn
].lpu
;
208 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P1
;
209 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P0
;
213 * PSS control registers
215 ioc
->ioc_regs
.pss_ctl_reg
= (rb
+ PSS_CTL_REG
);
216 ioc
->ioc_regs
.pss_err_status_reg
= (rb
+ PSS_ERR_STATUS_REG
);
217 ioc
->ioc_regs
.app_pll_fast_ctl_reg
= (rb
+ APP_PLL_LCLK_CTL_REG
);
218 ioc
->ioc_regs
.app_pll_slow_ctl_reg
= (rb
+ APP_PLL_SCLK_CTL_REG
);
221 * IOC semaphore registers and serialization
223 ioc
->ioc_regs
.ioc_sem_reg
= (rb
+ HOST_SEM0_REG
);
224 ioc
->ioc_regs
.ioc_usage_sem_reg
= (rb
+ HOST_SEM1_REG
);
225 ioc
->ioc_regs
.ioc_init_sem_reg
= (rb
+ HOST_SEM2_REG
);
226 ioc
->ioc_regs
.ioc_usage_reg
= (rb
+ BFA_FW_USE_COUNT
);
227 ioc
->ioc_regs
.ioc_fail_sync
= (rb
+ BFA_IOC_FAIL_SYNC
);
232 ioc
->ioc_regs
.smem_page_start
= (rb
+ PSS_SMEM_PAGE_START
);
233 ioc
->ioc_regs
.smem_pg0
= BFI_IOC_SMEM_PG0_CT
;
236 * err set reg : for notification of hb failure in fcmode
238 ioc
->ioc_regs
.err_set
= (rb
+ ERR_SET_REG
);
242 bfa_ioc_ct2_reg_init(struct bfa_ioc_s
*ioc
)
245 int port
= bfa_ioc_portid(ioc
);
247 rb
= bfa_ioc_bar0(ioc
);
249 ioc
->ioc_regs
.hfn_mbox
= rb
+ ct2_reg
[port
].hfn_mbox
;
250 ioc
->ioc_regs
.lpu_mbox
= rb
+ ct2_reg
[port
].lpu_mbox
;
251 ioc
->ioc_regs
.host_page_num_fn
= rb
+ ct2_reg
[port
].hfn_pgn
;
252 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct2_reg
[port
].hfn
;
253 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct2_reg
[port
].lpu
;
254 ioc
->ioc_regs
.lpu_read_stat
= rb
+ ct2_reg
[port
].lpu_read
;
257 ioc
->ioc_regs
.heartbeat
= rb
+ CT2_BFA_IOC0_HBEAT_REG
;
258 ioc
->ioc_regs
.ioc_fwstate
= rb
+ CT2_BFA_IOC0_STATE_REG
;
259 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ CT2_BFA_IOC1_STATE_REG
;
260 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P0
;
261 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P1
;
263 ioc
->ioc_regs
.heartbeat
= (rb
+ CT2_BFA_IOC1_HBEAT_REG
);
264 ioc
->ioc_regs
.ioc_fwstate
= (rb
+ CT2_BFA_IOC1_STATE_REG
);
265 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ CT2_BFA_IOC0_STATE_REG
;
266 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P1
;
267 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P0
;
271 * PSS control registers
273 ioc
->ioc_regs
.pss_ctl_reg
= (rb
+ PSS_CTL_REG
);
274 ioc
->ioc_regs
.pss_err_status_reg
= (rb
+ PSS_ERR_STATUS_REG
);
275 ioc
->ioc_regs
.app_pll_fast_ctl_reg
= (rb
+ CT2_APP_PLL_LCLK_CTL_REG
);
276 ioc
->ioc_regs
.app_pll_slow_ctl_reg
= (rb
+ CT2_APP_PLL_SCLK_CTL_REG
);
279 * IOC semaphore registers and serialization
281 ioc
->ioc_regs
.ioc_sem_reg
= (rb
+ CT2_HOST_SEM0_REG
);
282 ioc
->ioc_regs
.ioc_usage_sem_reg
= (rb
+ CT2_HOST_SEM1_REG
);
283 ioc
->ioc_regs
.ioc_init_sem_reg
= (rb
+ CT2_HOST_SEM2_REG
);
284 ioc
->ioc_regs
.ioc_usage_reg
= (rb
+ CT2_BFA_FW_USE_COUNT
);
285 ioc
->ioc_regs
.ioc_fail_sync
= (rb
+ CT2_BFA_IOC_FAIL_SYNC
);
290 ioc
->ioc_regs
.smem_page_start
= (rb
+ PSS_SMEM_PAGE_START
);
291 ioc
->ioc_regs
.smem_pg0
= BFI_IOC_SMEM_PG0_CT
;
294 * err set reg : for notification of hb failure in fcmode
296 ioc
->ioc_regs
.err_set
= (rb
+ ERR_SET_REG
);
300 * Initialize IOC to port mapping.
303 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
305 bfa_ioc_ct_map_port(struct bfa_ioc_s
*ioc
)
307 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
311 * For catapult, base port id on personality register and IOC type
313 r32
= readl(rb
+ FNC_PERS_REG
);
314 r32
>>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
));
315 ioc
->port_id
= (r32
& __F0_PORT_MAP_MK
) >> __F0_PORT_MAP_SH
;
317 bfa_trc(ioc
, bfa_ioc_pcifn(ioc
));
318 bfa_trc(ioc
, ioc
->port_id
);
322 bfa_ioc_ct2_map_port(struct bfa_ioc_s
*ioc
)
324 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
327 r32
= readl(rb
+ CT2_HOSTFN_PERSONALITY0
);
328 ioc
->port_id
= ((r32
& __FC_LL_PORT_MAP__MK
) >> __FC_LL_PORT_MAP__SH
);
330 bfa_trc(ioc
, bfa_ioc_pcifn(ioc
));
331 bfa_trc(ioc
, ioc
->port_id
);
335 * Set interrupt mode for a function: INTX or MSIX
338 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s
*ioc
, bfa_boolean_t msix
)
340 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
343 r32
= readl(rb
+ FNC_PERS_REG
);
346 mode
= (r32
>> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
))) &
350 * If already in desired mode, do not change anything
352 if ((!msix
&& mode
) || (msix
&& !mode
))
356 mode
= __F0_INTX_STATUS_MSIX
;
358 mode
= __F0_INTX_STATUS_INTA
;
360 r32
&= ~(__F0_INTX_STATUS
<< FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
)));
361 r32
|= (mode
<< FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
)));
364 writel(r32
, rb
+ FNC_PERS_REG
);
368 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s
*ioc
)
372 r32
= readl(ioc
->ioc_regs
.lpu_read_stat
);
374 writel(1, ioc
->ioc_regs
.lpu_read_stat
);
382 * Cleanup hw semaphore and usecnt registers
385 bfa_ioc_ct_ownership_reset(struct bfa_ioc_s
*ioc
)
388 bfa_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
389 writel(0, ioc
->ioc_regs
.ioc_usage_reg
);
390 readl(ioc
->ioc_regs
.ioc_usage_sem_reg
);
391 writel(1, ioc
->ioc_regs
.ioc_usage_sem_reg
);
393 writel(0, ioc
->ioc_regs
.ioc_fail_sync
);
395 * Read the hw sem reg to make sure that it is locked
396 * before we clear it. If it is not locked, writing 1
397 * will lock it instead of clearing it.
399 readl(ioc
->ioc_regs
.ioc_sem_reg
);
400 writel(1, ioc
->ioc_regs
.ioc_sem_reg
);
404 bfa_ioc_ct_sync_start(struct bfa_ioc_s
*ioc
)
406 uint32_t r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
407 uint32_t sync_reqd
= bfa_ioc_ct_get_sync_reqd(r32
);
410 * Driver load time. If the sync required bit for this PCI fn
411 * is set, it is due to an unclean exit by the driver for this
412 * PCI fn in the previous incarnation. Whoever comes here first
413 * should clean it up, no matter which PCI fn.
416 if (sync_reqd
& bfa_ioc_ct_sync_pos(ioc
)) {
417 writel(0, ioc
->ioc_regs
.ioc_fail_sync
);
418 writel(1, ioc
->ioc_regs
.ioc_usage_reg
);
419 writel(BFI_IOC_UNINIT
, ioc
->ioc_regs
.ioc_fwstate
);
420 writel(BFI_IOC_UNINIT
, ioc
->ioc_regs
.alt_ioc_fwstate
);
424 return bfa_ioc_ct_sync_complete(ioc
);
428 * Synchronized IOC failure processing routines
431 bfa_ioc_ct_sync_join(struct bfa_ioc_s
*ioc
)
433 uint32_t r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
434 uint32_t sync_pos
= bfa_ioc_ct_sync_reqd_pos(ioc
);
436 writel((r32
| sync_pos
), ioc
->ioc_regs
.ioc_fail_sync
);
440 bfa_ioc_ct_sync_leave(struct bfa_ioc_s
*ioc
)
442 uint32_t r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
443 uint32_t sync_msk
= bfa_ioc_ct_sync_reqd_pos(ioc
) |
444 bfa_ioc_ct_sync_pos(ioc
);
446 writel((r32
& ~sync_msk
), ioc
->ioc_regs
.ioc_fail_sync
);
450 bfa_ioc_ct_sync_ack(struct bfa_ioc_s
*ioc
)
452 uint32_t r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
454 writel((r32
| bfa_ioc_ct_sync_pos(ioc
)),
455 ioc
->ioc_regs
.ioc_fail_sync
);
459 bfa_ioc_ct_sync_complete(struct bfa_ioc_s
*ioc
)
461 uint32_t r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
462 uint32_t sync_reqd
= bfa_ioc_ct_get_sync_reqd(r32
);
463 uint32_t sync_ackd
= bfa_ioc_ct_get_sync_ackd(r32
);
470 * The check below is to see whether any other PCI fn
471 * has reinitialized the ASIC (reset sync_ackd bits)
472 * and failed again while this IOC was waiting for hw
473 * semaphore (in bfa_iocpf_sm_semwait()).
475 tmp_ackd
= sync_ackd
;
476 if ((sync_reqd
& bfa_ioc_ct_sync_pos(ioc
)) &&
477 !(sync_ackd
& bfa_ioc_ct_sync_pos(ioc
)))
478 sync_ackd
|= bfa_ioc_ct_sync_pos(ioc
);
480 if (sync_reqd
== sync_ackd
) {
481 writel(bfa_ioc_ct_clear_sync_ackd(r32
),
482 ioc
->ioc_regs
.ioc_fail_sync
);
483 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.ioc_fwstate
);
484 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.alt_ioc_fwstate
);
489 * If another PCI fn reinitialized and failed again while
490 * this IOC was waiting for hw sem, the sync_ackd bit for
491 * this IOC need to be set again to allow reinitialization.
493 if (tmp_ackd
!= sync_ackd
)
494 writel((r32
| sync_ackd
), ioc
->ioc_regs
.ioc_fail_sync
);
500 * Called from bfa_ioc_attach() to map asic specific calls.
503 bfa_ioc_set_ctx_hwif(struct bfa_ioc_s
*ioc
, struct bfa_ioc_hwif_s
*hwif
)
505 hwif
->ioc_firmware_lock
= bfa_ioc_ct_firmware_lock
;
506 hwif
->ioc_firmware_unlock
= bfa_ioc_ct_firmware_unlock
;
507 hwif
->ioc_notify_fail
= bfa_ioc_ct_notify_fail
;
508 hwif
->ioc_ownership_reset
= bfa_ioc_ct_ownership_reset
;
509 hwif
->ioc_sync_start
= bfa_ioc_ct_sync_start
;
510 hwif
->ioc_sync_join
= bfa_ioc_ct_sync_join
;
511 hwif
->ioc_sync_leave
= bfa_ioc_ct_sync_leave
;
512 hwif
->ioc_sync_ack
= bfa_ioc_ct_sync_ack
;
513 hwif
->ioc_sync_complete
= bfa_ioc_ct_sync_complete
;
514 hwif
->ioc_set_fwstate
= bfa_ioc_ct_set_cur_ioc_fwstate
;
515 hwif
->ioc_get_fwstate
= bfa_ioc_ct_get_cur_ioc_fwstate
;
516 hwif
->ioc_set_alt_fwstate
= bfa_ioc_ct_set_alt_ioc_fwstate
;
517 hwif
->ioc_get_alt_fwstate
= bfa_ioc_ct_get_alt_ioc_fwstate
;
521 * Called from bfa_ioc_attach() to map asic specific calls.
524 bfa_ioc_set_ct_hwif(struct bfa_ioc_s
*ioc
)
526 bfa_ioc_set_ctx_hwif(ioc
, &hwif_ct
);
528 hwif_ct
.ioc_pll_init
= bfa_ioc_ct_pll_init
;
529 hwif_ct
.ioc_reg_init
= bfa_ioc_ct_reg_init
;
530 hwif_ct
.ioc_map_port
= bfa_ioc_ct_map_port
;
531 hwif_ct
.ioc_isr_mode_set
= bfa_ioc_ct_isr_mode_set
;
532 ioc
->ioc_hwif
= &hwif_ct
;
536 * Called from bfa_ioc_attach() to map asic specific calls.
539 bfa_ioc_set_ct2_hwif(struct bfa_ioc_s
*ioc
)
541 bfa_ioc_set_ctx_hwif(ioc
, &hwif_ct2
);
543 hwif_ct2
.ioc_pll_init
= bfa_ioc_ct2_pll_init
;
544 hwif_ct2
.ioc_reg_init
= bfa_ioc_ct2_reg_init
;
545 hwif_ct2
.ioc_map_port
= bfa_ioc_ct2_map_port
;
546 hwif_ct2
.ioc_lpu_read_stat
= bfa_ioc_ct2_lpu_read_stat
;
547 hwif_ct2
.ioc_isr_mode_set
= NULL
;
548 ioc
->ioc_hwif
= &hwif_ct2
;
552 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
554 #define HOSTFN_MSIX_DEFAULT 64
555 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
556 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
557 #define __MSIX_VT_NUMVT__MK 0x003ff800
558 #define __MSIX_VT_NUMVT__SH 11
559 #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
560 #define __MSIX_VT_OFST_ 0x000007ff
562 bfa_ioc_ct2_poweron(struct bfa_ioc_s
*ioc
)
564 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
567 r32
= readl(rb
+ HOSTFN_MSIX_VT_OFST_NUMVT
);
568 if (r32
& __MSIX_VT_NUMVT__MK
) {
569 writel(r32
& __MSIX_VT_OFST_
,
570 rb
+ HOSTFN_MSIX_VT_INDEX_MBOX_ERR
);
574 writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT
- 1) |
575 HOSTFN_MSIX_DEFAULT
* bfa_ioc_pcifn(ioc
),
576 rb
+ HOSTFN_MSIX_VT_OFST_NUMVT
);
577 writel(HOSTFN_MSIX_DEFAULT
* bfa_ioc_pcifn(ioc
),
578 rb
+ HOSTFN_MSIX_VT_INDEX_MBOX_ERR
);
582 bfa_ioc_ct_pll_init(void __iomem
*rb
, enum bfi_asic_mode mode
)
584 u32 pll_sclk
, pll_fclk
, r32
;
585 bfa_boolean_t fcmode
= (mode
== BFI_ASIC_MODE_FC
);
587 pll_sclk
= __APP_PLL_SCLK_LRESETN
| __APP_PLL_SCLK_ENARST
|
588 __APP_PLL_SCLK_RSEL200500
| __APP_PLL_SCLK_P0_1(3U) |
589 __APP_PLL_SCLK_JITLMT0_1(3U) |
590 __APP_PLL_SCLK_CNTLMT0_1(1U);
591 pll_fclk
= __APP_PLL_LCLK_LRESETN
| __APP_PLL_LCLK_ENARST
|
592 __APP_PLL_LCLK_RSEL200500
| __APP_PLL_LCLK_P0_1(3U) |
593 __APP_PLL_LCLK_JITLMT0_1(3U) |
594 __APP_PLL_LCLK_CNTLMT0_1(1U);
597 writel(0, (rb
+ OP_MODE
));
598 writel(__APP_EMS_CMLCKSEL
| __APP_EMS_REFCKBUFEN2
|
599 __APP_EMS_CHANNEL_SEL
, (rb
+ ETH_MAC_SER_REG
));
601 writel(__GLOBAL_FCOE_MODE
, (rb
+ OP_MODE
));
602 writel(__APP_EMS_REFCKBUFEN1
, (rb
+ ETH_MAC_SER_REG
));
604 writel(BFI_IOC_UNINIT
, (rb
+ BFA_IOC0_STATE_REG
));
605 writel(BFI_IOC_UNINIT
, (rb
+ BFA_IOC1_STATE_REG
));
606 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_MSK
));
607 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_MSK
));
608 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_STATUS
));
609 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_STATUS
));
610 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_MSK
));
611 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_MSK
));
612 writel(pll_sclk
| __APP_PLL_SCLK_LOGIC_SOFT_RESET
,
613 rb
+ APP_PLL_SCLK_CTL_REG
);
614 writel(pll_fclk
| __APP_PLL_LCLK_LOGIC_SOFT_RESET
,
615 rb
+ APP_PLL_LCLK_CTL_REG
);
616 writel(pll_sclk
| __APP_PLL_SCLK_LOGIC_SOFT_RESET
|
617 __APP_PLL_SCLK_ENABLE
, rb
+ APP_PLL_SCLK_CTL_REG
);
618 writel(pll_fclk
| __APP_PLL_LCLK_LOGIC_SOFT_RESET
|
619 __APP_PLL_LCLK_ENABLE
, rb
+ APP_PLL_LCLK_CTL_REG
);
620 readl(rb
+ HOSTFN0_INT_MSK
);
622 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_STATUS
));
623 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_STATUS
));
624 writel(pll_sclk
| __APP_PLL_SCLK_ENABLE
, rb
+ APP_PLL_SCLK_CTL_REG
);
625 writel(pll_fclk
| __APP_PLL_LCLK_ENABLE
, rb
+ APP_PLL_LCLK_CTL_REG
);
628 writel(__PMM_1T_RESET_P
, (rb
+ PMM_1T_RESET_REG_P0
));
629 writel(__PMM_1T_RESET_P
, (rb
+ PMM_1T_RESET_REG_P1
));
631 r32
= readl((rb
+ PSS_CTL_REG
));
632 r32
&= ~__PSS_LMEM_RESET
;
633 writel(r32
, (rb
+ PSS_CTL_REG
));
636 writel(0, (rb
+ PMM_1T_RESET_REG_P0
));
637 writel(0, (rb
+ PMM_1T_RESET_REG_P1
));
640 writel(__EDRAM_BISTR_START
, (rb
+ MBIST_CTL_REG
));
642 r32
= readl((rb
+ MBIST_STAT_REG
));
643 writel(0, (rb
+ MBIST_CTL_REG
));
644 return BFA_STATUS_OK
;
648 bfa_ioc_ct2_sclk_init(void __iomem
*rb
)
653 * put s_clk PLL and PLL FSM in reset
655 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
656 r32
&= ~(__APP_PLL_SCLK_ENABLE
| __APP_PLL_SCLK_LRESETN
);
657 r32
|= (__APP_PLL_SCLK_ENARST
| __APP_PLL_SCLK_BYPASS
|
658 __APP_PLL_SCLK_LOGIC_SOFT_RESET
);
659 writel(r32
, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
662 * Ignore mode and program for the max clock (which is FC16)
663 * Firmware/NFC will do the PLL init appropiately
665 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
666 r32
&= ~(__APP_PLL_SCLK_REFCLK_SEL
| __APP_PLL_SCLK_CLK_DIV2
);
667 writel(r32
, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
670 * while doing PLL init dont clock gate ethernet subsystem
672 r32
= readl((rb
+ CT2_CHIP_MISC_PRG
));
673 writel(r32
| __ETH_CLK_ENABLE_PORT0
, (rb
+ CT2_CHIP_MISC_PRG
));
675 r32
= readl((rb
+ CT2_PCIE_MISC_REG
));
676 writel(r32
| __ETH_CLK_ENABLE_PORT1
, (rb
+ CT2_PCIE_MISC_REG
));
681 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
682 r32
&= (__P_SCLK_PLL_LOCK
| __APP_PLL_SCLK_REFCLK_SEL
|
683 __APP_PLL_SCLK_CLK_DIV2
);
684 writel(r32
| 0x1061731b, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
687 * poll for s_clk lock or delay 1ms
693 bfa_ioc_ct2_lclk_init(void __iomem
*rb
)
698 * put l_clk PLL and PLL FSM in reset
700 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
701 r32
&= ~(__APP_PLL_LCLK_ENABLE
| __APP_PLL_LCLK_LRESETN
);
702 r32
|= (__APP_PLL_LCLK_ENARST
| __APP_PLL_LCLK_BYPASS
|
703 __APP_PLL_LCLK_LOGIC_SOFT_RESET
);
704 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
707 * set LPU speed (set for FC16 which will work for other modes)
709 r32
= readl((rb
+ CT2_CHIP_MISC_PRG
));
710 writel(r32
, (rb
+ CT2_CHIP_MISC_PRG
));
713 * set LPU half speed (set for FC16 which will work for other modes)
715 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
716 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
719 * set lclk for mode (set for FC16)
721 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
722 r32
&= (__P_LCLK_PLL_LOCK
| __APP_LPUCLK_HALFSPEED
);
724 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
727 * poll for s_clk lock or delay 1ms
733 bfa_ioc_ct2_mem_init(void __iomem
*rb
)
737 r32
= readl((rb
+ PSS_CTL_REG
));
738 r32
&= ~__PSS_LMEM_RESET
;
739 writel(r32
, (rb
+ PSS_CTL_REG
));
742 writel(__EDRAM_BISTR_START
, (rb
+ CT2_MBIST_CTL_REG
));
744 writel(0, (rb
+ CT2_MBIST_CTL_REG
));
748 bfa_ioc_ct2_mac_reset(void __iomem
*rb
)
750 /* put port0, port1 MAC & AHB in reset */
751 writel((__CSI_MAC_RESET
| __CSI_MAC_AHB_RESET
),
752 rb
+ CT2_CSI_MAC_CONTROL_REG(0));
753 writel((__CSI_MAC_RESET
| __CSI_MAC_AHB_RESET
),
754 rb
+ CT2_CSI_MAC_CONTROL_REG(1));
758 bfa_ioc_ct2_enable_flash(void __iomem
*rb
)
762 r32
= readl((rb
+ PSS_GPIO_OUT_REG
));
763 writel(r32
& ~1, (rb
+ PSS_GPIO_OUT_REG
));
764 r32
= readl((rb
+ PSS_GPIO_OE_REG
));
765 writel(r32
| 1, (rb
+ PSS_GPIO_OE_REG
));
768 #define CT2_NFC_MAX_DELAY 1000
769 #define CT2_NFC_PAUSE_MAX_DELAY 4000
770 #define CT2_NFC_VER_VALID 0x147
771 #define CT2_NFC_STATE_RUNNING 0x20000001
772 #define BFA_IOC_PLL_POLL 1000000
775 bfa_ioc_ct2_nfc_halted(void __iomem
*rb
)
779 r32
= readl(rb
+ CT2_NFC_CSR_SET_REG
);
780 if (r32
& __NFC_CONTROLLER_HALTED
)
787 bfa_ioc_ct2_nfc_halt(void __iomem
*rb
)
791 writel(__HALT_NFC_CONTROLLER
, rb
+ CT2_NFC_CSR_SET_REG
);
792 for (i
= 0; i
< CT2_NFC_MAX_DELAY
; i
++) {
793 if (bfa_ioc_ct2_nfc_halted(rb
))
797 WARN_ON(!bfa_ioc_ct2_nfc_halted(rb
));
801 bfa_ioc_ct2_nfc_resume(void __iomem
*rb
)
806 writel(__HALT_NFC_CONTROLLER
, rb
+ CT2_NFC_CSR_CLR_REG
);
807 for (i
= 0; i
< CT2_NFC_MAX_DELAY
; i
++) {
808 r32
= readl(rb
+ CT2_NFC_CSR_SET_REG
);
809 if (!(r32
& __NFC_CONTROLLER_HALTED
))
817 bfa_ioc_ct2_clk_reset(void __iomem
*rb
)
821 bfa_ioc_ct2_sclk_init(rb
);
822 bfa_ioc_ct2_lclk_init(rb
);
825 * release soft reset on s_clk & l_clk
827 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
828 writel(r32
& ~__APP_PLL_SCLK_LOGIC_SOFT_RESET
,
829 (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
831 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
832 writel(r32
& ~__APP_PLL_LCLK_LOGIC_SOFT_RESET
,
833 (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
838 bfa_ioc_ct2_nfc_clk_reset(void __iomem
*rb
)
842 r32
= readl((rb
+ PSS_CTL_REG
));
843 r32
|= (__PSS_LPU0_RESET
| __PSS_LPU1_RESET
);
844 writel(r32
, (rb
+ PSS_CTL_REG
));
846 writel(__RESET_AND_START_SCLK_LCLK_PLLS
, rb
+ CT2_CSI_FW_CTL_SET_REG
);
848 for (i
= 0; i
< BFA_IOC_PLL_POLL
; i
++) {
849 r32
= readl(rb
+ CT2_NFC_FLASH_STS_REG
);
851 if ((r32
& __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS
))
854 WARN_ON(!(r32
& __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS
));
856 for (i
= 0; i
< BFA_IOC_PLL_POLL
; i
++) {
857 r32
= readl(rb
+ CT2_NFC_FLASH_STS_REG
);
859 if (!(r32
& __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS
))
862 WARN_ON((r32
& __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS
));
864 r32
= readl(rb
+ CT2_CSI_FW_CTL_REG
);
865 WARN_ON((r32
& __RESET_AND_START_SCLK_LCLK_PLLS
));
869 bfa_ioc_ct2_wait_till_nfc_running(void __iomem
*rb
)
874 if (bfa_ioc_ct2_nfc_halted(rb
))
875 bfa_ioc_ct2_nfc_resume(rb
);
876 for (i
= 0; i
< CT2_NFC_PAUSE_MAX_DELAY
; i
++) {
877 r32
= readl(rb
+ CT2_NFC_STS_REG
);
878 if (r32
== CT2_NFC_STATE_RUNNING
)
883 r32
= readl(rb
+ CT2_NFC_STS_REG
);
884 WARN_ON(!(r32
== CT2_NFC_STATE_RUNNING
));
888 bfa_ioc_ct2_pll_init(void __iomem
*rb
, enum bfi_asic_mode mode
)
890 u32 wgn
, r32
, nfc_ver
;
892 wgn
= readl(rb
+ CT2_WGN_STATUS
);
894 if (wgn
== (__WGN_READY
| __GLBL_PF_VF_CFG_RDY
)) {
896 * If flash is corrupted, enable flash explicitly
898 bfa_ioc_ct2_clk_reset(rb
);
899 bfa_ioc_ct2_enable_flash(rb
);
901 bfa_ioc_ct2_mac_reset(rb
);
903 bfa_ioc_ct2_clk_reset(rb
);
904 bfa_ioc_ct2_enable_flash(rb
);
907 nfc_ver
= readl(rb
+ CT2_RSC_GPR15_REG
);
909 if ((nfc_ver
>= CT2_NFC_VER_VALID
) &&
910 (wgn
== (__A2T_AHB_LOAD
| __WGN_READY
))) {
912 bfa_ioc_ct2_wait_till_nfc_running(rb
);
914 bfa_ioc_ct2_nfc_clk_reset(rb
);
916 bfa_ioc_ct2_nfc_halt(rb
);
918 bfa_ioc_ct2_clk_reset(rb
);
919 bfa_ioc_ct2_mac_reset(rb
);
920 bfa_ioc_ct2_clk_reset(rb
);
925 * The very first PCIe DMA Read done by LPU fails with a fatal error,
926 * when Address Translation Cache (ATC) has been enabled by system BIOS.
929 * Disable Invalidated Tag Match Enable capability by setting the bit 26
930 * of CHIP_MISC_PRG to 0, by default it is set to 1.
932 r32
= readl(rb
+ CT2_CHIP_MISC_PRG
);
933 writel((r32
& 0xfbffffff), (rb
+ CT2_CHIP_MISC_PRG
));
936 * Mask the interrupts and clear any
937 * pending interrupts left by BIOS/EFI
940 writel(1, (rb
+ CT2_LPU0_HOSTFN_MBOX0_MSK
));
941 writel(1, (rb
+ CT2_LPU1_HOSTFN_MBOX0_MSK
));
943 /* For first time initialization, no need to clear interrupts */
944 r32
= readl(rb
+ HOST_SEM5_REG
);
946 r32
= readl((rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
948 writel(1, (rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
949 readl((rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
951 r32
= readl((rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
953 writel(1, (rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
954 readl((rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
958 bfa_ioc_ct2_mem_init(rb
);
960 writel(BFI_IOC_UNINIT
, (rb
+ CT2_BFA_IOC0_STATE_REG
));
961 writel(BFI_IOC_UNINIT
, (rb
+ CT2_BFA_IOC1_STATE_REG
));
963 return BFA_STATUS_OK
;
967 bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s
*ioc
,
968 enum bfi_ioc_state fwstate
)
970 writel(fwstate
, ioc
->ioc_regs
.ioc_fwstate
);
973 static enum bfi_ioc_state
974 bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s
*ioc
)
976 return (enum bfi_ioc_state
)readl(ioc
->ioc_regs
.ioc_fwstate
);
980 bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s
*ioc
,
981 enum bfi_ioc_state fwstate
)
983 writel(fwstate
, ioc
->ioc_regs
.alt_ioc_fwstate
);
986 static enum bfi_ioc_state
987 bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s
*ioc
)
989 return (enum bfi_ioc_state
) readl(ioc
->ioc_regs
.alt_ioc_fwstate
);