// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc)	BIT(bfa_ioc_pcifn(__ioc))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
	(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
					enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
					enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init	     = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct_reg_init,
	.ioc_map_port	     = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave      = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack        = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init	     = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct2_reg_init,
	.ioc_map_port	     = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave      = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack        = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/*
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}

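/* Decrement the firmware usage count and release the usage semaphore. */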
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/*
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

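/* Host <-> LPU mailbox message, page and command/status registers - CT2 */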
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT },
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT },
};

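/* Initialize the CT ASIC register address map for this IOC. */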
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

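/* Initialize the CT2 ASIC register address map for this IOC. */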
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg: for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

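/* Initialize IOC to port mapping from the CT2 personality register. */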
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = (r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH;
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}

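/* Check the LPU read status; acknowledge it by writing 1 back if set. */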
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}

	return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}

/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

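/* Set this PCI fn's sync-required bit in the fail-sync register. */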
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel(r32 | sync_pos, ioc->ioc_regs.ioc_fail_sync);
}

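/* Clear this PCI fn's sync-required and sync-ackd bits on a clean leave. */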
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			bfa_ioc_ct_sync_pos(ioc);

	writel(r32 & ~sync_msk, ioc->ioc_regs.ioc_fail_sync);
}

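/* Acknowledge failure synchronization by setting this fn's sync-ackd bit. */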
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}

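/* Return true once every PCI fn that requested failure sync has acked it. */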
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel(r32 | sync_ackd, ioc->ioc_regs.ioc_fail_sync);

	return false;
}

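/* Current and alternate IOC firmware state accessors. */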
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}

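/* CT ASIC PLL initialization: program the s_clk/l_clk PLLs and run memory BIST. */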
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, rb + OP_MODE);
		writel(__APP_EMS_CMLCKSEL |
			__APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL,
			rb + ETH_MAC_SER_REG);
	} else {
		writel(__GLOBAL_FCOE_MODE, rb + OP_MODE);
		writel(__APP_EMS_REFCKBUFEN1,
			rb + ETH_MAC_SER_REG);
	}
	writel(BFI_IOC_UNINIT, rb + BFA_IOC0_STATE_REG);
	writel(BFI_IOC_UNINIT, rb + BFA_IOC1_STATE_REG);
	writel(0xffffffffU, rb + HOSTFN0_INT_MSK);
	writel(0xffffffffU, rb + HOSTFN1_INT_MSK);
	writel(0xffffffffU, rb + HOSTFN0_INT_STATUS);
	writel(0xffffffffU, rb + HOSTFN1_INT_STATUS);
	writel(0xffffffffU, rb + HOSTFN0_INT_MSK);
	writel(0xffffffffU, rb + HOSTFN1_INT_MSK);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, rb + HOSTFN0_INT_STATUS);
	writel(0xffffffffU, rb + HOSTFN1_INT_STATUS);
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, rb + PMM_1T_RESET_REG_P0);
		writel(__PMM_1T_RESET_P, rb + PMM_1T_RESET_REG_P1);
	}
	r32 = readl(rb + PSS_CTL_REG);
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, rb + PSS_CTL_REG);
	udelay(1000);
	if (!fcmode) {
		writel(0, rb + PMM_1T_RESET_REG_P0);
		writel(0, rb + PMM_1T_RESET_REG_P1);
	}

	writel(__EDRAM_BISTR_START, rb + MBIST_CTL_REG);
	udelay(1000);
	r32 = readl(rb + MBIST_STAT_REG);
	writel(0, rb + MBIST_CTL_REG);
	return BFA_STATUS_OK;
}

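/* Program the CT2 s_clk PLL (set for the max clock, FC16). */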
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel(r32 | __ETH_CLK_ENABLE_PORT0,
	       rb + CT2_CHIP_MISC_PRG);

	r32 = readl(rb + CT2_PCIE_MISC_REG);
	writel(r32 | __ETH_CLK_ENABLE_PORT1,
	       rb + CT2_PCIE_MISC_REG);

	/*
	 * set sclk value
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}

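/* Program the CT2 l_clk PLL and LPU speed (set for FC16). */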
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel(r32, rb + CT2_CHIP_MISC_PRG);

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

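/* Release local memory reset and run the CT2 memory BIST. */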
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + PSS_CTL_REG);
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, rb + PSS_CTL_REG);
	udelay(1000);

	writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
	udelay(1000);
	writel(0, rb + CT2_MBIST_CTL_REG);
}

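/* Reset port0/port1 MACs after reinitializing the s_clk and l_clk PLLs. */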
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       rb + CT2_APP_PLL_LCLK_CTL_REG);

	/* put port0, port1 MAC & AHB in reset */
	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
	       rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
	       rb + CT2_CSI_MAC_CONTROL_REG(1));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_VER_VALID	0x143
#define BFA_IOC_PLL_POLL	1000000

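/* Return true if the NFC controller is halted. */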
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}

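/* Unhalt the NFC controller and wait for it to start running. */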
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}

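/* CT2 ASIC PLL initialization, with or without NFC assistance. */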
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if (wgn == (__A2T_AHB_LOAD | __WGN_READY) &&
	    nfc_ver >= CT2_NFC_VER_VALID) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
		       rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl(rb + PSS_GPIO_OUT_REG);
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl(rb + PSS_GPIO_OE_REG);
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
	writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
		if (r32 == 1) {
			writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
			readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
		}
		r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
		if (r32 == 1) {
			writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
			readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
	writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
	return BFA_STATUS_OK;
}