/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
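/*
 * Layout of the ioc_fail_sync word, as encoded by the macros below:
 * bits [15:0] hold one sync-acknowledged flag per PCI function and
 * bits [31:16] hold the matching sync-requested flags.
 */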
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init	     = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct_reg_init,
	.ioc_map_port	     = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join	     = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init	     = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct2_reg_init,
	.ioc_map_port	     = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join	     = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
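/*
 * Usage sketch (the exact call site is the attach path noted above):
 * pick the hwif that matches the ASIC generation before any
 * ioc->ioc_hwif operation is dispatched, e.g. a CT adapter calls
 * bfa_nw_ioc_set_ct_hwif(ioc) and a CT2 adapter calls
 * bfa_nw_ioc_set_ct2_hwif(ioc).
 */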
/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/*
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}
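/* Release a firmware lock taken above; decrements the shared usage count. */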
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/*
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(!(usecnt > 0));

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}
/* Host to LPU mailbox message addresses */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
/* CT2 host <-> LPU mailbox and command/status registers, per port */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT },
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT },
};
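/* Initialize the CT (catapult) per-function register address map. */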
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
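/* Initialize the CT2 register address map, indexed by port. */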
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}

	return false;
}
/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
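/*
 * HOSTFN_MSIX_VT_OFST_NUMVT packs the MSI-X vector count
 * (__MSIX_VT_NUMVT__MK, encoded as count - 1) and the vector table
 * offset (__MSIX_VT_OFST_) for a PCI function. The poweron routine
 * below programs it once per function and leaves it alone if the
 * count field is already set.
 */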
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
			HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
			rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
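/*
 * The ioc_fail_sync register coordinates failure recovery across PCI
 * functions: a function sets its sync-requested bit when it joins,
 * sets its sync-acknowledged bit when it observes a failure, and the
 * last function to acknowledge performs the cleanup in
 * bfa_ioc_ct_sync_complete().
 */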
/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
		bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}
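/*
 * Recovery is complete once every function that requested sync has
 * acknowledged it; only then may the caller reinitialize the IOC.
 */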
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}
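/*
 * CT PLL bring-up: program the s_clk/l_clk PLL control words, pulse
 * the logic soft resets, then release LMEM reset and run the eDRAM
 * built-in self test.
 */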
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
			__APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL,
			(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
			(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
	       (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
	       (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}
#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_VER_VALID	0x143
#define BFA_IOC_PLL_POLL	1000000
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	volatile u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	volatile u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}
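/*
 * CT2 PLL init: if a valid NFC is present (wgn/nfc_ver check below),
 * let it reset and start the s_clk/l_clk PLLs; otherwise halt the NFC
 * and program the PLLs directly from the host.
 */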
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
	    (nfc_ver >= CT2_NFC_VER_VALID)) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
		       rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}