/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
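
/*
 * Layout of the ioc_fail_sync register implied by the accessors above:
 * the low 16 bits hold the per-PCI-fn "sync acked" flags and the high
 * 16 bits hold the matching "sync required" flags, one bit per
 * function (1 << bfa_ioc_pcifn()).
 */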

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);

static struct bfa_ioc_hwif nw_hwif_ct;

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
	nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
	nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
	nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
	nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;

	ioc->ioc_hwif = &nw_hwif_ct;
}

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return true;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/**
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 */
	if (!ioc->cna)
		return;

	/**
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/**
	 * decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(!(usecnt > 0));

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	if (ioc->cna) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/**
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};
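
/*
 * Note: each row above corresponds to one PCI function;
 * bfa_ioc_ct_reg_init() below indexes these tables by bfa_ioc_pcifn().
 */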

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/**
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	if (ioc->cna) {
		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
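
/*
 * Overview of the fail-sync handshake (summarized from the routines
 * below): each PCI fn sets its "sync required" bit on join, its "sync
 * acked" bit on ack, and clears both bits on leave;
 * bfa_ioc_ct_sync_complete() treats the failure sync as done once
 * every function that joined has also acked.
 */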

/**
 * Synchronized IOC failure processing routines
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/**
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
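
/*
 * Illustrative example (not from the original source): with PCI fns 0
 * and 1 both joined and both acked, ioc_fail_sync reads 0x00030003.
 * In bfa_ioc_ct_sync_complete() sync_reqd == sync_ackd == 0x3, so the
 * last acker clears the acked half (0x00030000 is written back) and
 * marks both fwstate registers BFI_IOC_FAIL, completing the sync.
 */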

static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
	u32	pll_sclk, pll_fclk, r32;

	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	/* Mask all host interrupts and clear any pending status */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	/* Put both app PLLs into logic soft reset */
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET,
		rb + APP_PLL_425_CTL_REG);
	/* Enable the PLLs while still held in soft reset */
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	/* Release the soft reset, leaving the PLLs enabled */
	writel(pll_sclk |
		__APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Run memory BIST on the eDRAM */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}