2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
/*
 * Layout of the IOC fail-sync register: the low 16 bits hold per-PCI-fn
 * "sync acked" flags, the high 16 bits hold "sync required" flags.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32)(1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
/* Arguments are parenthesized to avoid operator-precedence surprises when
 * the macros are invoked with compound expressions. */
#define bfa_ioc_ct_get_sync_ackd(__val)		((__val) & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	((__val) & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)		((__val) >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
35 * forward declarations
37 static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc
*ioc
);
38 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc
*ioc
);
39 static void bfa_ioc_ct_reg_init(struct bfa_ioc
*ioc
);
40 static void bfa_ioc_ct2_reg_init(struct bfa_ioc
*ioc
);
41 static void bfa_ioc_ct_map_port(struct bfa_ioc
*ioc
);
42 static void bfa_ioc_ct2_map_port(struct bfa_ioc
*ioc
);
43 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc
*ioc
, bool msix
);
44 static void bfa_ioc_ct_notify_fail(struct bfa_ioc
*ioc
);
45 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc
*ioc
);
46 static bool bfa_ioc_ct_sync_start(struct bfa_ioc
*ioc
);
47 static void bfa_ioc_ct_sync_join(struct bfa_ioc
*ioc
);
48 static void bfa_ioc_ct_sync_leave(struct bfa_ioc
*ioc
);
49 static void bfa_ioc_ct_sync_ack(struct bfa_ioc
*ioc
);
50 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc
*ioc
);
51 static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem
*rb
,
52 enum bfi_asic_mode asic_mode
);
53 static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem
*rb
,
54 enum bfi_asic_mode asic_mode
);
55 static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc
*ioc
);
57 static const struct bfa_ioc_hwif nw_hwif_ct
= {
58 .ioc_pll_init
= bfa_ioc_ct_pll_init
,
59 .ioc_firmware_lock
= bfa_ioc_ct_firmware_lock
,
60 .ioc_firmware_unlock
= bfa_ioc_ct_firmware_unlock
,
61 .ioc_reg_init
= bfa_ioc_ct_reg_init
,
62 .ioc_map_port
= bfa_ioc_ct_map_port
,
63 .ioc_isr_mode_set
= bfa_ioc_ct_isr_mode_set
,
64 .ioc_notify_fail
= bfa_ioc_ct_notify_fail
,
65 .ioc_ownership_reset
= bfa_ioc_ct_ownership_reset
,
66 .ioc_sync_start
= bfa_ioc_ct_sync_start
,
67 .ioc_sync_join
= bfa_ioc_ct_sync_join
,
68 .ioc_sync_leave
= bfa_ioc_ct_sync_leave
,
69 .ioc_sync_ack
= bfa_ioc_ct_sync_ack
,
70 .ioc_sync_complete
= bfa_ioc_ct_sync_complete
,
73 static const struct bfa_ioc_hwif nw_hwif_ct2
= {
74 .ioc_pll_init
= bfa_ioc_ct2_pll_init
,
75 .ioc_firmware_lock
= bfa_ioc_ct_firmware_lock
,
76 .ioc_firmware_unlock
= bfa_ioc_ct_firmware_unlock
,
77 .ioc_reg_init
= bfa_ioc_ct2_reg_init
,
78 .ioc_map_port
= bfa_ioc_ct2_map_port
,
79 .ioc_lpu_read_stat
= bfa_ioc_ct2_lpu_read_stat
,
80 .ioc_isr_mode_set
= NULL
,
81 .ioc_notify_fail
= bfa_ioc_ct_notify_fail
,
82 .ioc_ownership_reset
= bfa_ioc_ct_ownership_reset
,
83 .ioc_sync_start
= bfa_ioc_ct_sync_start
,
84 .ioc_sync_join
= bfa_ioc_ct_sync_join
,
85 .ioc_sync_leave
= bfa_ioc_ct_sync_leave
,
86 .ioc_sync_ack
= bfa_ioc_ct_sync_ack
,
87 .ioc_sync_complete
= bfa_ioc_ct_sync_complete
,
91 * Called from bfa_ioc_attach() to map asic specific calls.
94 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc
*ioc
)
96 ioc
->ioc_hwif
= &nw_hwif_ct
;
100 bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc
*ioc
)
102 ioc
->ioc_hwif
= &nw_hwif_ct2
;
106 * Return true if firmware of current driver matches the running firmware.
109 bfa_ioc_ct_firmware_lock(struct bfa_ioc
*ioc
)
111 enum bfi_ioc_state ioc_fwstate
;
113 struct bfi_ioc_image_hdr fwhdr
;
116 * If bios boot (flash based) -- do not increment usage count
118 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc
)) <
122 bfa_nw_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
123 usecnt
= readl(ioc
->ioc_regs
.ioc_usage_reg
);
126 * If usage count is 0, always return TRUE.
129 writel(1, ioc
->ioc_regs
.ioc_usage_reg
);
130 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_usage_sem_reg
);
131 writel(0, ioc
->ioc_regs
.ioc_fail_sync
);
135 ioc_fwstate
= readl(ioc
->ioc_regs
.ioc_fwstate
);
138 * Use count cannot be non-zero and chip in uninitialized state.
140 BUG_ON(!(ioc_fwstate
!= BFI_IOC_UNINIT
));
143 * Check if another driver with a different firmware is active
145 bfa_nw_ioc_fwver_get(ioc
, &fwhdr
);
146 if (!bfa_nw_ioc_fwver_cmp(ioc
, &fwhdr
)) {
147 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_usage_sem_reg
);
152 * Same firmware version. Increment the reference count.
155 writel(usecnt
, ioc
->ioc_regs
.ioc_usage_reg
);
156 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_usage_sem_reg
);
161 bfa_ioc_ct_firmware_unlock(struct bfa_ioc
*ioc
)
166 * If bios boot (flash based) -- do not decrement usage count
168 if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc
)) <
173 * decrement usage count
175 bfa_nw_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
176 usecnt
= readl(ioc
->ioc_regs
.ioc_usage_reg
);
177 BUG_ON(!(usecnt
> 0));
180 writel(usecnt
, ioc
->ioc_regs
.ioc_usage_reg
);
182 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_usage_sem_reg
);
186 * Notify other functions on HB failure.
189 bfa_ioc_ct_notify_fail(struct bfa_ioc
*ioc
)
191 writel(__FW_INIT_HALT_P
, ioc
->ioc_regs
.ll_halt
);
192 writel(__FW_INIT_HALT_P
, ioc
->ioc_regs
.alt_ll_halt
);
193 /* Wait for halt to take effect */
194 readl(ioc
->ioc_regs
.ll_halt
);
195 readl(ioc
->ioc_regs
.alt_ll_halt
);
199 * Host to LPU mailbox message addresses
201 static const struct {
206 { HOSTFN0_LPU_MBOX0_0
, LPU_HOSTFN0_MBOX0_0
, HOST_PAGE_NUM_FN0
},
207 { HOSTFN1_LPU_MBOX0_8
, LPU_HOSTFN1_MBOX0_8
, HOST_PAGE_NUM_FN1
},
208 { HOSTFN2_LPU_MBOX0_0
, LPU_HOSTFN2_MBOX0_0
, HOST_PAGE_NUM_FN2
},
209 { HOSTFN3_LPU_MBOX0_8
, LPU_HOSTFN3_MBOX0_8
, HOST_PAGE_NUM_FN3
}
213 * Host <-> LPU mailbox command/status registers - port 0
215 static const struct {
219 { HOSTFN0_LPU0_CMD_STAT
, LPU0_HOSTFN0_CMD_STAT
},
220 { HOSTFN1_LPU0_CMD_STAT
, LPU0_HOSTFN1_CMD_STAT
},
221 { HOSTFN2_LPU0_CMD_STAT
, LPU0_HOSTFN2_CMD_STAT
},
222 { HOSTFN3_LPU0_CMD_STAT
, LPU0_HOSTFN3_CMD_STAT
}
226 * Host <-> LPU mailbox command/status registers - port 1
228 static const struct {
232 { HOSTFN0_LPU1_CMD_STAT
, LPU1_HOSTFN0_CMD_STAT
},
233 { HOSTFN1_LPU1_CMD_STAT
, LPU1_HOSTFN1_CMD_STAT
},
234 { HOSTFN2_LPU1_CMD_STAT
, LPU1_HOSTFN2_CMD_STAT
},
235 { HOSTFN3_LPU1_CMD_STAT
, LPU1_HOSTFN3_CMD_STAT
}
238 static const struct {
246 { CT2_HOSTFN_LPU0_MBOX0
, CT2_LPU0_HOSTFN_MBOX0
, CT2_HOSTFN_PAGE_NUM
,
247 CT2_HOSTFN_LPU0_CMD_STAT
, CT2_LPU0_HOSTFN_CMD_STAT
,
248 CT2_HOSTFN_LPU0_READ_STAT
},
249 { CT2_HOSTFN_LPU1_MBOX0
, CT2_LPU1_HOSTFN_MBOX0
, CT2_HOSTFN_PAGE_NUM
,
250 CT2_HOSTFN_LPU1_CMD_STAT
, CT2_LPU1_HOSTFN_CMD_STAT
,
251 CT2_HOSTFN_LPU1_READ_STAT
},
255 bfa_ioc_ct_reg_init(struct bfa_ioc
*ioc
)
258 int pcifn
= bfa_ioc_pcifn(ioc
);
260 rb
= bfa_ioc_bar0(ioc
);
262 ioc
->ioc_regs
.hfn_mbox
= rb
+ ct_fnreg
[pcifn
].hfn_mbox
;
263 ioc
->ioc_regs
.lpu_mbox
= rb
+ ct_fnreg
[pcifn
].lpu_mbox
;
264 ioc
->ioc_regs
.host_page_num_fn
= rb
+ ct_fnreg
[pcifn
].hfn_pgn
;
266 if (ioc
->port_id
== 0) {
267 ioc
->ioc_regs
.heartbeat
= rb
+ BFA_IOC0_HBEAT_REG
;
268 ioc
->ioc_regs
.ioc_fwstate
= rb
+ BFA_IOC0_STATE_REG
;
269 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ BFA_IOC1_STATE_REG
;
270 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct_p0reg
[pcifn
].hfn
;
271 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct_p0reg
[pcifn
].lpu
;
272 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P0
;
273 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P1
;
275 ioc
->ioc_regs
.heartbeat
= rb
+ BFA_IOC1_HBEAT_REG
;
276 ioc
->ioc_regs
.ioc_fwstate
= rb
+ BFA_IOC1_STATE_REG
;
277 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ BFA_IOC0_STATE_REG
;
278 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct_p1reg
[pcifn
].hfn
;
279 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct_p1reg
[pcifn
].lpu
;
280 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P1
;
281 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P0
;
285 * PSS control registers
287 ioc
->ioc_regs
.pss_ctl_reg
= rb
+ PSS_CTL_REG
;
288 ioc
->ioc_regs
.pss_err_status_reg
= rb
+ PSS_ERR_STATUS_REG
;
289 ioc
->ioc_regs
.app_pll_fast_ctl_reg
= rb
+ APP_PLL_LCLK_CTL_REG
;
290 ioc
->ioc_regs
.app_pll_slow_ctl_reg
= rb
+ APP_PLL_SCLK_CTL_REG
;
293 * IOC semaphore registers and serialization
295 ioc
->ioc_regs
.ioc_sem_reg
= rb
+ HOST_SEM0_REG
;
296 ioc
->ioc_regs
.ioc_usage_sem_reg
= rb
+ HOST_SEM1_REG
;
297 ioc
->ioc_regs
.ioc_init_sem_reg
= rb
+ HOST_SEM2_REG
;
298 ioc
->ioc_regs
.ioc_usage_reg
= rb
+ BFA_FW_USE_COUNT
;
299 ioc
->ioc_regs
.ioc_fail_sync
= rb
+ BFA_IOC_FAIL_SYNC
;
304 ioc
->ioc_regs
.smem_page_start
= rb
+ PSS_SMEM_PAGE_START
;
305 ioc
->ioc_regs
.smem_pg0
= BFI_IOC_SMEM_PG0_CT
;
308 * err set reg : for notification of hb failure in fcmode
310 ioc
->ioc_regs
.err_set
= (rb
+ ERR_SET_REG
);
314 bfa_ioc_ct2_reg_init(struct bfa_ioc
*ioc
)
317 int port
= bfa_ioc_portid(ioc
);
319 rb
= bfa_ioc_bar0(ioc
);
321 ioc
->ioc_regs
.hfn_mbox
= rb
+ ct2_reg
[port
].hfn_mbox
;
322 ioc
->ioc_regs
.lpu_mbox
= rb
+ ct2_reg
[port
].lpu_mbox
;
323 ioc
->ioc_regs
.host_page_num_fn
= rb
+ ct2_reg
[port
].hfn_pgn
;
324 ioc
->ioc_regs
.hfn_mbox_cmd
= rb
+ ct2_reg
[port
].hfn
;
325 ioc
->ioc_regs
.lpu_mbox_cmd
= rb
+ ct2_reg
[port
].lpu
;
326 ioc
->ioc_regs
.lpu_read_stat
= rb
+ ct2_reg
[port
].lpu_read
;
329 ioc
->ioc_regs
.heartbeat
= rb
+ CT2_BFA_IOC0_HBEAT_REG
;
330 ioc
->ioc_regs
.ioc_fwstate
= rb
+ CT2_BFA_IOC0_STATE_REG
;
331 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ CT2_BFA_IOC1_STATE_REG
;
332 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P0
;
333 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P1
;
335 ioc
->ioc_regs
.heartbeat
= rb
+ CT2_BFA_IOC1_HBEAT_REG
;
336 ioc
->ioc_regs
.ioc_fwstate
= rb
+ CT2_BFA_IOC1_STATE_REG
;
337 ioc
->ioc_regs
.alt_ioc_fwstate
= rb
+ CT2_BFA_IOC0_STATE_REG
;
338 ioc
->ioc_regs
.ll_halt
= rb
+ FW_INIT_HALT_P1
;
339 ioc
->ioc_regs
.alt_ll_halt
= rb
+ FW_INIT_HALT_P0
;
343 * PSS control registers
345 ioc
->ioc_regs
.pss_ctl_reg
= rb
+ PSS_CTL_REG
;
346 ioc
->ioc_regs
.pss_err_status_reg
= rb
+ PSS_ERR_STATUS_REG
;
347 ioc
->ioc_regs
.app_pll_fast_ctl_reg
= rb
+ CT2_APP_PLL_LCLK_CTL_REG
;
348 ioc
->ioc_regs
.app_pll_slow_ctl_reg
= rb
+ CT2_APP_PLL_SCLK_CTL_REG
;
351 * IOC semaphore registers and serialization
353 ioc
->ioc_regs
.ioc_sem_reg
= rb
+ CT2_HOST_SEM0_REG
;
354 ioc
->ioc_regs
.ioc_usage_sem_reg
= rb
+ CT2_HOST_SEM1_REG
;
355 ioc
->ioc_regs
.ioc_init_sem_reg
= rb
+ CT2_HOST_SEM2_REG
;
356 ioc
->ioc_regs
.ioc_usage_reg
= rb
+ CT2_BFA_FW_USE_COUNT
;
357 ioc
->ioc_regs
.ioc_fail_sync
= rb
+ CT2_BFA_IOC_FAIL_SYNC
;
362 ioc
->ioc_regs
.smem_page_start
= rb
+ PSS_SMEM_PAGE_START
;
363 ioc
->ioc_regs
.smem_pg0
= BFI_IOC_SMEM_PG0_CT
;
366 * err set reg : for notification of hb failure in fcmode
368 ioc
->ioc_regs
.err_set
= rb
+ ERR_SET_REG
;
372 * Initialize IOC to port mapping.
375 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
377 bfa_ioc_ct_map_port(struct bfa_ioc
*ioc
)
379 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
383 * For catapult, base port id on personality register and IOC type
385 r32
= readl(rb
+ FNC_PERS_REG
);
386 r32
>>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
));
387 ioc
->port_id
= (r32
& __F0_PORT_MAP_MK
) >> __F0_PORT_MAP_SH
;
392 bfa_ioc_ct2_map_port(struct bfa_ioc
*ioc
)
394 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
397 r32
= readl(rb
+ CT2_HOSTFN_PERSONALITY0
);
398 ioc
->port_id
= ((r32
& __FC_LL_PORT_MAP__MK
) >> __FC_LL_PORT_MAP__SH
);
402 * Set interrupt mode for a function: INTX or MSIX
405 bfa_ioc_ct_isr_mode_set(struct bfa_ioc
*ioc
, bool msix
)
407 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
410 r32
= readl(rb
+ FNC_PERS_REG
);
412 mode
= (r32
>> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
))) &
416 * If already in desired mode, do not change anything
418 if ((!msix
&& mode
) || (msix
&& !mode
))
422 mode
= __F0_INTX_STATUS_MSIX
;
424 mode
= __F0_INTX_STATUS_INTA
;
426 r32
&= ~(__F0_INTX_STATUS
<< FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
)));
427 r32
|= (mode
<< FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc
)));
429 writel(r32
, rb
+ FNC_PERS_REG
);
433 bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc
*ioc
)
437 r32
= readl(ioc
->ioc_regs
.lpu_read_stat
);
439 writel(1, ioc
->ioc_regs
.lpu_read_stat
);
447 * MSI-X resource allocation for 1860 with no asic block
449 #define HOSTFN_MSIX_DEFAULT 64
450 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
451 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
452 #define __MSIX_VT_NUMVT__MK 0x003ff800
453 #define __MSIX_VT_NUMVT__SH 11
454 #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
455 #define __MSIX_VT_OFST_ 0x000007ff
457 bfa_nw_ioc_ct2_poweron(struct bfa_ioc
*ioc
)
459 void __iomem
*rb
= ioc
->pcidev
.pci_bar_kva
;
462 r32
= readl(rb
+ HOSTFN_MSIX_VT_OFST_NUMVT
);
463 if (r32
& __MSIX_VT_NUMVT__MK
) {
464 writel(r32
& __MSIX_VT_OFST_
,
465 rb
+ HOSTFN_MSIX_VT_INDEX_MBOX_ERR
);
469 writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT
- 1) |
470 HOSTFN_MSIX_DEFAULT
* bfa_ioc_pcifn(ioc
),
471 rb
+ HOSTFN_MSIX_VT_OFST_NUMVT
);
472 writel(HOSTFN_MSIX_DEFAULT
* bfa_ioc_pcifn(ioc
),
473 rb
+ HOSTFN_MSIX_VT_INDEX_MBOX_ERR
);
477 * Cleanup hw semaphore and usecnt registers
480 bfa_ioc_ct_ownership_reset(struct bfa_ioc
*ioc
)
482 bfa_nw_ioc_sem_get(ioc
->ioc_regs
.ioc_usage_sem_reg
);
483 writel(0, ioc
->ioc_regs
.ioc_usage_reg
);
484 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_usage_sem_reg
);
487 * Read the hw sem reg to make sure that it is locked
488 * before we clear it. If it is not locked, writing 1
489 * will lock it instead of clearing it.
491 readl(ioc
->ioc_regs
.ioc_sem_reg
);
492 bfa_nw_ioc_hw_sem_release(ioc
);
496 * Synchronized IOC failure processing routines
499 bfa_ioc_ct_sync_start(struct bfa_ioc
*ioc
)
501 u32 r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
502 u32 sync_reqd
= bfa_ioc_ct_get_sync_reqd(r32
);
505 * Driver load time. If the sync required bit for this PCI fn
506 * is set, it is due to an unclean exit by the driver for this
507 * PCI fn in the previous incarnation. Whoever comes here first
508 * should clean it up, no matter which PCI fn.
511 if (sync_reqd
& bfa_ioc_ct_sync_pos(ioc
)) {
512 writel(0, ioc
->ioc_regs
.ioc_fail_sync
);
513 writel(1, ioc
->ioc_regs
.ioc_usage_reg
);
514 writel(BFI_IOC_UNINIT
, ioc
->ioc_regs
.ioc_fwstate
);
515 writel(BFI_IOC_UNINIT
, ioc
->ioc_regs
.alt_ioc_fwstate
);
519 return bfa_ioc_ct_sync_complete(ioc
);
522 * Synchronized IOC failure processing routines
525 bfa_ioc_ct_sync_join(struct bfa_ioc
*ioc
)
527 u32 r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
528 u32 sync_pos
= bfa_ioc_ct_sync_reqd_pos(ioc
);
530 writel((r32
| sync_pos
), ioc
->ioc_regs
.ioc_fail_sync
);
534 bfa_ioc_ct_sync_leave(struct bfa_ioc
*ioc
)
536 u32 r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
537 u32 sync_msk
= bfa_ioc_ct_sync_reqd_pos(ioc
) |
538 bfa_ioc_ct_sync_pos(ioc
);
540 writel((r32
& ~sync_msk
), ioc
->ioc_regs
.ioc_fail_sync
);
544 bfa_ioc_ct_sync_ack(struct bfa_ioc
*ioc
)
546 u32 r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
548 writel((r32
| bfa_ioc_ct_sync_pos(ioc
)), ioc
->ioc_regs
.ioc_fail_sync
);
552 bfa_ioc_ct_sync_complete(struct bfa_ioc
*ioc
)
554 u32 r32
= readl(ioc
->ioc_regs
.ioc_fail_sync
);
555 u32 sync_reqd
= bfa_ioc_ct_get_sync_reqd(r32
);
556 u32 sync_ackd
= bfa_ioc_ct_get_sync_ackd(r32
);
563 * The check below is to see whether any other PCI fn
564 * has reinitialized the ASIC (reset sync_ackd bits)
565 * and failed again while this IOC was waiting for hw
566 * semaphore (in bfa_iocpf_sm_semwait()).
568 tmp_ackd
= sync_ackd
;
569 if ((sync_reqd
& bfa_ioc_ct_sync_pos(ioc
)) &&
570 !(sync_ackd
& bfa_ioc_ct_sync_pos(ioc
)))
571 sync_ackd
|= bfa_ioc_ct_sync_pos(ioc
);
573 if (sync_reqd
== sync_ackd
) {
574 writel(bfa_ioc_ct_clear_sync_ackd(r32
),
575 ioc
->ioc_regs
.ioc_fail_sync
);
576 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.ioc_fwstate
);
577 writel(BFI_IOC_FAIL
, ioc
->ioc_regs
.alt_ioc_fwstate
);
582 * If another PCI fn reinitialized and failed again while
583 * this IOC was waiting for hw sem, the sync_ackd bit for
584 * this IOC need to be set again to allow reinitialization.
586 if (tmp_ackd
!= sync_ackd
)
587 writel((r32
| sync_ackd
), ioc
->ioc_regs
.ioc_fail_sync
);
592 static enum bfa_status
593 bfa_ioc_ct_pll_init(void __iomem
*rb
, enum bfi_asic_mode asic_mode
)
595 u32 pll_sclk
, pll_fclk
, r32
;
596 bool fcmode
= (asic_mode
== BFI_ASIC_MODE_FC
);
598 pll_sclk
= __APP_PLL_SCLK_LRESETN
| __APP_PLL_SCLK_ENARST
|
599 __APP_PLL_SCLK_RSEL200500
| __APP_PLL_SCLK_P0_1(3U) |
600 __APP_PLL_SCLK_JITLMT0_1(3U) |
601 __APP_PLL_SCLK_CNTLMT0_1(1U);
602 pll_fclk
= __APP_PLL_LCLK_LRESETN
| __APP_PLL_LCLK_ENARST
|
603 __APP_PLL_LCLK_RSEL200500
| __APP_PLL_LCLK_P0_1(3U) |
604 __APP_PLL_LCLK_JITLMT0_1(3U) |
605 __APP_PLL_LCLK_CNTLMT0_1(1U);
608 writel(0, (rb
+ OP_MODE
));
609 writel(__APP_EMS_CMLCKSEL
|
610 __APP_EMS_REFCKBUFEN2
|
611 __APP_EMS_CHANNEL_SEL
,
612 (rb
+ ETH_MAC_SER_REG
));
614 writel(__GLOBAL_FCOE_MODE
, (rb
+ OP_MODE
));
615 writel(__APP_EMS_REFCKBUFEN1
,
616 (rb
+ ETH_MAC_SER_REG
));
618 writel(BFI_IOC_UNINIT
, (rb
+ BFA_IOC0_STATE_REG
));
619 writel(BFI_IOC_UNINIT
, (rb
+ BFA_IOC1_STATE_REG
));
620 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_MSK
));
621 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_MSK
));
622 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_STATUS
));
623 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_STATUS
));
624 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_MSK
));
625 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_MSK
));
627 __APP_PLL_SCLK_LOGIC_SOFT_RESET
,
628 rb
+ APP_PLL_SCLK_CTL_REG
);
630 __APP_PLL_LCLK_LOGIC_SOFT_RESET
,
631 rb
+ APP_PLL_LCLK_CTL_REG
);
633 __APP_PLL_SCLK_LOGIC_SOFT_RESET
| __APP_PLL_SCLK_ENABLE
,
634 rb
+ APP_PLL_SCLK_CTL_REG
);
636 __APP_PLL_LCLK_LOGIC_SOFT_RESET
| __APP_PLL_LCLK_ENABLE
,
637 rb
+ APP_PLL_LCLK_CTL_REG
);
638 readl(rb
+ HOSTFN0_INT_MSK
);
640 writel(0xffffffffU
, (rb
+ HOSTFN0_INT_STATUS
));
641 writel(0xffffffffU
, (rb
+ HOSTFN1_INT_STATUS
));
643 __APP_PLL_SCLK_ENABLE
,
644 rb
+ APP_PLL_SCLK_CTL_REG
);
646 __APP_PLL_LCLK_ENABLE
,
647 rb
+ APP_PLL_LCLK_CTL_REG
);
650 writel(__PMM_1T_RESET_P
, (rb
+ PMM_1T_RESET_REG_P0
));
651 writel(__PMM_1T_RESET_P
, (rb
+ PMM_1T_RESET_REG_P1
));
653 r32
= readl((rb
+ PSS_CTL_REG
));
654 r32
&= ~__PSS_LMEM_RESET
;
655 writel(r32
, (rb
+ PSS_CTL_REG
));
658 writel(0, (rb
+ PMM_1T_RESET_REG_P0
));
659 writel(0, (rb
+ PMM_1T_RESET_REG_P1
));
662 writel(__EDRAM_BISTR_START
, (rb
+ MBIST_CTL_REG
));
664 r32
= readl((rb
+ MBIST_STAT_REG
));
665 writel(0, (rb
+ MBIST_CTL_REG
));
666 return BFA_STATUS_OK
;
670 bfa_ioc_ct2_sclk_init(void __iomem
*rb
)
675 * put s_clk PLL and PLL FSM in reset
677 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
678 r32
&= ~(__APP_PLL_SCLK_ENABLE
| __APP_PLL_SCLK_LRESETN
);
679 r32
|= (__APP_PLL_SCLK_ENARST
| __APP_PLL_SCLK_BYPASS
|
680 __APP_PLL_SCLK_LOGIC_SOFT_RESET
);
681 writel(r32
, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
684 * Ignore mode and program for the max clock (which is FC16)
685 * Firmware/NFC will do the PLL init appropiately
687 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
688 r32
&= ~(__APP_PLL_SCLK_REFCLK_SEL
| __APP_PLL_SCLK_CLK_DIV2
);
689 writel(r32
, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
692 * while doing PLL init dont clock gate ethernet subsystem
694 r32
= readl((rb
+ CT2_CHIP_MISC_PRG
));
695 writel((r32
| __ETH_CLK_ENABLE_PORT0
),
696 (rb
+ CT2_CHIP_MISC_PRG
));
698 r32
= readl((rb
+ CT2_PCIE_MISC_REG
));
699 writel((r32
| __ETH_CLK_ENABLE_PORT1
),
700 (rb
+ CT2_PCIE_MISC_REG
));
705 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
706 r32
&= (__P_SCLK_PLL_LOCK
| __APP_PLL_SCLK_REFCLK_SEL
|
707 __APP_PLL_SCLK_CLK_DIV2
);
708 writel(r32
| 0x1061731b, (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
711 * poll for s_clk lock or delay 1ms
716 * Dont do clock gating for ethernet subsystem, firmware/NFC will
717 * do this appropriately
722 bfa_ioc_ct2_lclk_init(void __iomem
*rb
)
727 * put l_clk PLL and PLL FSM in reset
729 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
730 r32
&= ~(__APP_PLL_LCLK_ENABLE
| __APP_PLL_LCLK_LRESETN
);
731 r32
|= (__APP_PLL_LCLK_ENARST
| __APP_PLL_LCLK_BYPASS
|
732 __APP_PLL_LCLK_LOGIC_SOFT_RESET
);
733 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
736 * set LPU speed (set for FC16 which will work for other modes)
738 r32
= readl((rb
+ CT2_CHIP_MISC_PRG
));
739 writel(r32
, (rb
+ CT2_CHIP_MISC_PRG
));
742 * set LPU half speed (set for FC16 which will work for other modes)
744 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
745 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
748 * set lclk for mode (set for FC16)
750 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
751 r32
&= (__P_LCLK_PLL_LOCK
| __APP_LPUCLK_HALFSPEED
);
753 writel(r32
, (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
756 * poll for s_clk lock or delay 1ms
762 bfa_ioc_ct2_mem_init(void __iomem
*rb
)
766 r32
= readl((rb
+ PSS_CTL_REG
));
767 r32
&= ~__PSS_LMEM_RESET
;
768 writel(r32
, (rb
+ PSS_CTL_REG
));
771 writel(__EDRAM_BISTR_START
, (rb
+ CT2_MBIST_CTL_REG
));
773 writel(0, (rb
+ CT2_MBIST_CTL_REG
));
777 bfa_ioc_ct2_mac_reset(void __iomem
*rb
)
781 bfa_ioc_ct2_sclk_init(rb
);
782 bfa_ioc_ct2_lclk_init(rb
);
785 * release soft reset on s_clk & l_clk
787 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
788 writel((r32
& ~__APP_PLL_SCLK_LOGIC_SOFT_RESET
),
789 (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
792 * release soft reset on s_clk & l_clk
794 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
795 writel((r32
& ~__APP_PLL_LCLK_LOGIC_SOFT_RESET
),
796 (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
798 /* put port0, port1 MAC & AHB in reset */
799 writel((__CSI_MAC_RESET
| __CSI_MAC_AHB_RESET
),
800 (rb
+ CT2_CSI_MAC_CONTROL_REG(0)));
801 writel((__CSI_MAC_RESET
| __CSI_MAC_AHB_RESET
),
802 (rb
+ CT2_CSI_MAC_CONTROL_REG(1)));
805 #define CT2_NFC_MAX_DELAY 1000
806 static enum bfa_status
807 bfa_ioc_ct2_pll_init(void __iomem
*rb
, enum bfi_asic_mode asic_mode
)
809 volatile u32 wgn
, r32
;
813 * Initialize PLL if not already done by NFC
815 wgn
= readl(rb
+ CT2_WGN_STATUS
);
816 if (!(wgn
& __GLBL_PF_VF_CFG_RDY
)) {
817 writel(__HALT_NFC_CONTROLLER
, (rb
+ CT2_NFC_CSR_SET_REG
));
818 for (i
= 0; i
< CT2_NFC_MAX_DELAY
; i
++) {
819 r32
= readl(rb
+ CT2_NFC_CSR_SET_REG
);
820 if (r32
& __NFC_CONTROLLER_HALTED
)
827 * Mask the interrupts and clear any
828 * pending interrupts left by BIOS/EFI
831 writel(1, (rb
+ CT2_LPU0_HOSTFN_MBOX0_MSK
));
832 writel(1, (rb
+ CT2_LPU1_HOSTFN_MBOX0_MSK
));
834 r32
= readl((rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
836 writel(1, (rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
837 readl((rb
+ CT2_LPU0_HOSTFN_CMD_STAT
));
839 r32
= readl((rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
841 writel(1, (rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
842 readl((rb
+ CT2_LPU1_HOSTFN_CMD_STAT
));
845 bfa_ioc_ct2_mac_reset(rb
);
846 bfa_ioc_ct2_sclk_init(rb
);
847 bfa_ioc_ct2_lclk_init(rb
);
850 * release soft reset on s_clk & l_clk
852 r32
= readl((rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
853 writel((r32
& ~__APP_PLL_SCLK_LOGIC_SOFT_RESET
),
854 (rb
+ CT2_APP_PLL_SCLK_CTL_REG
));
857 * release soft reset on s_clk & l_clk
859 r32
= readl((rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
860 writel(r32
& ~__APP_PLL_LCLK_LOGIC_SOFT_RESET
,
861 (rb
+ CT2_APP_PLL_LCLK_CTL_REG
));
864 * Announce flash device presence, if flash was corrupted.
866 if (wgn
== (__WGN_READY
| __GLBL_PF_VF_CFG_RDY
)) {
867 r32
= readl((rb
+ PSS_GPIO_OUT_REG
));
868 writel((r32
& ~1), (rb
+ PSS_GPIO_OUT_REG
));
869 r32
= readl((rb
+ PSS_GPIO_OE_REG
));
870 writel((r32
| 1), (rb
+ PSS_GPIO_OE_REG
));
873 bfa_ioc_ct2_mem_init(rb
);
875 writel(BFI_IOC_UNINIT
, (rb
+ CT2_BFA_IOC0_STATE_REG
));
876 writel(BFI_IOC_UNINIT
, (rb
+ CT2_BFA_IOC1_STATE_REG
));
877 return BFA_STATUS_OK
;