/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa_fwimg_priv.h>
#include <bfa_trcmod_priv.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(HAL, IOC);
/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HB_TOV		1000	/* msecs */
#define BFA_IOC_HB_FAIL_MAX	4
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	(BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
				 + BFA_IOC_TOV)
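
/*
 * With the values above, a heartbeat failure is declared after
 * BFA_IOC_HB_FAIL_MAX unchanged heartbeat polls (4 * 1000 msecs), and
 * auto-recovery is attempted BFA_IOC_TOV_RECOVER msecs
 * (4 * 1000 + 2000 = 6000 msecs) after the failure is detected.
 */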
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
#define BFA_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_CHUNK_ADDR(chunkno)	((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
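
/*
 * The three macros above split a word offset into the firmware image
 * into a (chunk number, offset within chunk) pair and back: word `off`
 * lives at word BFA_FLASH_OFFSET_IN_CHUNK(off) of chunk
 * BFA_FLASH_CHUNK_NO(off), and BFA_FLASH_CHUNK_ADDR() returns the word
 * offset at which that chunk starts.
 */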
bfa_boolean_t bfa_auto_recover = BFA_FALSE;
/*
 * forward declarations
 */
static void bfa_ioc_aen_post(struct bfa_ioc_s *ioc,
			     enum bfa_ioc_aen_event event);
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 2,	/*  IOC disable request		*/
	IOC_E_TIMEOUT		= 3,	/*  f/w response timeout	*/
	IOC_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOC_E_FWRSP_GETATTR	= 5,	/*  IOC get attribute response	*/
	IOC_E_FWRSP_ENABLE	= 6,	/*  enable f/w response		*/
	IOC_E_FWRSP_DISABLE	= 7,	/*  disable f/w response	*/
	IOC_E_HBFAIL		= 8,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 9,	/*  hardware error interrupt	*/
	IOC_E_SEMLOCKED		= 10,	/*  h/w semaphore is locked	*/
	IOC_E_DETACH		= 11,	/*  driver detach cleanup	*/
};
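
/*
 * Each IOC state below is implemented as a pair of functions: an entry
 * action (_entry suffix) run once on transition into the state, and a
 * handler that dispatches the events above through a switch statement.
 * Events a state does not expect land in bfa_sm_fault().
 */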
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
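
/*
 * ioc_sm_table maps each state handler to its externally visible
 * bfa_ioc_state value; bfa_sm_to_state() searches this table when
 * bfa_ioc_get_attr() reports the current state.
 */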
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
		/*
		 * Wait for halt to take effect
		 */
		bfa_reg_read(ioc->ioc_regs.ll_halt);
	}

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 *  bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

static void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;
	int	cnt = 0;
#define BFA_SEM_SPINCNT	1000

	do {
		r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
		cnt++;
		if (cnt > BFA_SEM_SPINCNT)
			break;
	} while (r32 != 0);
	bfa_assert(cnt < BFA_SEM_SPINCNT);
}

static void
bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_TOV);
}

static void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/**
 * Get driver and firmware versions.
 */
static void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
static u32 *
bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
{
	if (ioc->ctdev)
		return bfi_image_ct_get_chunk(off);
	return bfi_image_cb_get_chunk(off);
}

static u32
bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
{
	return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
}
/**
 * Returns TRUE if same.
 */
static bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int	i;

	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32	usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/**
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_ioc_usage_sem_release(ioc);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
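
/*
 * The use count in ioc_usage_reg, serialized by the usage semaphore,
 * tracks how many functions share the running firmware: the first
 * locker boots its own image, and later lockers join only when the
 * md5 signature of their driver image matches the running one.
 */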
static void
bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32	usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
		return;

	/**
	 * decrement usage count
	 */
	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
	bfa_assert(usecnt > 0);

	usecnt--;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_trc(ioc, usecnt);

	bfa_ioc_usage_sem_release(ioc);
}
/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
static void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32	*msgp = (u32 *) ioc_msg;
	u32	i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
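
/*
 * Message words are converted to little-endian (bfa_os_wtole) before
 * being written, and the unused tail of the BFI_IOC_MSGLEN_MAX-byte
 * mailbox is zeroed so firmware never sees stale words from a previous
 * command.  The final read of hfn_mbox_cmd flushes the posted writes.
 */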
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32	hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		ioc->hb_fail++;
	} else {
		ioc->hb_count = hb_count;
		ioc->hb_fail = 0;
	}

	if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
		ioc->hb_fail = 0;
		bfa_ioc_recover(ioc);
		return;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}
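
/*
 * The firmware heartbeat register is expected to advance every
 * BFA_IOC_HB_TOV interval; recovery is triggered only after
 * BFA_IOC_HB_FAIL_MAX consecutive polls find the count unchanged, so
 * a single delayed heartbeat does not fail the IOC.
 */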
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_fail = 0;
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
/**
 * Host to LPU mailbox message addresses
 */
static struct {
	u32	hfn_mbox, lpu_mbox, hfn_pgn;
} iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct {
	u32	hfn, lpu;
} iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct {
	u32	hfn, lpu;
} iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Shared IRQ handling in INTX mode
 */
static struct {
	u32	isr, msk;
} iocreg_shirq_next[] = {
	{ HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK },
	{ HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK },
	{ HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK },
	{ HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK },
};
static void
bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
	}

	/**
	 * Shared IRQ handling in INTX mode
	 */
	ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
	ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

	/**
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
		ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
}
/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32	*fwimg;
	u32	pgnum, pgoff;
	u32	loff = 0;
	u32	chunkno = 0;
	u32	i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
	fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
	fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
		bfa_os_swap32(boot_param);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {

		if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_FLASH_CHUNK_NO(i);
			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
					BFA_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));
}
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32	stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_map_port(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/**
	 * For crossbow, port id is same as pci function.
	 */
	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
		ioc->port_id = bfa_ioc_pcifn(ioc);
		return;
	}

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = bfa_reg_read(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	bfa_reg_write(rb + FNC_PERS_REG, r32);
}
static bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	pll_sclk, pll_fclk, r32;

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
			__APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(1U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(1U);

		/**
		 * For catapult, choose operational mode FC/FCoE
		 */
		if (ioc->fcmode) {
			bfa_reg_write((rb + OP_MODE), 0);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
				      | __APP_EMS_CHANNEL_SEL);
		} else {
			ioc->pllinit = BFA_TRUE;
			bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_REFCKBUFEN1);
		}
	} else {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(3U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(3U);
	}

	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);

	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);

	/**
	 * Wait for PLLs to lock.
	 */
	bfa_os_udelay(2000);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
		bfa_os_udelay(1000);
		r32 = bfa_reg_read((rb + MBIST_STAT_REG));
		bfa_trc(ioc, r32);
	}

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t	rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	/*
	 * Auto recovery is force-disabled here; the requested setting
	 * is intentionally ignored.
	 */
	bfa_auto_recover = BFA_FALSE;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	bfa_assert(ioc->auto_recover);
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int	mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32	stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
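
/*
 * A command is written to the mailbox immediately only when both the
 * driver queue and the mailbox itself are free; queued commands are
 * drained one at a time by bfa_ioc_mbox_poll() from the heartbeat
 * timer, preserving submission order.
 */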
/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int	mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled));
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch));
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_HBFAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;
	char	model[BFA_ADAPTER_MODEL_NAME_LEN];

	ioc_attr = ioc->attr;
	bfa_os_memcpy((void *)&ad_attr->serial_num,
		      (void *)ioc_attr->brcd_serialnum,
		      BFA_ADAPTER_SERIAL_NUM_LEN);

	bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
	bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
		      BFA_VERSION_LEN);
	bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
		      BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
	ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);

	/**
	 * model name
	 */
	if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
		strcpy(model, "BR-10?0");
		model[5] = '0' + ad_attr->nports;
	} else {
		strcpy(model, "Brocade-??5");
		model[8] =
			'0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
		model[9] = '0' + ad_attr->nports;
	}

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
	bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
		      BFA_ADAPTER_MODEL_NAME_LEN);

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;
	ad_attr->hw_ver[0] = 'R';
	ad_attr->hw_ver[1] = 'e';
	ad_attr->hw_ver[2] = 'v';
	ad_attr->hw_ver[3] = '-';
	ad_attr->hw_ver[4] = ioc_attr->asic_rev;
	ad_attr->hw_ver[5] = '\0';

	ad_attr->cna_capable = ioc->cna;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
	ioc_attr->port_id = ioc->port_id;

	if (!ioc->ctdev)
		ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		ioc_attr->ioc_type = BFA_IOC_TYPE_LL;

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	ioc_attr->pci_attr.chip_rev[0] = 'R';
	ioc_attr->pci_attr.chip_rev[1] = 'e';
	ioc_attr->pci_attr.chip_rev[2] = 'v';
	ioc_attr->pci_attr.chip_rev[3] = '-';
	ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
	ioc_attr->pci_attr.chip_rev[5] = '\0';
}
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	return w.wwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	w.byte[0] = 0x20;

	return w.wwn;
}
wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	} w, w5;

	bfa_trc(ioc, inst);

	w.wwn = ioc->attr->mfg_wwn;
	w5.byte[0] = 0x50 | w.byte[2] >> 4;
	w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
	w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
	w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
	w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
	w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
	w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
	w5.byte[7] = (inst & 0xff);

	return w5.wwn;
}
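
/*
 * The NAA-5 WWN above is built by prepending the 4-bit NAA identifier
 * (0x5) and shifting the manufacturing WWN left by one nibble, which
 * leaves the low 12 bits free to encode the caller-supplied instance
 * number.
 */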
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_wwn;
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	mac_t	mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}
/**
 * Return true if interrupt should be claimed.
 */
bfa_boolean_t
bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
{
	u32	isr, msk;

	/**
	 * Always claim if not catapult.
	 */
	if (!ioc->ctdev)
		return BFA_TRUE;

	/**
	 * FALSE if next device is claiming interrupt.
	 * TRUE if next device is not interrupting or not present.
	 */
	msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
	isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
	return !(isr & ~msk);
}
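
/*
 * iocreg_shirq_next chains the four functions in a ring
 * (FN0 -> FN1 -> FN2 -> FN3 -> FN0), so each function inspects its
 * neighbor's status/mask pair when deciding whether to claim a shared
 * INTX interrupt.
 */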
/**
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32	inst_num = 0;
	struct bfa_ioc_attr_s ioc_attr;

	switch (event) {
	case BFA_IOC_AEN_HBGOOD:
		bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
		break;
	case BFA_IOC_AEN_HBFAIL:
		bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
		break;
	case BFA_IOC_AEN_ENABLE:
		bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
		break;
	case BFA_IOC_AEN_DISABLE:
		bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
		break;
	case BFA_IOC_AEN_FWMISMATCH:
		bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
		break;
	default:
		break;
	}

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	bfa_ioc_get_attr(ioc, &ioc_attr);
	switch (ioc_attr.ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_attr.ioc_type;
}
/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
/**
 * Retrieve firmware trace directly from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32	pgnum;
	u32	loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int	tlen, i;
	u32	*tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));
	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}
/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int	tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_assert(0);
}

#endif