/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	((__sm) == BFI_IOC_INITING) ||		\
	((__sm) == BFI_IOC_HWINIT) ||		\
	((__sm) == BFI_IOC_DISABLED) ||		\
	((__sm) == BFI_IOC_FAIL) ||		\
	((__sm) == BFI_IOC_CFG_DISABLED))
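
/*
 * Usage sketch (illustrative, not a call site in this file): recovery
 * paths can gate re-initialization on the current firmware state, e.g.
 *
 *	if (bfa_ioc_state_disabled(bfa_ioc_get_cur_ioc_fwstate(ioc)))
 *		bfa_ioc_hwinit(ioc, true);
 */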

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
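
/*
 * A mailbox command counts as "pending" if either the software command
 * queue is non-empty or the h/w mailbox command register still reads
 * non-zero, i.e. firmware has not yet consumed the posted command.
 */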

static bool bfa_nw_auto_recover = true;
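
/*
 * Module-wide auto-recovery default. It is snapshotted into
 * iocpf->auto_recover on IOCPF reset entry and steers heartbeat and
 * hardware failures in bfa_ioc_sm_op() toward fail_retry instead of fail.
 */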

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};
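
/*
 * The IOC state machine below handles driver-facing events (enable,
 * disable, attach/detach and firmware responses), while the IOCPF state
 * machine that follows sequences the per-PCI-function hardware handshake:
 * semaphore acquisition, firmware version check, download and the
 * enable/disable mailbox exchanges with firmware.
 */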

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
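
/*
 * The bfa_sm_table arrays map state-handler function pointers to the
 * externally visible state enums; they are presumably consumed by
 * bfa_sm_to_state()-style lookups when reporting IOC/IOCPF state to
 * management paths (the lookup helper lives outside this file).
 */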

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in failed state. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}

/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32	pgnum;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if major minor and maintenance are same.
 * If patch version are same, check for MD5 Checksum to be same.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return true;

	return false;
}

/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */
	if (fwhdr_is_ga(base_fwhdr) == true) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All Version Numbers are equal.
	 * Md5 check to be done as a part of compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
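
/*
 * Worked example of the comparison above: with base f/w 3.2.3.0 (GA,
 * phase 0 / build 0) and a candidate 3.2.3.1 internal build, the patch
 * numbers differ first, so the candidate is BFI_IOC_IMG_VER_BETTER.
 * With equal patch levels, a GA base outranks any non-GA candidate
 * (BFI_IOC_IMG_VER_OLD), and phase/build only break ties between two
 * non-GA images.
 */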

/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};
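
/*
 * The error codes are negative on purpose: the flash helpers below return
 * either a non-negative status/data value or one of these errors through
 * the same integer return path, so callers can simply test "status < 0".
 */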

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:9;
		u32	cmd:3;
#else
		u32	cmd:3;
		u32	addr_cnt:9;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static u32
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
	return 0;
}

static u32
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
	return 0;
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}

/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg	dev_status;
	int		status;
	u32		ret_status;
	int		i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be multiple of 4 and not exceeding fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static int
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}

/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX   500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int	locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}
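
/*
 * Chunking note: for each iteration, s is the absolute flash offset,
 * n = s / fifo_sz the index of the fifo-sized window containing it, and
 * l = (n + 1) * fifo_sz - s the distance to the end of that window.
 * Reads are therefore clipped so no single bfa_flash_read_start() crosses
 * a BFA_FLASH_FIFO_SIZE boundary or exceeds the remaining residue.
 */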

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);

	return BFI_IOC_IMG_VER_INCOMP;
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* IF Flash has a better F/W than smem do not work with smem.
	 * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it.
	 * If Flash is old or incomp work with smem iff smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
								BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
							BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
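
/*
 * The unused tail of the mailbox is zero-padded so firmware never sees
 * stale words from a previous command, and the trailing readl() of
 * hfn_mbox_cmd acts as a read-back flush of the posted PCI write that
 * rings the LPU doorbell.
 */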

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
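
/*
 * Heartbeat monitoring: bfa_ioc_hb_monitor() latches the current
 * heartbeat counter and arms hb_timer; bfa_nw_ioc_hb_check() then fires
 * every BFA_IOC_HB_TOV ms, and a counter that did not advance between two
 * samples is treated as dead firmware and kicks bfa_ioc_recover(). The
 * same tick opportunistically flushes pending mailbox commands via
 * bfa_ioc_mbox_poll().
 */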

/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			      ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				      ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		      ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/*
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}

/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc: memory for IOC
 * @tbuf: app memory to store data from smem
 * @soff: smem offset
 * @sz: size of smem in bytes
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
		      ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return 0;
}
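
/*
 * SMEM is accessed through a sliding page window: loff is the offset
 * within the current page and host_page_num_fn selects the page. When
 * loff wraps to 0 the page register is advanced, which is why the copy
 * loop rewrites host_page_num_fn mid-transfer and restores page 0 at the
 * end.
 */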
2257 /* Retrieve saved firmware trace from a prior IOC failure. */
2259 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc
*ioc
, void *trcdata
, int *trclen
)
2261 u32 loff
= BFI_IOC_TRC_OFF
+ BNA_DBG_FWTRC_LEN
* ioc
->port_id
;
2262 int tlen
, status
= 0;
2265 if (tlen
> BNA_DBG_FWTRC_LEN
)
2266 tlen
= BNA_DBG_FWTRC_LEN
;
2268 status
= bfa_nw_ioc_smem_read(ioc
, trcdata
, loff
, tlen
);
2273 /* Save firmware trace if configured. */
2275 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc
*ioc
)
2279 if (ioc
->dbg_fwsave_once
) {
2280 ioc
->dbg_fwsave_once
= 0;
2281 if (ioc
->dbg_fwsave_len
) {
2282 tlen
= ioc
->dbg_fwsave_len
;
2283 bfa_nw_ioc_debug_fwtrc(ioc
, ioc
->dbg_fwsave
, &tlen
);
2288 /* Retrieve saved firmware trace from a prior IOC failure. */
2290 bfa_nw_ioc_debug_fwsave(struct bfa_ioc
*ioc
, void *trcdata
, int *trclen
)
2294 if (ioc
->dbg_fwsave_len
== 0)
2295 return BFA_STATUS_ENOFSAVE
;
2298 if (tlen
> ioc
->dbg_fwsave_len
)
2299 tlen
= ioc
->dbg_fwsave_len
;
2301 memcpy(trcdata
, ioc
->dbg_fwsave
, tlen
);
2303 return BFA_STATUS_OK
;
2307 bfa_ioc_fail_notify(struct bfa_ioc
*ioc
)
2310 * Notify driver and common modules registered for notification.
2312 ioc
->cbfn
->hbfail_cbfn(ioc
->bfa
);
2313 bfa_ioc_event_notify(ioc
, BFA_IOC_E_FAILED
);
2314 bfa_nw_ioc_debug_save_ftrc(ioc
);
2317 /* IOCPF to IOC interface */
2319 bfa_ioc_pf_enabled(struct bfa_ioc
*ioc
)
2321 bfa_fsm_send_event(ioc
, IOC_E_ENABLED
);
2325 bfa_ioc_pf_disabled(struct bfa_ioc
*ioc
)
2327 bfa_fsm_send_event(ioc
, IOC_E_DISABLED
);
2331 bfa_ioc_pf_failed(struct bfa_ioc
*ioc
)
2333 bfa_fsm_send_event(ioc
, IOC_E_PFFAILED
);
2337 bfa_ioc_pf_hwfailed(struct bfa_ioc
*ioc
)
2339 bfa_fsm_send_event(ioc
, IOC_E_HWFAILED
);
2343 bfa_ioc_pf_fwmismatch(struct bfa_ioc
*ioc
)
2346 * Provide enable completion callback and AEN notification.
2348 ioc
->cbfn
->enable_cbfn(ioc
->bfa
, BFA_STATUS_IOC_FAILURE
);
2352 static enum bfa_status
2353 bfa_ioc_pll_init(struct bfa_ioc
*ioc
)
2356 * Hold semaphore so that nobody can access the chip during init.
2358 bfa_nw_ioc_sem_get(ioc
->ioc_regs
.ioc_init_sem_reg
);
2360 bfa_ioc_pll_init_asic(ioc
);
2362 ioc
->pllinit
= true;
2364 /* Initialize LMEM */
2365 bfa_ioc_lmem_init(ioc
);
2368 * release semaphore.
2370 bfa_nw_ioc_sem_release(ioc
->ioc_regs
.ioc_init_sem_reg
);
2372 return BFA_STATUS_OK
;
2375 /* Interface used by diag module to do firmware boot with memory test
2376 * as the entry vector.
2378 static enum bfa_status
2379 bfa_ioc_boot(struct bfa_ioc
*ioc
, enum bfi_fwboot_type boot_type
,
2382 struct bfi_ioc_image_hdr
*drv_fwhdr
;
2383 enum bfa_status status
;
2384 bfa_ioc_stats(ioc
, ioc_boots
);
2386 if (bfa_ioc_pll_init(ioc
) != BFA_STATUS_OK
)
2387 return BFA_STATUS_FAILED
;
2388 if (boot_env
== BFI_FWBOOT_ENV_OS
&&
2389 boot_type
== BFI_FWBOOT_TYPE_NORMAL
) {
2390 drv_fwhdr
= (struct bfi_ioc_image_hdr
*)
2391 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc
), 0);
2392 /* Work with Flash iff flash f/w is better than driver f/w.
2393 * Otherwise push drivers firmware.
2395 if (bfa_ioc_flash_fwver_cmp(ioc
, drv_fwhdr
) ==
2396 BFI_IOC_IMG_VER_BETTER
)
2397 boot_type
= BFI_FWBOOT_TYPE_FLASH
;
2401 * Initialize IOC state of all functions on a chip reset.
2403 if (boot_type
== BFI_FWBOOT_TYPE_MEMTEST
) {
2404 bfa_ioc_set_cur_ioc_fwstate(ioc
, BFI_IOC_MEMTEST
);
2405 bfa_ioc_set_alt_ioc_fwstate(ioc
, BFI_IOC_MEMTEST
);
2407 bfa_ioc_set_cur_ioc_fwstate(ioc
, BFI_IOC_INITING
);
2408 bfa_ioc_set_alt_ioc_fwstate(ioc
, BFI_IOC_INITING
);
2411 bfa_ioc_msgflush(ioc
);
2412 status
= bfa_ioc_download_fw(ioc
, boot_type
, boot_env
);
2413 if (status
== BFA_STATUS_OK
)
2414 bfa_ioc_lpu_start(ioc
);
2416 bfa_nw_iocpf_timeout(ioc
);
2421 /* Enable/disable IOC failure auto recovery. */
2423 bfa_nw_ioc_auto_recover(bool auto_recover
)
2425 bfa_nw_auto_recover
= auto_recover
;
2429 bfa_ioc_msgget(struct bfa_ioc
*ioc
, void *mbmsg
)
2435 r32
= readl(ioc
->ioc_regs
.lpu_mbox_cmd
);
2442 for (i
= 0; i
< (sizeof(union bfi_ioc_i2h_msg_u
) / sizeof(u32
));
2444 r32
= readl(ioc
->ioc_regs
.lpu_mbox
+
2446 msgp
[i
] = htonl(r32
);
2450 * turn off mailbox interrupt by clearing mailbox status
2452 writel(1, ioc
->ioc_regs
.lpu_mbox_cmd
);
2453 readl(ioc
->ioc_regs
.lpu_mbox_cmd
);
2459 bfa_ioc_isr(struct bfa_ioc
*ioc
, struct bfi_mbmsg
*m
)
2461 union bfi_ioc_i2h_msg_u
*msg
;
2462 struct bfa_iocpf
*iocpf
= &ioc
->iocpf
;
2464 msg
= (union bfi_ioc_i2h_msg_u
*) m
;
2466 bfa_ioc_stats(ioc
, ioc_isrs
);
2468 switch (msg
->mh
.msg_id
) {
2469 case BFI_IOC_I2H_HBEAT
:
2472 case BFI_IOC_I2H_ENABLE_REPLY
:
2473 bfa_ioc_enable_reply(ioc
,
2474 (enum bfa_mode
)msg
->fw_event
.port_mode
,
2475 msg
->fw_event
.cap_bm
);
2478 case BFI_IOC_I2H_DISABLE_REPLY
:
2479 bfa_fsm_send_event(iocpf
, IOCPF_E_FWRSP_DISABLE
);
2482 case BFI_IOC_I2H_GETATTR_REPLY
:
2483 bfa_ioc_getattr_reply(ioc
);
2492 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2494 * @ioc: memory for IOC
2495 * @bfa: driver instance structure
2498 bfa_nw_ioc_attach(struct bfa_ioc
*ioc
, void *bfa
, struct bfa_ioc_cbfn
*cbfn
)
2502 ioc
->fcmode
= false;
2503 ioc
->pllinit
= false;
2504 ioc
->dbg_fwsave_once
= true;
2505 ioc
->iocpf
.ioc
= ioc
;
2507 bfa_ioc_mbox_attach(ioc
);
2508 INIT_LIST_HEAD(&ioc
->notify_q
);
2510 bfa_fsm_set_state(ioc
, bfa_ioc_sm_uninit
);
2511 bfa_fsm_send_event(ioc
, IOC_E_RESET
);
2514 /* Driver detach time IOC cleanup. */
2516 bfa_nw_ioc_detach(struct bfa_ioc
*ioc
)
2518 bfa_fsm_send_event(ioc
, IOC_E_DETACH
);
2520 /* Done with detach, empty the notify_q. */
2521 INIT_LIST_HEAD(&ioc
->notify_q
);
2525 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
2527 * @pcidev: PCI device information for this IOC
2530 bfa_nw_ioc_pci_init(struct bfa_ioc
*ioc
, struct bfa_pcidev
*pcidev
,
2531 enum bfi_pcifn_class clscode
)
2533 ioc
->clscode
= clscode
;
2534 ioc
->pcidev
= *pcidev
;
2537 * Initialize IOC and device personality
2539 ioc
->port0_mode
= ioc
->port1_mode
= BFI_PORT_MODE_FC
;
2540 ioc
->asic_mode
= BFI_ASIC_MODE_FC
;
2542 switch (pcidev
->device_id
) {
2543 case PCI_DEVICE_ID_BROCADE_CT
:
2544 ioc
->asic_gen
= BFI_ASIC_GEN_CT
;
2545 ioc
->port0_mode
= ioc
->port1_mode
= BFI_PORT_MODE_ETH
;
2546 ioc
->asic_mode
= BFI_ASIC_MODE_ETH
;
2547 ioc
->port_mode
= ioc
->port_mode_cfg
= BFA_MODE_CNA
;
2548 ioc
->ad_cap_bm
= BFA_CM_CNA
;
2551 case BFA_PCI_DEVICE_ID_CT2
:
2552 ioc
->asic_gen
= BFI_ASIC_GEN_CT2
;
2553 if (clscode
== BFI_PCIFN_CLASS_FC
&&
2554 pcidev
->ssid
== BFA_PCI_CT2_SSID_FC
) {
2555 ioc
->asic_mode
= BFI_ASIC_MODE_FC16
;
2557 ioc
->port_mode
= ioc
->port_mode_cfg
= BFA_MODE_HBA
;
2558 ioc
->ad_cap_bm
= BFA_CM_HBA
;
2560 ioc
->port0_mode
= ioc
->port1_mode
= BFI_PORT_MODE_ETH
;
2561 ioc
->asic_mode
= BFI_ASIC_MODE_ETH
;
2562 if (pcidev
->ssid
== BFA_PCI_CT2_SSID_FCoE
) {
2564 ioc
->port_mode_cfg
= BFA_MODE_CNA
;
2565 ioc
->ad_cap_bm
= BFA_CM_CNA
;
2568 ioc
->port_mode_cfg
= BFA_MODE_NIC
;
2569 ioc
->ad_cap_bm
= BFA_CM_NIC
;
2579 * Set asic specific interfaces.
2581 if (ioc
->asic_gen
== BFI_ASIC_GEN_CT
)
2582 bfa_nw_ioc_set_ct_hwif(ioc
);
2584 WARN_ON(ioc
->asic_gen
!= BFI_ASIC_GEN_CT2
);
2585 bfa_nw_ioc_set_ct2_hwif(ioc
);
2586 bfa_nw_ioc_ct2_poweron(ioc
);
2589 bfa_ioc_map_port(ioc
);
2590 bfa_ioc_reg_init(ioc
);
/**
 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
 *
 * @ioc: memory for IOC
 * @dm_kva: kernel virtual address of IOC dma memory
 * @dm_pa: physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}
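/*
 * Illustrative only: one way a caller might size and hand over the DMA
 * region, assuming a struct device *dev is available. The
 * dma_alloc_coherent() call and the local names are an assumption, not
 * code from this driver:
 *
 *	u32 len = bfa_nw_ioc_meminfo();
 *	dma_addr_t pa;
 *	u8 *kva = dma_alloc_coherent(dev, len, &pa, GFP_KERNEL);
 *
 *	if (kva)
 *		bfa_nw_ioc_mem_claim(ioc, kva, pa);
 */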
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
}
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
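/*
 * A minimal sketch of how a common module registers for its message class,
 * mirroring what the flash module does further below; my_isr/my_module are
 * illustrative names only:
 *
 *	static void my_isr(void *cbarg, struct bfi_mbmsg *m);
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_FLASH, my_isr, my_module);
 */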
/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc: IOC instance
 * @cmd: Mailbox command
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Waits if mailbox is busy. Responsibility of caller to serialize
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
		      bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}
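/*
 * Note on the return value (an observation from the code above, not a
 * contract stated elsewhere in this file): true means the command was
 * deferred to cmd_q and will be retried from bfa_ioc_mbox_poll(); false
 * means it was written to the firmware mailbox immediately. Callers such
 * as the flash code below pass NULL/NULL for cbfn/cbarg and drive their
 * own completion from the response message instead.
 */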
/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
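/*
 * Sketch of the registration pattern, matching what the flash module does
 * below with bfa_ioc_notify_init(); my_notify/my_module are illustrative
 * names only:
 *
 *	static void my_notify(void *cbarg, enum bfa_ioc_event event);
 *
 *	bfa_ioc_notify_init(&my_module->ioc_notify, my_notify, my_module);
 *	bfa_nw_ioc_notify_register(ioc, &my_module->ioc_notify);
 */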
#define BFA_MFG_NAME "Brocade"

static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
		(void *)ioc->attr->brcd_serialnum,
		BFA_ADAPTER_SERIAL_NUM_LEN);
}
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
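/*
 * For example, a card_type of 1020 yields the model string "Brocade-1020"
 * via the "%s-%u" format above (the specific card_type value here is
 * illustrative).
 */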
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
/*
 *	Flash module specific
 */

/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid a write segment crossing a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
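/*
 * Worked sizing example (the MFG block size below is hypothetical; only
 * the 0x010000 asic block and 2048-byte segment size come from the macros
 * above): with a 64 KiB asic block plus, say, a 256-byte MFG block,
 * 0x010000 + 256 = 65792, rounded up to the next multiple of 2048 gives
 * a 67584-byte DMA buffer.
 */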
static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}
/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
		(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}
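/*
 * Chunking walkthrough (sizes are illustrative, assuming a hypothetical
 * 64 KiB DMA buffer): a 192 KiB update goes out as three write requests;
 * residue shrinks 192K -> 128K -> 64K -> 0 while offset advances by len
 * after each send, and msg->last is set only on the request where
 * len == residue.
 */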
/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
		(struct bfi_flash_read_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32 status;

	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32 i;
			struct bfa_flash_attr *attr, *f;

			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else
			bfa_flash_write_send(flash);
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/**
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
/**
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/**
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
		(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}
/**
 * bfa_nw_flash_update_part - Update flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: update data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be on a word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}
/**
 * bfa_nw_flash_read_part - Read flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: read data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be on a word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
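/*
 * Illustrative call, assuming a caller-provided completion callback and a
 * word-aligned buffer; the partition type, names, and sizes here are
 * hypothetical:
 *
 *	static void read_done(void *cbarg, enum bfa_status status);
 *
 *	if (bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				   buf, 4096, 0, read_done, drv) !=
 *	    BFA_STATUS_OK)
 *		handle_busy_or_error();
 */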