/* Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
/* IOC local definitions */

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
		((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)	\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
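/*
 * Illustrative sketch (not part of the driver): the macros above are
 * indirect calls through the per-ASIC ops table in ioc->ioc_hwif, so the
 * same IOC code drives different chip generations. The function names in
 * this sketch are hypothetical; the real tables are filled in by the
 * ASIC-specific files referenced in the comment above:
 *
 *	static const struct bfa_ioc_hwif sample_hwif = {
 *		.ioc_firmware_lock	= my_asic_firmware_lock,
 *		.ioc_firmware_unlock	= my_asic_firmware_unlock,
 *		.ioc_reg_init		= my_asic_reg_init,
 *		.ioc_map_port		= my_asic_map_port,
 *	};
 *
 *	ioc->ioc_hwif = &sample_hwif;
 *	bfa_ioc_reg_init(ioc);	// expands to my_asic_reg_init(ioc)
 */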
static bool bfa_nw_auto_recover = true;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
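/*
 * Illustrative sketch (not part of the driver): tables like the one above
 * let the driver report an externally visible enum for whatever handler
 * the FSM currently points at. A lookup over such a table typically walks
 * entries until the handler matches; the helper name below is hypothetical
 * (the real lookup lives in the common state-machine header):
 *
 *	static enum bfa_ioc_state
 *	sample_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
 *	{
 *		int i = 0;
 *
 *		while (smt[i].sm && smt[i].sm != sm)
 *			i++;
 *		return smt[i].state;
 *	}
 */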
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in failed state. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;

	list_for_each_entry(notify, &ioc->notify_q, qe)
		notify->cbfn(notify->cbarg, event);
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	return !(r32 & 1);
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1)
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
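/*
 * Usage sketch for the register-based semaphore above (illustrative):
 * a read returning 0 grants the lock; anything else means it is held, so
 * the caller arms sem_timer and retries from the timer callback until the
 * state machine sees IOCPF_E_SEMLOCKED (or IOCPF_E_SEM_ERROR on a bad
 * mapping):
 *
 *	bfa_ioc_hw_sem_get(ioc);	// may fire IOCPF_E_SEMLOCKED at once
 *	// ... critical section, e.g. firmware download ...
 *	bfa_nw_ioc_hw_sem_release(ioc);	// writel(1) frees the lock
 */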
/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		loff += sizeof(u32);
	}
}
static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}
/* Returns TRUE if major minor and maintenance are same.
 * If patch version are same, check for MD5 Checksum to be same.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}
static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return true;

	return false;
}
/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */
	if (fwhdr_is_ga(base_fwhdr)) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All Version Numbers are equal.
	 * Md5 check to be done as a part of compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
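/*
 * Worked example for the ordering above (version values hypothetical):
 * with a base of 3.2.23.0 that is GA (phase 0, build 0) and a candidate
 * 3.2.23.0 with phase 1, build 367, the patch numbers tie, the base is GA
 * and the candidate is not, so the candidate is reported
 * BFI_IOC_IMG_VER_OLD. An incompatible signature or major/minor/maint
 * mismatch would have short-circuited to BFI_IOC_IMG_VER_INCOMP before any
 * of the tie-breaking comparisons ran.
 */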
/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};
/* Flash raw private functions */
static u32
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
	return 0;
}

static u32
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
	return 0;
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}
/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}
/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}
/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	int status;

	/* len must be multiple of 4 and not exceeding fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}
/* Check flash read operation. */
static int
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}
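/*
 * The three primitives above compose into one fifo-sized read, which is
 * how bfa_flash_raw_read() below drives them. Sketch (illustrative, error
 * handling and retry bounds elided):
 *
 *	bfa_flash_read_start(pci_bar, off, len, buf);	// len <= 128, 4-aligned
 *	while (bfa_flash_read_check(pci_bar))
 *		;				// poll until cmd-active clears
 *	bfa_flash_read_end(pci_bar, len, buf);	// drain fifo, byte-swap
 */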
/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}
static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}
#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}
static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}
/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD)
		return false;

	/* IF Flash has a better F/W than smem do not work with smem.
	 * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it.
	 * If Flash is old or incomp work with smem iff smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
}
/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
								BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
							BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}
void
bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
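/*
 * Mailbox write layout sketch (illustrative): a command occupies up to
 * BFI_IOC_MSGLEN_MAX bytes of hfn_mbox registers; the unused tail is
 * zeroed so stale words from a previous command are never interpreted, and
 * the final writel(1, hfn_mbox_cmd) rings the LPU doorbell. The trailing
 * readl() flushes the posted PCI writes before returning. Typical caller:
 *
 *	struct bfi_ioc_ctrl_req req;	// filled in, see senders below
 *	bfa_ioc_mbox_send(ioc, &req, sizeof(req));
 */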
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	enable_req.rsvd = htons(0);
	/* overflow in 2106 */
	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = htons(ioc->clscode);
	disable_req.rsvd = htons(0);
	/* overflow in 2106 */
	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
void
bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
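/*
 * Heartbeat cycle sketch (illustrative): firmware increments the heartbeat
 * register continuously; the host samples it once per BFA_IOC_HB_TOV. A
 * sample equal to the previous one means firmware made no progress, and
 * bfa_ioc_recover() escalates to the state machine:
 *
 *	bfa_ioc_hb_monitor(ioc);	// op-state entry: seed hb_count, arm timer
 *	// ... hb_timer callback fires periodically ...
 *	bfa_nw_ioc_hb_check(ioc);	// compare, poll mbox, re-arm timer
 *	bfa_ioc_hb_stop(ioc);		// on leaving the operational state
 */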
/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH)
		boot_type = BFI_FWBOOT_TYPE_NORMAL;

	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
}
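/*
 * Chunk arithmetic example (illustrative numbers, assuming the usual
 * power-of-two chunk size): word i of the image lives in chunk
 * BFA_IOC_FLASH_CHUNK_NO(i) at index BFA_IOC_FLASH_OFFSET_IN_CHUNK(i).
 * With a 256-word chunk, word 600 = 2*256 + 88 is word 88 of chunk 2. The
 * loop above fetches a fresh chunk whenever the chunk number changes, and
 * bumps host_page_num_fn each time loff wraps a smem page.
 */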
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
		     u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
	list_del(&cmd->qe);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/*
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}
/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q)) {
		cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
		list_del(&cmd->qe);
	}
}
/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc:	memory for IOC
 * @tbuf:	app memory to store data from smem
 * @soff:	smem offset
 * @sz:		size of smem in bytes
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return 0;
}
/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
	int tlen, status = 0;

	tlen = *trclen;
	if (tlen > BNA_DBG_FWTRC_LEN)
		tlen = BNA_DBG_FWTRC_LEN;

	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = false;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}
/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
}
/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/*
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
		/* Work with Flash iff flash f/w is better than driver f/w.
		 * Otherwise push drivers firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
			BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
	} else {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
	if (status == BFA_STATUS_OK)
		bfa_ioc_lpu_start(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);

	return status;
}
/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}
/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc:	memory for IOC
 * @bfa:	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}
/**
 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
 *
 * @pcidev:	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		    enum bfi_pcifn_class clscode)
{
	ioc->clscode = clscode;
	ioc->pcidev = *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		BUG_ON(1);
	}

	/*
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_nw_ioc_set_ct2_hwif(ioc);
		bfa_nw_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
 *
 * @dm_kva:	kernel virtual address of IOC dma memory
 * @dm_pa:	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
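
/*
 * Example (illustrative sketch): a common module registers one handler per
 * message class at attach time and thereafter receives every firmware
 * message of that class via bfa_nw_ioc_mbox_isr(). The flash module below
 * does exactly this:
 *
 *	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH,
 *			       bfa_flash_intr, flash);
 */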

/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc:	IOC instance
 * @cmd:	Mailbox command
 * @cbfn:	callback function
 * @cbarg:	callback argument
 *
 * Waits if mailbox is busy. Responsibility of caller to serialize
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
		      bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}
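
/*
 * Note on the return value (sketch of the intended contract): true means
 * the command was parked on mod->cmd_q and will be sent later by
 * bfa_ioc_mbox_poll() from the poll timer or mailbox ISR path; false
 * means it was handed to the firmware immediately. Callers that do not
 * care simply fire and forget, as the flash module does:
 *
 *	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
 */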

/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/**
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/**
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
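
/*
 * Example (illustrative sketch): a module embeds a struct bfa_ioc_notify,
 * initializes it with its event handler, and registers it:
 *
 *	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
 *	bfa_nw_ioc_notify_register(flash->ioc, &flash->ioc_notify);
 *
 * (bfa_nw_flash_attach() below achieves the same effect by linking the
 * embedded node onto ioc->notify_q directly.)
 */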

#define BFA_MFG_NAME "QLogic"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	bfa_nw_ioc_get_mac(ioc, ad_attr->mac);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

void
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
	ether_addr_copy(mac, ioc->attr->mac);
}

/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			  msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
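
/*
 * Worked example of the polling budget above (illustrative; the actual
 * values live in bfa_ioc.h): assuming BFA_IOC_TOV == 3000 msecs and
 * BFA_IOC_POLL_TOV == 200 msecs, the firmware init state is re-checked
 * every 200 ms and IOCPF_E_TIMEOUT is raised after at most:
 *
 *	3000 / 200 = 15 timer refires before poll_time >= BFA_IOC_TOV
 */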

/*
 *	Flash module specific
 */

/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid a write segment crossing a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
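
/*
 * Worked sizing example (illustrative; assumes, for the sake of the
 * arithmetic only, sizeof(struct bfa_mfg_block) == 256): 0x010000 + 256 =
 * 65792 bytes, which roundup() pads to the next 2048-byte multiple,
 * giving 67584 bytes (33 flash segments).
 */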

static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;

	default:
		break;
	}
}

/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
			(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}
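
/*
 * A large update is thus streamed in BFA_FLASH_DMA_BUF_SZ chunks: each
 * BFI_FLASH_I2H_WRITE_RSP for a non-final chunk makes bfa_flash_intr()
 * call bfa_flash_write_send() again, until residue reaches zero. For
 * example (illustrative, using the 67584-byte buffer size sketched
 * above), a 150000-byte image goes out as chunks of 67584, 67584 and
 * 14832 bytes, with msg->last set only on the final chunk.
 */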

/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
			(struct bfi_flash_read_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}

/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32 status;

	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32 i;
			struct bfa_flash_attr *attr, *f;

			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else
			bfa_flash_write_send(flash);
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
			       flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		break;

	default:
		WARN_ON(1);
	}
}

/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}

/**
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
			(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}

/**
 * bfa_nw_flash_update_part - Update flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: update data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}

/**
 * bfa_nw_flash_read_part - Read flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: read data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
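
/*
 * Example (illustrative sketch; my_done() and my_dev are hypothetical):
 * reading 4 KiB from the start of the boot-image partition. The call
 * returns immediately; my_done() runs from the mailbox ISR path once the
 * last chunk has been copied into buf:
 *
 *	static void my_done(void *cbarg, enum bfa_status status) { ... }
 *	...
 *	if (bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				   buf, 4096, 0, my_done, my_dev) !=
 *	    BFA_STATUS_OK)
 *		(handle busy or non-operational IOC)
 */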