/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
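/*
 * Note: BFA_DBG_FWTRC_OFF() yields the SMEM offset of the firmware trace
 * buffer for a given PCI function -- each function gets its own
 * BFA_DBG_FWTRC_LEN sized slot starting at BFI_IOC_TRC_OFF.
 */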
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
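/*
 * Note: a mailbox command counts as pending both while it waits on the
 * driver's cmd_q and while the previous command has not yet been fetched
 * by firmware (hfn_mbox_cmd still reads non-zero).
 */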
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/* Acquiring address		*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
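/*
 * Note: bfa_sm_to_state() walks this table to map the current state
 * handler back to an externally visible bfa_ioc_state (see
 * bfa_ioc_get_state() below).
 */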
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
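/*
 * Note: several handlers intentionally report the same external state;
 * e.g. fwcheck and mismatch both map to BFA_IOCPF_FWMISMATCH, and
 * enabling shares BFA_IOCPF_HWINIT with hwinit.
 */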
/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Acquiring address from fabric (entry function)
 */
static void
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * Acquiring address from the fabric
 */
static void
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
	case IOC_E_HBFAIL:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);

	/* h/w sem init */
	if (fwstate == BFI_IOC_UNINIT)
		goto sem_get;

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		goto sem_get;

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, fwhdr.exec);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
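/*
 * Note: bit 0 reading as zero means this read acquired the semaphore;
 * the spin above gives a busy holder BFA_SEM_SPINCNT chances to release
 * it before giving up.
 */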
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
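/*
 * Note: when the semaphore is busy, the sem timer re-attempts
 * acquisition every BFA_IOC_HWSEM_TOV msecs until IOCPF_E_SEMLOCKED can
 * be posted to the IOCPF state machine.
 */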
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
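/*
 * Note: unused mailbox words are zero-padded above so that firmware
 * never sees stale data from a previous command; the trailing readl()
 * flushes the posted PCI write before the caller proceeds.
 */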
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
}
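/*
 * Note: SMEM is reached through a sliding host page window, so the
 * download loop bumps host_page_num_fn whenever the page offset wraps
 * to zero, then restores page 0 when the image is fully written.
 */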
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32			stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);

	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */

	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	case BFI_IOC_I2H_ACQ_ADDR_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
	INIT_LIST_HEAD(&ioc->notify_q);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case BFA_PCI_DEVICE_ID_FC_8G1P:
	case BFA_PCI_DEVICE_ID_FC_8G2P:
		ioc->asic_gen = BFI_ASIC_GEN_CB;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT_FC:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = BFA_TRUE;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		WARN_ON(1);
	}

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
		bfa_ioc_set_cb_hwif(ioc);
	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_ioc_set_ct2_hwif(ioc);
		bfa_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32			stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
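/*
 * Note: the caller's bfa_mbox_cmd_s stays queued by reference on cmd_q;
 * its payload is copied into the h/w mailbox only when sent, either
 * here or later from bfa_ioc_mbox_poll().
 */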
/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * Return TRUE if IOC is in acquiring address state
 */
bfa_boolean_t
bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state		= bfa_ioc_get_state(ioc);
	ioc_attr->port_id	= ioc->port_id;
	ioc_attr->port_mode	= ioc->port_mode;
	ioc_attr->port_mode_cfg	= ioc->port_mode_cfg;
	ioc_attr->cap_bm	= ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}

mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
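/*
 * Worked example (illustrative values, not from the original source):
 * for a manufacturing MAC of 00:05:1e:00:00:10 on PCI function 2, the
 * old wwn/mac model bumps only the last byte, yielding
 * 00:05:1e:00:00:12; newer models call bfa_mfg_increment_wwn_mac() on
 * the low three bytes instead, so the per-function increment also
 * survives a carry out of the last byte.
 */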
/*
 * Send AEN notification
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;
	enum bfa_ioc_type_e ioc_type;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
		break;
	}

	/* Send the AEN notification */
	aen_entry->aen_data.ioc.ioc_type = ioc_type;
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_IOC, event);
}
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
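/*
 * Note that the wait above is bounded by iteration count, not by
 * wall-clock time: fwsync_iter caps the number of pending-command
 * polls so that a crashed firmware, which never acknowledges the
 * mailbox command, cannot stall the caller indefinitely.
 */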
/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
				u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
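/*
 * Usage sketch (hypothetical caller, illustrative names): the whole
 * firmware smem can be dumped by feeding *offset back in until it
 * wraps to zero; the buffer name and chunk size are assumptions.
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(chunk_buf);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;		(offset and length are zeroed)
 *		...consume len bytes of chunk_buf...
 *	} while (off != 0);		(off wraps to 0 past end of smem)
 */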
/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
	if (ioc->attr->nwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
	if (ioc->attr->pwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}
/*
 *  BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
		bfa_iocpf_timeout(ioc);
	else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}

static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}
/*
 *  bfa timer function
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			/* this timer has expired */
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		    void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}

/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
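/*
 * Usage sketch (hypothetical, illustrative names): arming a one-shot
 * timer that fires my_cb(my_arg) after roughly two beats, given that
 * the driver calls bfa_timer_beat() every BFA_TIMER_FREQ msecs:
 *
 *	bfa_timer_begin(mod, &my_timer, my_cb, my_arg,
 *			2 * BFA_TIMER_FREQ);
 *	...
 *	bfa_timer_stop(&my_timer);	(only if it has not fired yet)
 *
 * Callbacks run from bfa_timer_beat() context, so they execute under
 * whatever lock protects the timer queue.
 */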
/*
 *	ASIC block related
 */
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
	struct bfa_ablk_cfg_inst_s *cfg_inst;
	int i, j;
	u16	be16;
	u32	be32;

	for (i = 0; i < BFA_ABLK_MAX; i++) {
		cfg_inst = &cfg->inst[i];
		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
			be16 = cfg_inst->pf_cfg[j].pers;
			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_qpairs;
			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_vectors;
			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
			be32 = cfg_inst->pf_cfg[j].bw;
			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
		}
	}
}
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		if (rsp->status == BFA_STATUS_OK) {
			memcpy(ablk->cfg, ablk->dma_addr.kva,
				sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		break;

	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}
static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

	bfa_trc(ablk->ioc, event);

	switch (event) {
	case BFA_IOC_E_ENABLED:
		WARN_ON(ablk->busy != BFA_FALSE);
		break;

	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending requests */
		ablk->pcifn = NULL;
		if (ablk->busy) {
			if (ablk->cbfn)
				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
			ablk->cbfn = NULL;
			ablk->busy = BFA_FALSE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}
}
u32
bfa_ablk_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
	ablk->dma_addr.kva = dma_kva;
	ablk->dma_addr.pa  = dma_pa;
}

void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
	ablk->ioc = ioc;

	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
	bfa_q_qe_init(&ablk->ioc_notify);
	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}
bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_query_s *m;

	WARN_ON(!ablk_cfg);

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cfg = ablk_cfg;
	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
		    bfa_ioc_portid(ablk->ioc));
	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
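/*
 * Usage sketch (hypothetical caller, illustrative names; assumes
 * bfa_ablk_cbfn_t takes a (void *, status) pair): every ASIC-block
 * request follows the same single-outstanding-command pattern.
 *
 *	static void my_done(void *arg, enum bfa_status status)
 *	{
 *		...handle query completion...
 *	}
 *
 *	if (bfa_ablk_query(ablk, &my_cfg, my_done, my_arg) ==
 *	    BFA_STATUS_OK)
 *		the request is queued; ablk->busy stays BFA_TRUE until
 *		the BFI_ABLK_I2H_QUERY response clears it in
 *		bfa_ablk_isr() and my_done() runs with the status.
 */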
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		u8 port, enum bfi_pcifn_class personality, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->pcifn = pcifn;
	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
		    bfa_ioc_portid(ablk->ioc));
	m->pers = cpu_to_be16((u16)personality);
	m->bw = cpu_to_be32(bw);
	m->port = port;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->port = (u8)port;
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	m->bw = cpu_to_be32(bw);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn  = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy  = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 *	SFP module specific
 */

/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
				enum bfa_port_speed portspeed);
static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->lock);
	if (sfp->cbfn)
		sfp->cbfn(sfp->cbarg, sfp->status);
	sfp->lock = 0;
	sfp->data_valid = 0;
}

static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->portspeed);
	if (sfp->media) {
		bfa_sfp_media_get(sfp);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->media = NULL;
	}

	if (sfp->portspeed) {
		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	}

	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
}
/*
 *	IOC event handler.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
	struct bfa_sfp_s *sfp = sfp_arg;

	bfa_trc(sfp, event);
	bfa_trc(sfp, sfp->lock);
	bfa_trc(sfp, sfp->state_query_lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (sfp->lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_show(sfp);
		}

		if (sfp->state_query_lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_state_query(sfp);
		}
		break;

	default:
		break;
	}
}
/*
 * SFP's State Change Notification post to AEN
 */
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;
	enum bfa_port_aen_event aen_evt = 0;

	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
		      ((u64)rsp->event));

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		aen_evt = BFA_PORT_AEN_SFP_INSERT;
		break;
	case BFA_SFP_SCN_REMOVED:
		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
		break;
	case BFA_SFP_SCN_FAILED:
		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
		break;
	case BFA_SFP_SCN_POM:
		aen_evt = BFA_PORT_AEN_SFP_POM;
		aen_entry->aen_data.port.level = rsp->pomlvl;
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
				  BFA_AEN_CAT_PORT, aen_evt);
}
static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	bfa_trc(sfp, req->memtype);

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
			bfa_ioc_portid(sfp->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}

/*
 *	SFP is valid, read sfp data
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	WARN_ON(sfp->lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->lock = 1;
	sfp->memtype = memtype;
	req->memtype = memtype;

	/* Setup SG list */
	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

	bfa_sfp_getdata_send(sfp);
}
/*
 *	SFP scn handler
 */
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		sfp->state = BFA_SFP_STATE_INSERTED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_REMOVED:
		sfp->state = BFA_SFP_STATE_REMOVED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_FAILED:
		sfp->state = BFA_SFP_STATE_FAILED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		sfp->state = BFA_SFP_STATE_UNSUPPORT;
		bfa_sfp_scn_aen_post(sfp, rsp);
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	case BFA_SFP_SCN_POM:
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_VALID:
		sfp->state = BFA_SFP_STATE_VALID;
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}
}
/*
 *	SFP show complete
 */
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

	if (!sfp->lock) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(sfp, sfp->lock);
		return;
	}

	bfa_trc(sfp, rsp->status);
	if (rsp->status == BFA_STATUS_OK) {
		sfp->data_valid = 1;
		if (sfp->state == BFA_SFP_STATE_VALID)
			sfp->status = BFA_STATUS_OK;
		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
			sfp->status = BFA_STATUS_SFP_UNSUPP;
		else
			bfa_trc(sfp, sfp->state);
	} else {
		sfp->data_valid = 0;
		sfp->status = rsp->status;
		/* sfpshow shouldn't change sfp state */
	}

	bfa_trc(sfp, sfp->memtype);
	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
		bfa_trc(sfp, sfp->data_valid);
		if (sfp->data_valid) {
			u32	size = sizeof(struct sfp_mem_s);
			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
			memcpy(des, sfp->dbuf_kva, size);
		}
		/*
		 * Queue completion callback.
		 */
		bfa_cb_sfp_show(sfp);
	} else
		sfp->lock = 0;

	bfa_trc(sfp, sfp->state_query_lock);
	if (sfp->state_query_lock) {
		sfp->state = rsp->state;
		/* Complete callback */
		bfa_cb_sfp_state_query(sfp);
	}
}
/*
 *	SFP query fw sfp state
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* Should not be doing query if not in _INIT state */
	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
	WARN_ON(sfp->state_query_lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->state_query_lock = 1;
	req->memtype = 0;

	if (!sfp->lock)
		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}
static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
	enum bfa_defs_sfp_media_e *media = sfp->media;

	*media = BFA_SFP_MEDIA_UNKNOWN;

	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
		*media = BFA_SFP_MEDIA_UNSUPPORT;
	else if (sfp->state == BFA_SFP_STATE_VALID) {
		union sfp_xcvr_e10g_code_u e10g;
		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
				(sfpmem->srlid_base.xcvr[5] >> 1);

		e10g.b = sfpmem->srlid_base.xcvr[0];
		bfa_trc(sfp, e10g.b);
		bfa_trc(sfp, xmtr_tech);
		/* check fc transmitter tech */
		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
		    (xmtr_tech & SFP_XMTR_TECH_CA))
			*media = BFA_SFP_MEDIA_CU;
		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
			*media = BFA_SFP_MEDIA_EL;
		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
			 (xmtr_tech & SFP_XMTR_TECH_LC))
			*media = BFA_SFP_MEDIA_LW;
		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
			 (xmtr_tech & SFP_XMTR_TECH_SA))
			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
		else if (e10g.r.e10g_sr)
			*media = BFA_SFP_MEDIA_SW;
		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
			*media = BFA_SFP_MEDIA_LW;
		else if (e10g.r.e10g_unall)
			*media = BFA_SFP_MEDIA_UNKNOWN;
		else
			bfa_trc(sfp, 0);
	} else
		bfa_trc(sfp, sfp->state);
}
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;

	if (portspeed == BFA_PORT_SPEED_10GBPS) {
		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
			return BFA_STATUS_OK;
		else {
			bfa_trc(sfp, e10g.b);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
		return BFA_STATUS_OK;
	else {
		bfa_trc(sfp, portspeed);
		bfa_trc(sfp, fc3.b);
		bfa_trc(sfp, e10g.b);
		return BFA_STATUS_UNSUPP_SPEED;
	}
}
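/*
 * Worked example (illustrative): a user request of
 * BFA_PORT_SPEED_8GBPS is accepted only when the FC-3 speed code
 * advertises mb800, while a 10G request is judged by the 10G Ethernet
 * compliance bits (e10g_sr/e10g_lr); any combination not matched above
 * falls through to BFA_STATUS_UNSUPP_SPEED.
 */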
/*
 *	SFP hmbox handler
 */
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
	struct bfa_sfp_s *sfp = sfparg;

	switch (msg->mh.msg_id) {
	case BFI_SFP_I2H_SHOW:
		bfa_sfp_show_comp(sfp, msg);
		break;

	case BFI_SFP_I2H_SCN:
		bfa_sfp_scn(sfp, msg);
		break;

	default:
		bfa_trc(sfp, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 *	Return DMA memory needed by sfp module.
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
 *	Attach virtual and physical memory for SFP.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}

/*
 *	Claim Memory for SFP
 */
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
	sfp->dbuf_kva = dm_kva;
	sfp->dbuf_pa  = dm_pa;
	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));

	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}
/*
 * Show SFP eeprom content
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data
 *
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;
	sfp->sfpmem = sfpmem;

	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
/*
 * Return SFP Media type
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] media - media type detected for this SFP
 *
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	bfa_sfp_media_get(sfp);
	return BFA_STATUS_OK;
}
/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp   - bfa sfp module
 * @param[in] portspeed - port speed from user
 *
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;	/* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
/*
 *	Flash module specific
 */

/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid write segment to cross sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
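/*
 * Sizing arithmetic (illustrative; the sizeof value below is a
 * hypothetical example, not the real structure size): with
 * sizeof(struct bfa_mfg_block_s) == 0x100, the buffer becomes
 * BFA_ROUNDUP(0x010000 + 0x100, 2048) = 0x10800 bytes, i.e. the 64k
 * asic block plus the MFG block, rounded up to the next 2k segment so
 * a write never straddles a sector boundary.
 */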
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
			int inst, int type)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
	aen_entry->aen_data.audit.partition_inst = inst;
	aen_entry->aen_data.audit.partition_type = type;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_AUDIT, event);
}

static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_flash_s *flash = cbarg;

	bfa_trc(flash, event);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
/*
 * Send flash attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
			(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Send flash write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
	struct bfi_flash_write_req_s *msg =
			(struct bfi_flash_write_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);

	flash->residue -= len;
	flash->offset += len;
}
/*
 * Send flash read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_read_req_s *msg =
			(struct bfi_flash_read_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Send flash erase request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_erase_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_erase_req_s *msg =
			(struct bfi_flash_erase_req_s *) flash->mb.msg;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;
	u32	param;

	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_flash_event_s *event;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;

			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
		break;
	case BFI_FLASH_I2H_EVENT:
		status = be32_to_cpu(m.event->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_BAD_FWCFG)
			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
		else if (status == BFA_STATUS_INVALID_VENDOR) {
			param = be32_to_cpu(m.event->param);
			bfa_trc(flash, param);
			bfa_ioc_aen_post(flash->ioc,
				BFA_IOC_AEN_INVALID_VENDOR);
		}
		break;

	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need flash */
	if (mincfg)
		return 0;
	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * @param[in] flash - flash structure
 * @param[in] ioc  - ioc structure
 * @param[in] dev  - device structure
 * @param[in] trcmod - trace module
 * @param[in] logmod - log module
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);

	/* min driver doesn't need flash */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}
/*
 * Claim memory for flash
 *
 * @param[in] flash - flash structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
		bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);

	return BFA_STATUS_OK;
}
/*
 * Erase flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;

	bfa_flash_erase_send(flash);
	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
				instance, type);
	return BFA_STATUS_OK;
}
/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
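/*
 * Boundary check example (illustrative): len = 4096 at offset = 0x8000
 * passes (word-aligned length, 16k-aligned offset), while len = 4095
 * or offset = 0x8004 is rejected with BFA_STATUS_FLASH_BAD_LEN before
 * any mailbox traffic is generated.
 */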
/*
 * Read flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;
	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
/*
 *	DIAG module specific
 */

#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
#define BFA_DIAG_FWPING_TOV	1000	/* msec */
/* IOC event handler */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;

	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
		}

		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
					   diag->tsensor.status);
			diag->tsensor.lock = 0;
		}

		if (diag->block) {
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}

			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;

	default:
		break;
	}
}
static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			 sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}
/*
 * Perform DMA test directly
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32	i;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with pattern */
	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;

	/* get host buffer */
	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;

	/* Setup SG list */
	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	/* Set up dma count */
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	/* Set up data pattern */
	fwping_req->data = diag->fwping.data;

	/* build host command */
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
			bfa_ioc_portid(diag->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}
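/*
 * Usage sketch (hypothetical caller, illustrative names): a PCIe DMA
 * sanity check that pings the firmware ten times with a fixed pattern:
 *
 *	if (bfa_diag_fwping(diag, 10, 0xdeadbeef, &my_result,
 *			    my_done, my_arg) == BFA_STATUS_OK)
 *		my_done() is later invoked with the compare status.
 *
 * The expected DMA pattern depends on the parity of the loop count
 * (the pattern is inverted on odd counts), which diag_fwping_comp()
 * accounts for when verifying the buffer.
 */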
static void
diag_fwping_comp(struct bfa_diag_s *diag,
		 struct bfi_diag_fwping_rsp_s *diag_rsp)
{
	u32	rsp_data = diag_rsp->data;
	u8	rsp_dma_status = diag_rsp->dma_status;

	bfa_trc(diag, rsp_data);
	bfa_trc(diag, rsp_dma_status);

	if (rsp_dma_status == BFA_STATUS_OK) {
		u32	i, pat;
		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
			diag->fwping.data;
		/* Check mbox data */
		if (diag->fwping.data != rsp_data) {
			bfa_trc(diag, rsp_data);
			diag->fwping.result->dmastatus =
					BFA_STATUS_DATACORRUPTED;
			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
			return;
		}
		/* Check dma pattern */
		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
				bfa_trc(diag, i);
				bfa_trc(diag, pat);
				bfa_trc(diag,
					*((u32 *)diag->fwping.dbuf_kva + i));
				diag->fwping.result->dmastatus =
						BFA_STATUS_DATACORRUPTED;
				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
				diag->fwping.cbfn(diag->fwping.cbarg,
						diag->fwping.status);
				diag->fwping.lock = 0;
				return;
			}
		}
		diag->fwping.result->dmastatus = BFA_STATUS_OK;
		diag->fwping.status = BFA_STATUS_OK;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	} else {
		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	}
}
/*
 * Temperature Sensor
 */

static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;

	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		    bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}
static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;
	diag->tsensor.temp->status = BFA_STATUS_OK;

	if (rsp->ts_brd) {
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else {
			bfa_trc(diag, rsp->status);
			diag->tsensor.temp->brd_temp = 0;
			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
		}
	}
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value. We do it here because division is needed
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}

static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
	bfa_trc(diag, diag->ledtest.lock);
	diag->ledtest.lock = BFA_FALSE;
	/* no bfa_cb_queue is needed because driver is not waiting */
}
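/*
 * Conversion example (illustrative): a request of 10 blinks per 10
 * seconds becomes 500 / 10 = 50 crossbow ontime units, while any
 * request above 500 divides down to 0 and is clamped to 1 so the LED
 * still blinks.
 */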
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;

	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
			bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}

static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
	bfa_trc(diag, diag->beacon.state);
	diag->beacon.state = BFA_FALSE;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}
/*
 *	Diag hmbox handler
 */
void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;

	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 *   @param[in] *diag           - diag data struct
 *   @param[in] *memtest        - mem test params input from upper layer,
 *   @param[in] pattern         - mem test pattern
 *   @param[in] *result         - mem test result
 *   @param[in] cbfn            - mem test callback function
 *   @param[in] cbarg           - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
		u32 pattern, struct bfa_diag_memtest_result *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	bfa_trc(diag, pattern);

	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
		return BFA_STATUS_ADAPTER_ENABLED;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block) {
		bfa_trc(diag, diag->block);
		return BFA_STATUS_DEVBUSY;
	} else
		diag->block = 1;

	diag->result = result;
	diag->cbfn = cbfn;
	diag->cbarg = cbarg;

	/* download memtest code and take LPU0 out of reset */
	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);

	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
			bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
	diag->timer_active = 1;
	return BFA_STATUS_OK;
}
/*
 * DIAG firmware ping command
 *
 *   @param[in] *diag           - diag data struct
 *   @param[in] cnt             - dma loop count for testing PCIE
 *   @param[in] data            - data pattern to pass in fw
 *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;

	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;

	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
/*
 * Read Temperature Sensor
 *
 *   @param[in] *diag           - diag data struct
 *   @param[in] *result         - pt to bfa_diag_temp_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s *diag,
		struct bfa_diag_results_tempsensor_s *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	/* check to see if there is a destructive diag cmd running */
	if (diag->block || diag->tsensor.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->tsensor.lock);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Init diag mod params */
	diag->tsensor.lock = 1;
	diag->tsensor.temp = result;
	diag->tsensor.cbfn = cbfn;
	diag->tsensor.cbarg = cbarg;

	/* Send msg to fw */
	diag_tempsensor_send(diag);

	return BFA_STATUS_OK;
}
/*
 *   @param[in] *diag           - diag data struct
 *   @param[in] *ledtest        - pt to ledtest data structure
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	bfa_trc(diag, ledtest->cmd);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (diag->beacon.state)
		return BFA_STATUS_BEACON_ON;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	/* Send msg to fw */
	diag->ledtest.lock = BFA_TRUE;
	diag_ledtest_send(diag, ledtest);

	return BFA_STATUS_OK;
}
/*
 * Port beaconing command
 *
 *   @param[in] *diag           - diag data struct
 *   @param[in] beacon          - port beaconing 1:ON   0:OFF
 *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
 *   @param[in] sec             - beaconing duration in seconds
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
		bfa_boolean_t link_e2e_beacon, uint32_t sec)
{
	bfa_trc(diag, beacon);
	bfa_trc(diag, link_e2e_beacon);
	bfa_trc(diag, sec);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (diag->ledtest.lock)
		return BFA_STATUS_LEDTEST_OP;

	if (diag->beacon.state && beacon)	/* beacon already on */
		return BFA_STATUS_BEACON_ON;

	diag->beacon.state	= beacon;
	diag->beacon.link_e2e	= link_e2e_beacon;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);

	/* Send msg to fw */
	diag_portbeacon_send(diag, beacon, sec);

	return BFA_STATUS_OK;
}
5034 * Return DMA memory needed by diag module.
5037 bfa_diag_meminfo(void)
5039 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ
, BFA_DMA_ALIGN_SZ
);
/*
 * Attach virtual and physical memory for Diag.
 */
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
	diag->dev = dev;
	diag->ioc = ioc;
	diag->trcmod = trcmod;

	diag->block = 0;
	diag->cbfn = NULL;
	diag->cbarg = NULL;
	diag->result = NULL;
	diag->cbfn_beacon = cbfn_beacon;

	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
	bfa_q_qe_init(&diag->ioc_notify);
	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}
void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
	diag->fwping.dbuf_kva = dm_kva;
	diag->fwping.dbuf_pa = dm_pa;
	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
/*
 *	PHY module specific
 */
#define BFA_PHY_DMA_BUF_SZ	0x02000 /* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS	0x018878 /* phy semaphore status reg */

static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
	int i, m = sz >> 2;

	for (i = 0; i < m; i++)
		obuf[i] = be32_to_cpu(ibuf[i]);
}
static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}
static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_phy_s *phy = cbarg;

	bfa_trc(phy, event);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (phy->op_busy) {
			phy->status = BFA_STATUS_IOC_FAILURE;
			phy->cbfn(phy->cbarg, phy->status);
			phy->op_busy = 0;
		}
		break;

	default:
		break;
	}
}
/*
 * Send phy attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_query_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_query_req_s *msg =
			(struct bfi_phy_query_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_write_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_write_req_s *msg =
			(struct bfi_phy_write_req_s *) phy->mb.msg;
	u32	len;
	u16	*buf, *dbuf;
	int	i, sz;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == phy->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);

	buf = (u16 *) (phy->ubuf + phy->offset);
	dbuf = (u16 *)phy->dbuf_kva;
	sz = len >> 1;
	for (i = 0; i < sz; i++)
		buf[i] = cpu_to_be16(dbuf[i]);

	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);

	phy->residue -= len;
	phy->offset += len;
}
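
/*
 * Worked example of the chunking above: a 12KB (0x3000 byte) phy image
 * takes two mailbox requests. First pass: residue = 0x3000, so
 * len = BFA_PHY_DMA_BUF_SZ = 0x2000 and msg->last = 0. Second pass:
 * residue = 0x1000 < 0x2000, so len = 0x1000 and msg->last = 1,
 * telling the firmware this is the final fragment of the write.
 */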
/*
 * Send phy read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_read_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_read_req_s *msg =
			(struct bfi_phy_read_req_s *) phy->mb.msg;
	u32	len;

	msg->instance = phy->instance;
	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
			phy->residue : BFA_PHY_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Send phy stats request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_stats_send(void *cbarg)
{
	struct bfa_phy_s *phy = cbarg;
	struct bfi_phy_stats_req_s *msg =
			(struct bfi_phy_stats_req_s *) phy->mb.msg;

	msg->instance = phy->instance;
	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
		bfa_ioc_portid(phy->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Phy memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need phy */
	if (mincfg)
		return 0;

	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
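
/*
 * For example, assuming BFA_DMA_ALIGN_SZ is 256 (0x100),
 * BFA_ROUNDUP(0x2000, 0x100) == 0x2000: the 8k phy buffer is already
 * aligned, so no extra padding is reserved.
 */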
/*
 * Phy attach API.
 *
 * @param[in] phy - phy structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	phy->ioc = ioc;
	phy->trcmod = trcmod;
	phy->cbfn = NULL;
	phy->cbarg = NULL;
	phy->op_busy = 0;

	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
	bfa_q_qe_init(&phy->ioc_notify);
	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);

	/* min driver doesn't need phy */
	if (mincfg) {
		phy->dbuf_kva = NULL;
		phy->dbuf_pa = 0;
	}
}
/*
 * Claim memory for phy
 *
 * @param[in] phy - phy structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
		bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	phy->dbuf_kva = dm_kva;
	phy->dbuf_pa = dm_pa;
	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
	void __iomem	*rb;

	rb = bfa_ioc_bar0(ioc);
	return readl(rb + BFA_PHY_LOCK_STATUS);
}
/*
 * Get phy attribute.
 *
 * @param[in] phy - phy structure
 * @param[in] attr - phy attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (uint8_t *) attr;
	bfa_phy_query_send(phy);

	return BFA_STATUS_OK;
}
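
/*
 * Example (illustrative sketch): the attr buffer is filled from the
 * DMA buffer in bfa_phy_intr() before the callback runs, so it must
 * stay valid until completion. The callback name is hypothetical.
 *
 *	rc = bfa_phy_get_attr(phy, 0, &phy_attr, phy_attr_done, &phy_attr);
 *	if (rc == BFA_STATUS_DEVBUSY)
 *		// only one phy operation may be outstanding per IOC
 */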
/*
 * Get phy stats.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
		struct bfa_phy_stats_s *stats,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
	bfa_trc(phy, instance);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->ubuf = (u8 *) stats;
	bfa_phy_stats_send(phy);

	return BFA_STATUS_OK;
}
/*
 * Update phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;

	bfa_phy_write_send(phy);
	return BFA_STATUS_OK;
}
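
/*
 * Example (illustrative sketch): writing a new phy image. len must be
 * a multiple of 4; images larger than BFA_PHY_DMA_BUF_SZ are split
 * into chunks by bfa_phy_write_send() and the BFI_PHY_I2H_WRITE_RSP
 * handler. The callback name is hypothetical.
 *
 *	rc = bfa_phy_update(phy, instance, img_buf, img_len, 0,
 *			    phy_update_done, NULL);
 */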
/*
 * Read phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
		void *buf, u32 len, u32 offset,
		bfa_cb_phy_t cbfn, void *cbarg)
{
	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
	bfa_trc(phy, instance);
	bfa_trc(phy, len);
	bfa_trc(phy, offset);

	if (!bfa_phy_present(phy))
		return BFA_STATUS_PHY_NOT_PRESENT;

	if (!bfa_ioc_is_operational(phy->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* 'len' must be in word (4-byte) boundary */
	if (!len || (len & 0x03))
		return BFA_STATUS_FAILED;

	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
		bfa_trc(phy, phy->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	phy->op_busy = 1;
	phy->cbfn = cbfn;
	phy->cbarg = cbarg;
	phy->instance = instance;
	phy->residue = len;
	phy->offset = 0;
	phy->addr_off = offset;
	phy->ubuf = buf;
	bfa_phy_read_send(phy);

	return BFA_STATUS_OK;
}
/*
 * Process phy response messages upon receiving interrupts.
 *
 * @param[in] phyarg - phy structure
 * @param[in] msg - message structure
 */
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_phy_s *phy = phyarg;
	u32	status;

	union {
		struct bfi_phy_query_rsp_s *query;
		struct bfi_phy_stats_rsp_s *stats;
		struct bfi_phy_write_rsp_s *write;
		struct bfi_phy_read_rsp_s *read;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(phy, msg->mh.msg_id);

	if (!phy->op_busy) {
		/* receiving response after ioc failure */
		bfa_trc(phy, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_PHY_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_attr_s *attr =
				(struct bfa_phy_attr_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_attr_s));
			bfa_trc(phy, attr->status);
			bfa_trc(phy, attr->length);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_STATS_RSP:
		status = be32_to_cpu(m.stats->status);
		bfa_trc(phy, status);

		if (status == BFA_STATUS_OK) {
			struct bfa_phy_stats_s *stats =
				(struct bfa_phy_stats_s *) phy->ubuf;
			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
					sizeof(struct bfa_phy_stats_s));
			bfa_trc(phy, stats->status);
		}

		phy->status = status;
		phy->op_busy = 0;
		if (phy->cbfn)
			phy->cbfn(phy->cbarg, phy->status);
		break;
	case BFI_PHY_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK || phy->residue == 0) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			bfa_trc(phy, phy->offset);
			bfa_phy_write_send(phy);
		}
		break;
	case BFI_PHY_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(phy, status);

		if (status != BFA_STATUS_OK) {
			phy->status = status;
			phy->op_busy = 0;
			if (phy->cbfn)
				phy->cbfn(phy->cbarg, phy->status);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
			u16 *dbuf = (u16 *)phy->dbuf_kva;
			int i, sz = len >> 1;

			bfa_trc(phy, phy->offset);
			bfa_trc(phy, len);

			for (i = 0; i < sz; i++)
				buf[i] = be16_to_cpu(dbuf[i]);

			phy->residue -= len;
			phy->offset += len;

			if (phy->residue == 0) {
				phy->status = status;
				phy->op_busy = 0;
				if (phy->cbfn)
					phy->cbfn(phy->cbarg, phy->status);
			} else
				bfa_phy_read_send(phy);
		}
		break;
	default:
		WARN_ON(1);
	}
}
/*
 *	DCONF module specific
 */

/*
 * DCONF state machine events
 */
enum bfa_dconf_event {
	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer */
	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
};
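
/*
 * Transition sketch of the state machine implemented by the handlers
 * below (reconstructed from the code; informational only):
 *
 *	uninit --INIT--> flash_read --FLASH_COMP/TIMEOUT--> ready
 *	ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *	dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 *	dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 */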
/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
				enum bfa_dconf_event event);

static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
/*
 * Beginning state of dconf module. Waiting for an event to start.
 */
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		if (dconf->min_cfg) {
			bfa_trc(dconf->bfa, dconf->min_cfg);
			return;
		}
		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
		dconf->flashdone = BFA_FALSE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
					BFA_FLASH_PART_DRV, dconf->instance,
					dconf->dconf,
					sizeof(struct bfa_dconf_s), 0,
					bfa_dconf_init_cb, dconf->bfa);
		if (bfa_status != BFA_STATUS_OK) {
			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
			return;
		}
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_WR:
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Read flash for dconf entries and make a call back to the driver once done.
 */
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * DCONF Module is in ready state. Has completed the initialization.
 */
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	case BFA_DCONF_SM_INIT:
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Entries are dirty, write back to the flash.
 */
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_stop(&dconf->timer);
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		bfa_dconf_flash_write(dconf);
		break;
	case BFA_DCONF_SM_FLASH_COMP:
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_timer_stop(&dconf->timer);
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Sync the dconf entries to the flash.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		dconf->flashdone = BFA_TRUE;
		bfa_trc(dconf->bfa, dconf->flashdone);
		bfa_ioc_disable(&dconf->bfa->ioc);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		dconf->flashdone = BFA_TRUE;
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}
/*
 * Compute and return memory needed by DRV_CFG module.
 */
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_hdr_s));
	else
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_s));
}
static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
		/*
		 * Set the flashdone flag to TRUE explicitly as no flash
		 * write will happen in min_cfg mode.
		 */
		dconf->flashdone = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}
static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->flashdone = BFA_TRUE;
	bfa_trc(bfa, dconf->flashdone);
	bfa_iocfc_cb_dconf_modinit(bfa, status);
	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modinit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}

static void bfa_dconf_timer(void *cbarg)
{
	struct bfa_dconf_mod_s *dconf = cbarg;
	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
	bfa_status_t bfa_status;
	bfa_trc(dconf->bfa, 0);

	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
				BFA_FLASH_PART_DRV, dconf->instance,
				dconf->dconf, sizeof(struct bfa_dconf_s), 0,
				bfa_dconf_cbfn, dconf);
	if (bfa_status != BFA_STATUS_OK)
		WARN_ON(bfa_status);
	bfa_trc(dconf->bfa, bfa_status);

	return bfa_status;
}
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	bfa_trc(dconf->bfa, 0);
	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
		return BFA_STATUS_FAILED;

	if (dconf->min_cfg) {
		bfa_trc(dconf->bfa, dconf->min_cfg);
		return BFA_STATUS_FAILED;
	}

	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
	return BFA_STATUS_OK;
}
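
/*
 * Example (illustrative sketch): driver-config writes are lazy. The
 * WR event only arms BFA_DCONF_UPDATE_TOV; the flash update happens
 * when the timer fires in the dirty state.
 *
 *	// after modifying the in-memory config under dconf->dconf
 *	if (bfa_dconf_update(bfa) == BFA_STATUS_FAILED)
 *		// refused: IOC down with dirty config, or min_cfg mode
 */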
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
	struct bfa_dconf_mod_s *dconf = arg;

	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}
void
bfa_dconf_modexit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
	BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
	bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}