/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	5
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_INITFAILED	= 8,	/* failure notice by iocpf sm	*/
	IOC_E_PFFAILED		= 9,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 10,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 11,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 12,	/* timeout			*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
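
/*
 * IOC enable entry -- hand the enable request to the IOCPF state machine.
 */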
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
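
/*
 * IOC enable completed -- notify the enable completion callback and start
 * heartbeat monitoring.
 */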
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);

		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
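
/*
 * IOC is in disabled state.
 */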
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
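
/*
 * Hardware initialization retry entry.
 */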
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_INITFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_complete(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
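
/*
 * IOCPF is in ready state -- awaiting disable requests or failure events.
 */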
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc)) {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		} else {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		}
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
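
/*
 * IOCPF is disabled -- awaiting a re-enable request or driver detach.
 */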
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}
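
/*
 * IOC failure occurred -- awaiting the h/w semaphore to synchronize failure
 * processing with the other functions on the chip.
 */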
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head			*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	WARN_ON(cnt >= BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
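
/*
 * Initialize the h/w based on the current firmware state: boot fresh
 * firmware if none is valid, or rejoin firmware that is already running.
 */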
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
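
/*
 * Send a host-to-firmware message through this function's mailbox registers.
 */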
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
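
/*
 * Heartbeat timer callback -- recover the IOC if the heartbeat count has
 * stopped advancing, otherwise rearm the timer.
 */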
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      swab32(boot_env));
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
	attr->card_type    = be32_to_cpu(attr->card_type);
	attr->maxfrsize    = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}

bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
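
/*
 * Return true if the IOC is operational.
 */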
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}

/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * It is the caller's responsibility to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	/* mbhdlr[] has BFI_MC_MAX entries, so mc must be strictly below it */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}

/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}

#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
				!ad_attr->is_mezz;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		WARN_ON(ioc->ioc_mc != BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
        enum bfa_iocpf_state iocpf_st;
        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

        if (ioc_st == BFA_IOC_ENABLING ||
            ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

                switch (iocpf_st) {
                case BFA_IOCPF_SEMWAIT:
                        ioc_st = BFA_IOC_SEMWAIT;
                        break;

                case BFA_IOCPF_HWINIT:
                        ioc_st = BFA_IOC_HWINIT;
                        break;

                case BFA_IOCPF_FWMISMATCH:
                        ioc_st = BFA_IOC_FWMISMATCH;
                        break;

                case BFA_IOCPF_FAIL:
                        ioc_st = BFA_IOC_FAIL;
                        break;

                case BFA_IOCPF_INITFAIL:
                        ioc_st = BFA_IOC_INITFAIL;
                        break;

                default:
                        break;
                }
        }

        return ioc_st;
}
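
/*
 * Fill a caller-supplied attribute block with the current IOC state,
 * adapter attributes and PCI attributes.
 */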
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;

        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
        ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
        /*
         * Check the IOC type and return the appropriate MAC
         */
        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
                return ioc->attr->fcoe_mac;
        else
                return ioc->attr->mac;
}
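
/*
 * Derive this function's MAC from the base manufacturing MAC by
 * offsetting it with the PCI function number; older card models
 * increment only the last byte of the address.
 */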
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
        mac_t   m;

        m = ioc->attr->mfg_mac;
        if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
                m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
        else
                bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
                        bfa_ioc_pcifn(ioc));

        return m;
}
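
/*
 * Return TRUE when the IOC operates in FC mode, i.e. fcmode was
 * explicitly configured or the ASIC is not a CT device and therefore
 * supports FC only.
 */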
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
        return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        int     tlen;

        if (ioc->dbg_fwsave_len == 0)
                return BFA_STATUS_ENOFSAVE;

        tlen = *trclen;
        if (tlen > ioc->dbg_fwsave_len)
                tlen = ioc->dbg_fwsave_len;

        memcpy(trcdata, ioc->dbg_fwsave, tlen);
        *trclen = tlen;
        return BFA_STATUS_OK;
}

/*
 * Retrieve the current firmware trace, read directly from smem.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
        int tlen;
        bfa_status_t status;

        bfa_trc(ioc, *trclen);

        tlen = *trclen;
        if (tlen > BFA_DBG_FWTRC_LEN)
                tlen = BFA_DBG_FWTRC_LEN;

        status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
        *trclen = tlen;
        return status;
}
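
/*
 * Ask the firmware, via a DBG_SYNC mailbox command, to sync smem so
 * that a subsequent debug read sees a consistent snapshot.
 */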
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
        struct bfa_mbox_cmd_s cmd;
        struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
                    bfa_ioc_portid(ioc));
        req->ioc_class = ioc->ioc_mc;
        bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
        u32 fwsync_iter = 1000;

        bfa_ioc_send_fwsync(ioc);

        /*
         * After sending a fw sync mbox command wait for it to
         * take effect. We will not wait for a response because
         *    1. fw_sync mbox cmd doesn't have a response.
         *    2. Even if we implement that, interrupts might not
         *       be enabled when we call this function.
         * So, just keep checking if any mbox cmd is pending, and
         * after waiting for a reasonable amount of time, go ahead.
         * It is possible that fw has crashed and the mbox command
         * is never acknowledged.
         */
        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
                fwsync_iter--;
}

/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
                     u32 *offset, int *buflen)
{
        u32 loff;
        int dlen;
        bfa_status_t status;
        u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

        if (*offset >= smem_len) {
                *offset = *buflen = 0;
                return BFA_STATUS_EINVAL;
        }

        loff = *offset;
        dlen = *buflen;

        /*
         * First smem read, sync smem before proceeding
         * No need to sync before reading every chunk.
         */
        if (loff == 0)
                bfa_ioc_fwsync(ioc);

        if ((loff + dlen) >= smem_len)
                dlen = smem_len - loff;

        status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

        if (status != BFA_STATUS_OK) {
                *offset = *buflen = 0;
                return status;
        }

        *offset += dlen;

        if (*offset >= smem_len)
                *offset = 0;

        *buflen = dlen;

        return status;
}
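
/*
 * A minimal usage sketch for bfa_ioc_debug_fwcore(); the caller and
 * the chunk_buf/consume() names are hypothetical. The dump is read
 * in chunks: *offset advances on every call and wraps back to 0 once
 * the entire smem region has been returned:
 *
 *      u32 off = 0;
 *      int len;
 *
 *      do {
 *              len = sizeof(chunk_buf);
 *              if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *                  BFA_STATUS_OK)
 *                      break;
 *              consume(chunk_buf, len);
 *      } while (off != 0);
 */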

/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
        u32 loff = BFI_IOC_FWSTATS_OFF +
                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
        int tlen;
        bfa_status_t status;

        if (ioc->stats_busy) {
                bfa_trc(ioc, ioc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }
        ioc->stats_busy = BFA_TRUE;

        tlen = sizeof(struct bfa_fw_stats_s);
        status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

        ioc->stats_busy = BFA_FALSE;
        return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
        u32 loff = BFI_IOC_FWSTATS_OFF +
                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
        int tlen;
        bfa_status_t status;

        if (ioc->stats_busy) {
                bfa_trc(ioc, ioc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }
        ioc->stats_busy = BFA_TRUE;

        tlen = sizeof(struct bfa_fw_stats_s);
        status = bfa_ioc_smem_clr(ioc, loff, tlen);

        ioc->stats_busy = BFA_FALSE;
        return status;
}

/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
        int tlen;

        if (ioc->dbg_fwsave_once) {
                ioc->dbg_fwsave_once = BFA_FALSE;
                if (ioc->dbg_fwsave_len) {
                        tlen = ioc->dbg_fwsave_len;
                        bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
                }
        }
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
                return;
}

/*
 * BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_trc(ioc, 0);
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_ioc_hw_sem_get(ioc);
}

/*
 * bfa timer function
 */
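
/*
 * Called on every timer tick (each BFA_TIMER_FREQ msecs): age all
 * active timers and invoke the callbacks of those that have expired.
 */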
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
        struct list_head *qh = &mod->timer_q;
        struct list_head *qe, *qe_next;
        struct bfa_timer_s *elem;
        struct list_head timedout_q;

        INIT_LIST_HEAD(&timedout_q);

        qe = bfa_q_next(qh);

        while (qe != qh) {
                qe_next = bfa_q_next(qe);

                elem = (struct bfa_timer_s *) qe;
                if (elem->timeout <= BFA_TIMER_FREQ) {
                        elem->timeout = 0;
                        list_del(&elem->qe);
                        list_add_tail(&elem->qe, &timedout_q);
                } else {
                        elem->timeout -= BFA_TIMER_FREQ;
                }

                qe = qe_next;   /* go to next elem */
        }

        /*
         * Pop all the timeout entries
         */
        while (!list_empty(&timedout_q)) {
                bfa_q_deq(&timedout_q, &elem);
                elem->timercb(elem->arg);
        }
}

/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
                void (*timercb) (void *), void *arg, unsigned int timeout)
{
        WARN_ON(timercb == NULL);
        WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

        timer->timeout = timeout;
        timer->timercb = timercb;
        timer->arg = arg;

        list_add_tail(&timer->qe, &mod->timer_q);
}
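
/*
 * A minimal usage sketch with the lock held; mod, my_timer, my_cb and
 * my_arg are hypothetical. The timeout is in msecs:
 *
 *      bfa_timer_begin(mod, &my_timer, my_cb, my_arg, 2000);
 *      ...
 *      bfa_timer_stop(&my_timer);
 */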

/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
        WARN_ON(list_empty(&timer->qe));

        list_del(&timer->qe);
}