// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
				struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
				struct bfa_ioc_s *ioc,
				struct bfi_ioc_image_hdr_s *base_fwhdr);
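
/*
 * Two cooperating state machines drive the IOC: the bfa_ioc_sm_* states
 * below track the logical IOC (enable, getattr, operational, fail), while
 * the bfa_iocpf_sm_* states further down own the per-PCI-function work:
 * hardware semaphores, firmware version checks, download and the
 * enable/disable mailbox handshake. The two layers communicate only
 * through bfa_fsm_send_event() calls on each other.
 */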

/*
 * IOC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

struct bfa_ioc_sm_table {
	bfa_ioc_sm_t	sm;		/* state machine function	*/
	enum bfa_ioc_state state;	/* state machine encoding	*/
	char		*name;		/* state name for display	*/
};

static struct bfa_ioc_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

static inline enum bfa_ioc_state
bfa_ioc_sm_to_state(struct bfa_ioc_sm_table *smt, bfa_ioc_sm_t sm)
{
	int	i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}
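
/*
 * bfa_ioc_sm_to_state() does a linear scan of the table above to
 * translate the current state-machine function pointer into the
 * externally visible bfa_ioc_state encoding used when reporting IOC
 * attributes. The IOCPF table below is translated the same way by
 * bfa_iocpf_sm_to_state().
 */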

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

struct bfa_iocpf_sm_table {
	bfa_iocpf_sm_t	sm;		/* state machine function	*/
	enum bfa_iocpf_state state;	/* state machine encoding	*/
	char		*name;		/* state name for display	*/
};

static inline enum bfa_iocpf_state
bfa_iocpf_sm_to_state(struct bfa_iocpf_sm_table *smt, bfa_iocpf_sm_t sm)
{
	int	i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}

static struct bfa_iocpf_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		fallthrough;
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		fallthrough;
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		fallthrough;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		fallthrough;

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
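
/*
 * Two hardware semaphores are used below: ioc_init_sem_reg serializes
 * early chip init (pll init, fwtrc and the fwcheck cleanup above) and is
 * spun on directly, while ioc_sem_reg protects the shared firmware state
 * and is acquired asynchronously through bfa_ioc_hw_sem_get(), which
 * retries from the sem_timer when the semaphore is busy.
 */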
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
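
/*
 * A usage sketch: callers such as bfa_ioc_smem_read() below pair the
 * acquire with the read-then-write-1 release sequence used throughout
 * this file:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == BFA_FALSE)
 *		return BFA_STATUS_FAILED;
 *	... access the chip ...
 *	readl(ioc->ioc_regs.ioc_init_sem_reg);
 *	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 */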

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
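
/*
 * Firmware version policy: three images can be in play -- the image
 * compiled into the driver, the image currently running from smem, and
 * the image in the flash FWIMG partition. bfa_ioc_fwver_cmp() below
 * decides whether the driver can keep the smem image by comparing it
 * against both the driver and flash headers.
 */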

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/*
	 * If smem is incompatible or old, driver should not work with it.
	 */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return BFA_FALSE;
	}

	/*
	 * If Flash has a better F/W than smem do not work with smem.
	 * If smem f/w == flash f/w, as smem f/w is not old or incompatible,
	 * work with it.
	 * If Flash is old or incompatible, work with smem iff smem f/w ==
	 * drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
		return BFA_FALSE;
	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
		return BFA_TRUE;
	} else {
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			BFA_TRUE : BFA_FALSE;
	}
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
				struct bfi_ioc_image_hdr_s *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Returns TRUE if major, minor and maintenance versions are the same.
 * If patch versions are same, check for MD5 Checksum to be same.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
	}

	return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return BFA_FALSE;

	return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
			fwhdr->fwver.build == 0)
		return BFA_TRUE;

	return BFA_FALSE;
}
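
/*
 * A GA (general availability) image is identified by zero phase and
 * build numbers; bfa_ioc_fw_ver_patch_cmp() below uses this to prefer
 * GA images over internal builds of the same patch stream.
 */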

/*
 * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better.
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;

	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */

	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * All Version Numbers are equal.
	 * Md5 check to be done as a part of compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
				u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
			struct bfi_ioc_image_hdr_s *base_fwhdr)
{
	struct bfi_ioc_image_hdr_s *flash_fwhdr;
	bfa_status_t status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{
	u32	pgnum;
	u32	loff = 0;
	enum bfi_ioc_state ioc_fwstate;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (!bfa_ioc_state_disabled(ioc_fwstate))
		return BFA_STATUS_ADAPTER_ENABLED;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);

	return BFA_STATUS_OK;
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
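
/*
 * Decide how to bring the IOC up based on the current firmware state:
 * boot a fresh image if none is valid, wait for the other function's
 * in-progress initialization, or simply re-enable a matching image that
 * is already disabled or operational.
 */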
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
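
/*
 * Host-to-firmware mailbox: the message is copied into the hfn_mbox
 * registers (zero-padded up to BFI_IOC_MSGLEN_MAX), then writing 1 to
 * hfn_mbox_cmd raises the LPU event; the trailing readl flushes the
 * posted write. hfn_mbox_cmd stays set until firmware has fetched the
 * message, which is what bfa_ioc_mbox_poll() tests before sending more.
 */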
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	/* unsigned 32-bit time_t overflow in y2106 */
	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	disable_req.clscode = cpu_to_be16(ioc->clscode);
	/* unsigned 32-bit time_t overflow in y2106 */
	disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
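
/*
 * Heartbeat monitoring: firmware increments the heartbeat register as
 * long as it is healthy. bfa_ioc_hb_check() samples it every
 * BFA_IOC_HB_TOV msecs; an unchanged count means the firmware has
 * stalled and recovery is started, otherwise the pending mailbox queue
 * is kicked and the timer is re-armed.
 */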
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
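
/*
 * The download loop below streams the image into smem through a sliding
 * page window: host_page_num_fn selects the current page, and every time
 * the in-page offset wraps (PSS_SMEM_PGOFF() returns 0) the page number
 * is advanced. The source is either the flash FWIMG partition, re-read
 * one BFI_FLASH_CHUNK_SZ chunk at a time, or the chunked image compiled
 * into the driver.
 */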
/*
 * Initiate a full firmware download.
 */
static bfa_status_t
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	bfa_status_t status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	bfa_trc(ioc, fwimg_size);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);

			if (boot_env == BFI_FWBOOT_ENV_OS &&
				boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
	return BFA_STATUS_OK;
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
	attr->mfg_year	= be16_to_cpu(attr->mfg_year);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Dequeue the next pending command and send it to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
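
/*
 * bfa_ioc_smem_read() and bfa_ioc_smem_clr() below use the same sliding
 * page window as the firmware download to walk smem word by word,
 * byte-swapping on the way out, and hold the init semaphore for the
 * duration so that pll init and fwtrc accesses cannot interleave.
 */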
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = swab32(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);
	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}

bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * Initialize LMEM
	 */
	bfa_ioc_lmem_init(ioc);

	/*
	 *  release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
bfa_status_t
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	bfa_status_t status;
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_NORMAL) {

		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

		/*
		 * Work with Flash iff flash f/w is better than driver f/w.
		 * Otherwise push the driver's firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
						BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
	} else {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
	if (status == BFA_STATUS_OK)
		bfa_ioc_lpu_start(ioc);
	else {
		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
		bfa_iocpf_timeout(ioc);
	}
	return status;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
2329 * IOC attach time initialization and setup.
2331 * @param[in] ioc memory for IOC
2332 * @param[in] bfa driver instance structure
2334 void
2335 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2336 struct bfa_timer_mod_s *timer_mod)
2338 ioc->bfa = bfa;
2339 ioc->cbfn = cbfn;
2340 ioc->timer_mod = timer_mod;
2341 ioc->fcmode = BFA_FALSE;
2342 ioc->pllinit = BFA_FALSE;
2343 ioc->dbg_fwsave_once = BFA_TRUE;
2344 ioc->iocpf.ioc = ioc;
2346 bfa_ioc_mbox_attach(ioc);
2347 INIT_LIST_HEAD(&ioc->notify_q);
2349 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2350 bfa_fsm_send_event(ioc, IOC_E_RESET);
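
/*
 * Editor-added sketch (not part of the original source): the typical
 * attach-time call order for this API, based on the functions defined
 * in this file. Argument names are illustrative.
 *
 *	bfa_ioc_attach(&ioc, bfa, &ioc_cbfn, &timer_mod);
 *	bfa_ioc_pci_init(&ioc, &pcidev, clscode);
 *	bfa_ioc_mem_claim(&ioc, dm_kva, dm_pa);
 *	bfa_ioc_enable(&ioc);	(completes asynchronously via cbfn)
 */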
2354 * Driver detach time IOC cleanup.
2356 void
2357 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2359 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2360 INIT_LIST_HEAD(&ioc->notify_q);
2364 * Setup IOC PCI properties.
2366 * @param[in] pcidev PCI device information for this IOC
2367 * @param[in] clscode PCI function class code
2368 void
2369 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2370 enum bfi_pcifn_class clscode)
2372 ioc->clscode = clscode;
2373 ioc->pcidev = *pcidev;
2376 * Initialize IOC and device personality
2378 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2379 ioc->asic_mode = BFI_ASIC_MODE_FC;
2381 switch (pcidev->device_id) {
2382 case BFA_PCI_DEVICE_ID_FC_8G1P:
2383 case BFA_PCI_DEVICE_ID_FC_8G2P:
2384 ioc->asic_gen = BFI_ASIC_GEN_CB;
2385 ioc->fcmode = BFA_TRUE;
2386 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2387 ioc->ad_cap_bm = BFA_CM_HBA;
2388 break;
2390 case BFA_PCI_DEVICE_ID_CT:
2391 ioc->asic_gen = BFI_ASIC_GEN_CT;
2392 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2393 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2394 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2395 ioc->ad_cap_bm = BFA_CM_CNA;
2396 break;
2398 case BFA_PCI_DEVICE_ID_CT_FC:
2399 ioc->asic_gen = BFI_ASIC_GEN_CT;
2400 ioc->fcmode = BFA_TRUE;
2401 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2402 ioc->ad_cap_bm = BFA_CM_HBA;
2403 break;
2405 case BFA_PCI_DEVICE_ID_CT2:
2406 case BFA_PCI_DEVICE_ID_CT2_QUAD:
2407 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2408 if (clscode == BFI_PCIFN_CLASS_FC &&
2409 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2410 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2411 ioc->fcmode = BFA_TRUE;
2412 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2413 ioc->ad_cap_bm = BFA_CM_HBA;
2414 } else {
2415 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2416 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2417 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2418 ioc->port_mode =
2419 ioc->port_mode_cfg = BFA_MODE_CNA;
2420 ioc->ad_cap_bm = BFA_CM_CNA;
2421 } else {
2422 ioc->port_mode =
2423 ioc->port_mode_cfg = BFA_MODE_NIC;
2424 ioc->ad_cap_bm = BFA_CM_NIC;
2427 break;
2429 default:
2430 WARN_ON(1);
2434 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2436 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2437 bfa_ioc_set_cb_hwif(ioc);
2438 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2439 bfa_ioc_set_ct_hwif(ioc);
2440 else {
2441 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2442 bfa_ioc_set_ct2_hwif(ioc);
2443 bfa_ioc_ct2_poweron(ioc);
2446 bfa_ioc_map_port(ioc);
2447 bfa_ioc_reg_init(ioc);
2451 * Initialize IOC dma memory
2453 * @param[in] dm_kva kernel virtual address of IOC dma memory
2454 * @param[in] dm_pa physical address of IOC dma memory
2456 void
2457 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2460 * dma memory for firmware attribute
2462 ioc->attr_dma.kva = dm_kva;
2463 ioc->attr_dma.pa = dm_pa;
2464 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2467 void
2468 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2470 bfa_ioc_stats(ioc, ioc_enables);
2471 ioc->dbg_fwsave_once = BFA_TRUE;
2473 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2476 void
2477 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2479 bfa_ioc_stats(ioc, ioc_disables);
2480 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2483 void
2484 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2486 ioc->dbg_fwsave_once = BFA_TRUE;
2487 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2491 * Initialize memory for saving firmware trace. Driver must initialize
2492 * trace memory before calling bfa_ioc_enable().
2494 void
2495 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2497 ioc->dbg_fwsave = dbg_fwsave;
2498 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2502 * Register mailbox message handler functions
2504 * @param[in] ioc IOC instance
2505 * @param[in] mcfuncs message class handler functions
2507 void
2508 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2510 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2511 int mc;
2513 for (mc = 0; mc < BFI_MC_MAX; mc++)
2514 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2518 * Register mailbox message handler function, to be called by common modules
2520 void
2521 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2522 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2524 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2526 mod->mbhdlr[mc].cbfn = cbfn;
2527 mod->mbhdlr[mc].cbarg = cbarg;
2531 * Queue a mailbox command request to firmware. If the mailbox is busy,
2532 * the command is queued and sent later; callers must serialize.
2534 * @param[in] ioc IOC instance
2535 * @param[in] cmd Mailbox command
2537 void
2538 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2540 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2541 u32 stat;
2544 * If a previous command is pending, queue new command
2546 if (!list_empty(&mod->cmd_q)) {
2547 list_add_tail(&cmd->qe, &mod->cmd_q);
2548 return;
2552 * If mailbox is busy, queue command for poll timer
2554 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2555 if (stat) {
2556 list_add_tail(&cmd->qe, &mod->cmd_q);
2557 return;
2561 * mailbox is free -- queue command to firmware
2563 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
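
/*
 * Editor-added sketch: how a sub-module pairs bfa_ioc_mbox_regisr()
 * with bfa_ioc_mbox_queue(), mirroring bfa_ablk_query() below. The
 * mymod_* names are hypothetical.
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, mymod_isr, mymod);
 *
 *	req = (struct bfi_ablk_h2i_query_s *)mymod->mb.msg;
 *	bfi_h2i_set(req->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &mymod->mb);
 */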
2567 * Handle mailbox interrupts
2569 void
2570 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2572 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2573 struct bfi_mbmsg_s m;
2574 int mc;
2576 if (bfa_ioc_msgget(ioc, &m)) {
2578 * Treat IOC message class as special.
2580 mc = m.mh.msg_class;
2581 if (mc == BFI_MC_IOC) {
2582 bfa_ioc_isr(ioc, &m);
2583 return;
2586 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2587 return;
2589 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2592 bfa_ioc_lpu_read_stat(ioc);
2595 * Try to send pending mailbox commands
2597 bfa_ioc_mbox_poll(ioc);
2600 void
2601 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2603 bfa_ioc_stats(ioc, ioc_hbfails);
2604 ioc->stats.hb_count = ioc->hb_count;
2605 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2609 * return true if IOC is disabled
2611 bfa_boolean_t
2612 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2614 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2615 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2619 * return true if IOC firmware is different.
2621 bfa_boolean_t
2622 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2624 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2625 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2626 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2630 * Check if adapter is disabled -- both IOCs should be in a disabled
2631 * state.
2633 bfa_boolean_t
2634 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2636 u32 ioc_state;
2638 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2639 return BFA_FALSE;
2641 ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2642 if (!bfa_ioc_state_disabled(ioc_state))
2643 return BFA_FALSE;
2645 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2646 ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2647 if (!bfa_ioc_state_disabled(ioc_state))
2648 return BFA_FALSE;
2651 return BFA_TRUE;
2655 * Reset IOC fwstate registers.
2657 void
2658 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2660 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2661 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2664 #define BFA_MFG_NAME "QLogic"
2665 void
2666 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2667 struct bfa_adapter_attr_s *ad_attr)
2669 struct bfi_ioc_attr_s *ioc_attr;
2671 ioc_attr = ioc->attr;
2673 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2674 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2675 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2676 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2677 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2678 sizeof(struct bfa_mfg_vpd_s));
2680 ad_attr->nports = bfa_ioc_get_nports(ioc);
2681 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2683 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2684 /* For now, model descr uses same model string */
2685 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2687 ad_attr->card_type = ioc_attr->card_type;
2688 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2690 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2691 ad_attr->prototype = 1;
2692 else
2693 ad_attr->prototype = 0;
2695 ad_attr->pwwn = ioc->attr->pwwn;
2696 ad_attr->mac = bfa_ioc_get_mac(ioc);
2698 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2699 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2700 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2701 ad_attr->asic_rev = ioc_attr->asic_rev;
2703 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2705 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2706 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2707 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2708 ad_attr->mfg_day = ioc_attr->mfg_day;
2709 ad_attr->mfg_month = ioc_attr->mfg_month;
2710 ad_attr->mfg_year = ioc_attr->mfg_year;
2711 memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2714 enum bfa_ioc_type_e
2715 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2717 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2718 return BFA_IOC_TYPE_LL;
2720 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2722 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2723 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2726 void
2727 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2729 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2730 memcpy((void *)serial_num,
2731 (void *)ioc->attr->brcd_serialnum,
2732 BFA_ADAPTER_SERIAL_NUM_LEN);
2735 void
2736 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2738 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2739 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2742 void
2743 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2745 WARN_ON(!chip_rev);
2747 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2749 chip_rev[0] = 'R';
2750 chip_rev[1] = 'e';
2751 chip_rev[2] = 'v';
2752 chip_rev[3] = '-';
2753 chip_rev[4] = ioc->attr->asic_rev;
2754 chip_rev[5] = '\0';
2757 void
2758 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2760 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2761 memcpy(optrom_ver, ioc->attr->optrom_version,
2762 BFA_VERSION_LEN);
2765 void
2766 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2768 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2769 strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2772 void
2773 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2775 struct bfi_ioc_attr_s *ioc_attr;
2776 u8 nports = bfa_ioc_get_nports(ioc);
2778 WARN_ON(!model);
2779 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2781 ioc_attr = ioc->attr;
2783 if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2784 (!bfa_mfg_is_mezz(ioc_attr->card_type)))
2785 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2786 BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2787 else
2788 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2789 BFA_MFG_NAME, ioc_attr->card_type);
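
/*
 * Editor-added example: with BFA_MFG_NAME "QLogic", a non-mezz CT2
 * adapter with card_type 1860 and two ports yields "QLogic-1860-2p";
 * other adapters yield "QLogic-<card_type>". The card_type value is
 * illustrative.
 */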
2792 enum bfa_ioc_state
2793 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2795 enum bfa_iocpf_state iocpf_st;
2796 enum bfa_ioc_state ioc_st = bfa_ioc_sm_to_state(ioc_sm_table, ioc->fsm);
2798 if (ioc_st == BFA_IOC_ENABLING ||
2799 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2801 iocpf_st = bfa_iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2803 switch (iocpf_st) {
2804 case BFA_IOCPF_SEMWAIT:
2805 ioc_st = BFA_IOC_SEMWAIT;
2806 break;
2808 case BFA_IOCPF_HWINIT:
2809 ioc_st = BFA_IOC_HWINIT;
2810 break;
2812 case BFA_IOCPF_FWMISMATCH:
2813 ioc_st = BFA_IOC_FWMISMATCH;
2814 break;
2816 case BFA_IOCPF_FAIL:
2817 ioc_st = BFA_IOC_FAIL;
2818 break;
2820 case BFA_IOCPF_INITFAIL:
2821 ioc_st = BFA_IOC_INITFAIL;
2822 break;
2824 default:
2825 break;
2829 return ioc_st;
2832 void
2833 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2835 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2837 ioc_attr->state = bfa_ioc_get_state(ioc);
2838 ioc_attr->port_id = bfa_ioc_portid(ioc);
2839 ioc_attr->port_mode = ioc->port_mode;
2840 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2841 ioc_attr->cap_bm = ioc->ad_cap_bm;
2843 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2845 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2847 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2848 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2849 ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2850 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2853 mac_t
2854 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2857 * Check the IOC type and return the appropriate MAC
2859 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2860 return ioc->attr->fcoe_mac;
2861 else
2862 return ioc->attr->mac;
2865 mac_t
2866 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2868 mac_t m;
2870 m = ioc->attr->mfg_mac;
2871 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2872 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2873 else
2874 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2875 bfa_ioc_pcifn(ioc));
2877 return m;
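
/*
 * Editor-added example: for a hypothetical base manufacturing MAC of
 * aa:bb:cc:dd:ee:00, PCI function 1 yields aa:bb:cc:dd:ee:01 on old
 * WWN/MAC models; newer models derive it across the low three bytes
 * via bfa_mfg_increment_wwn_mac().
 */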
2881 * Send AEN notification
2883 void
2884 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2886 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2887 struct bfa_aen_entry_s *aen_entry;
2888 enum bfa_ioc_type_e ioc_type;
2890 bfad_get_aen_entry(bfad, aen_entry);
2891 if (!aen_entry)
2892 return;
2894 ioc_type = bfa_ioc_get_type(ioc);
2895 switch (ioc_type) {
2896 case BFA_IOC_TYPE_FC:
2897 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2898 break;
2899 case BFA_IOC_TYPE_FCoE:
2900 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2901 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2902 break;
2903 case BFA_IOC_TYPE_LL:
2904 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2905 break;
2906 default:
2907 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2908 break;
2911 /* Send the AEN notification */
2912 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2913 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2914 BFA_AEN_CAT_IOC, event);
2918 * Retrieve saved firmware trace from a prior IOC failure.
2920 bfa_status_t
2921 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2923 int tlen;
2925 if (ioc->dbg_fwsave_len == 0)
2926 return BFA_STATUS_ENOFSAVE;
2928 tlen = *trclen;
2929 if (tlen > ioc->dbg_fwsave_len)
2930 tlen = ioc->dbg_fwsave_len;
2932 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2933 *trclen = tlen;
2934 return BFA_STATUS_OK;
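
/*
 * Editor-added sketch of the fwsave flow: trace memory is claimed once
 * at attach time, saved automatically on the first failure via
 * bfa_ioc_debug_save_ftrc(), and read back later. Buffer names are
 * hypothetical.
 *
 *	bfa_ioc_debug_memclaim(ioc, fwsave_buf);
 *	...
 *	len = BFA_DBG_FWTRC_LEN;
 *	bfa_ioc_debug_fwsave(ioc, user_buf, &len);
 */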
2939 * Read the firmware trace directly from IOC smem.
2941 bfa_status_t
2942 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2944 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2945 int tlen;
2946 bfa_status_t status;
2948 bfa_trc(ioc, *trclen);
2950 tlen = *trclen;
2951 if (tlen > BFA_DBG_FWTRC_LEN)
2952 tlen = BFA_DBG_FWTRC_LEN;
2954 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2955 *trclen = tlen;
2956 return status;
2959 static void
2960 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2962 struct bfa_mbox_cmd_s cmd;
2963 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2965 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2966 bfa_ioc_portid(ioc));
2967 req->clscode = cpu_to_be16(ioc->clscode);
2968 bfa_ioc_mbox_queue(ioc, &cmd);
2971 static void
2972 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2974 u32 fwsync_iter = 1000;
2976 bfa_ioc_send_fwsync(ioc);
2979 * After sending a fw sync mbox command wait for it to
2980 * take effect. We will not wait for a response because
2981 * 1. fw_sync mbox cmd doesn't have a response.
2982 * 2. Even if we implement that, interrupts might not
2983 * be enabled when we call this function.
2984 * So, just keep checking if any mbox cmd is pending, and
2985 * after waiting for a reasonable amount of time, go ahead.
2986 * It is possible that fw has crashed and the mbox command
2987 * is never acknowledged.
2989 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2990 fwsync_iter--;
2994 * Dump firmware smem
2996 bfa_status_t
2997 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2998 u32 *offset, int *buflen)
3000 u32 loff;
3001 int dlen;
3002 bfa_status_t status;
3003 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3005 if (*offset >= smem_len) {
3006 *offset = *buflen = 0;
3007 return BFA_STATUS_EINVAL;
3010 loff = *offset;
3011 dlen = *buflen;
3014 * On the first smem read, sync smem before proceeding;
3015 * there is no need to sync again before each subsequent chunk.
3017 if (loff == 0)
3018 bfa_ioc_fwsync(ioc);
3020 if ((loff + dlen) >= smem_len)
3021 dlen = smem_len - loff;
3023 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3025 if (status != BFA_STATUS_OK) {
3026 *offset = *buflen = 0;
3027 return status;
3030 *offset += dlen;
3032 if (*offset >= smem_len)
3033 *offset = 0;
3035 *buflen = dlen;
3037 return status;
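
/*
 * Editor-added sketch of the chunked dump loop implied by the
 * offset/buflen contract above: keep calling until *offset wraps back
 * to zero. CHUNK_SZ and buf are hypothetical.
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = CHUNK_SZ;
 *		status = bfa_ioc_debug_fwcore(ioc, buf, &off, &len);
 *		if (status != BFA_STATUS_OK)
 *			break;
 *		(consume len bytes from buf)
 *	} while (off != 0);
 */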
3041 * Firmware statistics
3043 bfa_status_t
3044 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3046 u32 loff = BFI_IOC_FWSTATS_OFF + \
3047 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3048 int tlen;
3049 bfa_status_t status;
3051 if (ioc->stats_busy) {
3052 bfa_trc(ioc, ioc->stats_busy);
3053 return BFA_STATUS_DEVBUSY;
3055 ioc->stats_busy = BFA_TRUE;
3057 tlen = sizeof(struct bfa_fw_stats_s);
3058 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3060 ioc->stats_busy = BFA_FALSE;
3061 return status;
3064 bfa_status_t
3065 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3067 u32 loff = BFI_IOC_FWSTATS_OFF + \
3068 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3069 int tlen;
3070 bfa_status_t status;
3072 if (ioc->stats_busy) {
3073 bfa_trc(ioc, ioc->stats_busy);
3074 return BFA_STATUS_DEVBUSY;
3076 ioc->stats_busy = BFA_TRUE;
3078 tlen = sizeof(struct bfa_fw_stats_s);
3079 status = bfa_ioc_smem_clr(ioc, loff, tlen);
3081 ioc->stats_busy = BFA_FALSE;
3082 return status;
3086 * Save firmware trace if configured.
3088 void
3089 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3091 int tlen;
3093 if (ioc->dbg_fwsave_once) {
3094 ioc->dbg_fwsave_once = BFA_FALSE;
3095 if (ioc->dbg_fwsave_len) {
3096 tlen = ioc->dbg_fwsave_len;
3097 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3103 * Firmware failure detected. Start recovery actions.
3105 static void
3106 bfa_ioc_recover(struct bfa_ioc_s *ioc)
3108 bfa_ioc_stats(ioc, ioc_hbfails);
3109 ioc->stats.hb_count = ioc->hb_count;
3110 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3114 * BFA IOC PF private functions
3116 static void
3117 bfa_iocpf_timeout(void *ioc_arg)
3119 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3121 bfa_trc(ioc, 0);
3122 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3125 static void
3126 bfa_iocpf_sem_timeout(void *ioc_arg)
3128 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3130 bfa_ioc_hw_sem_get(ioc);
3133 static void
3134 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3136 u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3138 bfa_trc(ioc, fwstate);
3140 if (fwstate == BFI_IOC_DISABLED) {
3141 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3142 return;
3145 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3146 bfa_iocpf_timeout(ioc);
3147 else {
3148 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3149 bfa_iocpf_poll_timer_start(ioc);
3153 static void
3154 bfa_iocpf_poll_timeout(void *ioc_arg)
3156 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3158 bfa_ioc_poll_fwinit(ioc);
3162 * bfa timer tick: expire elapsed timers and invoke their callbacks.
3164 void
3165 bfa_timer_beat(struct bfa_timer_mod_s *mod)
3167 struct list_head *qh = &mod->timer_q;
3168 struct list_head *qe, *qe_next;
3169 struct bfa_timer_s *elem;
3170 struct list_head timedout_q;
3172 INIT_LIST_HEAD(&timedout_q);
3174 qe = bfa_q_next(qh);
3176 while (qe != qh) {
3177 qe_next = bfa_q_next(qe);
3179 elem = (struct bfa_timer_s *) qe;
3180 if (elem->timeout <= BFA_TIMER_FREQ) {
3181 elem->timeout = 0;
3182 list_del(&elem->qe);
3183 list_add_tail(&elem->qe, &timedout_q);
3184 } else {
3185 elem->timeout -= BFA_TIMER_FREQ;
3188 qe = qe_next; /* go to next elem */
3192 * Pop all the timeout entries
3194 while (!list_empty(&timedout_q)) {
3195 bfa_q_deq(&timedout_q, &elem);
3196 elem->timercb(elem->arg);
3201 * Should be called with lock protection
3203 void
3204 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3205 void (*timercb) (void *), void *arg, unsigned int timeout)
3208 WARN_ON(timercb == NULL);
3209 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3211 timer->timeout = timeout;
3212 timer->timercb = timercb;
3213 timer->arg = arg;
3215 list_add_tail(&timer->qe, &mod->timer_q);
3219 * Should be called with lock protection
3221 void
3222 bfa_timer_stop(struct bfa_timer_s *timer)
3224 WARN_ON(list_empty(&timer->qe));
3226 list_del(&timer->qe);
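
/*
 * Editor-added sketch: the driver is expected to call bfa_timer_beat()
 * every BFA_TIMER_FREQ msecs under the same lock that protects
 * bfa_timer_begin()/bfa_timer_stop(), e.g. from a periodic kernel
 * timer. The my_lock name is hypothetical.
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	bfa_timer_beat(&timer_mod);
 *	spin_unlock_irqrestore(&my_lock, flags);
 */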
3230 * ASIC block related
3232 static void
3233 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3235 struct bfa_ablk_cfg_inst_s *cfg_inst;
3236 int i, j;
3237 u16 be16;
3239 for (i = 0; i < BFA_ABLK_MAX; i++) {
3240 cfg_inst = &cfg->inst[i];
3241 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3242 be16 = cfg_inst->pf_cfg[j].pers;
3243 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3244 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3245 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3246 be16 = cfg_inst->pf_cfg[j].num_vectors;
3247 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3248 be16 = cfg_inst->pf_cfg[j].bw_min;
3249 cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3250 be16 = cfg_inst->pf_cfg[j].bw_max;
3251 cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3256 static void
3257 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3259 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3260 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3261 bfa_ablk_cbfn_t cbfn;
3263 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3264 bfa_trc(ablk->ioc, msg->mh.msg_id);
3266 switch (msg->mh.msg_id) {
3267 case BFI_ABLK_I2H_QUERY:
3268 if (rsp->status == BFA_STATUS_OK) {
3269 memcpy(ablk->cfg, ablk->dma_addr.kva,
3270 sizeof(struct bfa_ablk_cfg_s));
3271 bfa_ablk_config_swap(ablk->cfg);
3272 ablk->cfg = NULL;
3274 break;
3276 case BFI_ABLK_I2H_ADPT_CONFIG:
3277 case BFI_ABLK_I2H_PORT_CONFIG:
3278 /* update config port mode */
3279 ablk->ioc->port_mode_cfg = rsp->port_mode;
3280 break;
3282 case BFI_ABLK_I2H_PF_DELETE:
3283 case BFI_ABLK_I2H_PF_UPDATE:
3284 case BFI_ABLK_I2H_OPTROM_ENABLE:
3285 case BFI_ABLK_I2H_OPTROM_DISABLE:
3286 /* No-op */
3287 break;
3289 case BFI_ABLK_I2H_PF_CREATE:
3290 *(ablk->pcifn) = rsp->pcifn;
3291 ablk->pcifn = NULL;
3292 break;
3294 default:
3295 WARN_ON(1);
3298 ablk->busy = BFA_FALSE;
3299 if (ablk->cbfn) {
3300 cbfn = ablk->cbfn;
3301 ablk->cbfn = NULL;
3302 cbfn(ablk->cbarg, rsp->status);
3306 static void
3307 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3309 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3311 bfa_trc(ablk->ioc, event);
3313 switch (event) {
3314 case BFA_IOC_E_ENABLED:
3315 WARN_ON(ablk->busy != BFA_FALSE);
3316 break;
3318 case BFA_IOC_E_DISABLED:
3319 case BFA_IOC_E_FAILED:
3320 /* Fail any pending requests */
3321 ablk->pcifn = NULL;
3322 if (ablk->busy) {
3323 if (ablk->cbfn)
3324 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3325 ablk->cbfn = NULL;
3326 ablk->busy = BFA_FALSE;
3328 break;
3330 default:
3331 WARN_ON(1);
3332 break;
3337 bfa_ablk_meminfo(void)
3339 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3342 void
3343 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3345 ablk->dma_addr.kva = dma_kva;
3346 ablk->dma_addr.pa = dma_pa;
3349 void
3350 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3352 ablk->ioc = ioc;
3354 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3355 bfa_q_qe_init(&ablk->ioc_notify);
3356 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3357 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3360 bfa_status_t
3361 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3362 bfa_ablk_cbfn_t cbfn, void *cbarg)
3364 struct bfi_ablk_h2i_query_s *m;
3366 WARN_ON(!ablk_cfg);
3368 if (!bfa_ioc_is_operational(ablk->ioc)) {
3369 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3370 return BFA_STATUS_IOC_FAILURE;
3373 if (ablk->busy) {
3374 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3375 return BFA_STATUS_DEVBUSY;
3378 ablk->cfg = ablk_cfg;
3379 ablk->cbfn = cbfn;
3380 ablk->cbarg = cbarg;
3381 ablk->busy = BFA_TRUE;
3383 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3384 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3385 bfa_ioc_portid(ablk->ioc));
3386 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3387 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3389 return BFA_STATUS_OK;
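
/*
 * Editor-added sketch of the asynchronous pattern shared by the
 * bfa_ablk_*() requests: a BFA_STATUS_OK return means the request was
 * issued and the callback (a bfa_ablk_cbfn_t) will fire later from
 * bfa_ablk_isr(); any other return means no callback will fire.
 * my_done/my_arg are hypothetical.
 *
 *	if (bfa_ablk_query(ablk, &cfg, my_done, my_arg) != BFA_STATUS_OK)
 *		(handle the failure synchronously)
 */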
3392 bfa_status_t
3393 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3394 u8 port, enum bfi_pcifn_class personality,
3395 u16 bw_min, u16 bw_max,
3396 bfa_ablk_cbfn_t cbfn, void *cbarg)
3398 struct bfi_ablk_h2i_pf_req_s *m;
3400 if (!bfa_ioc_is_operational(ablk->ioc)) {
3401 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3402 return BFA_STATUS_IOC_FAILURE;
3405 if (ablk->busy) {
3406 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3407 return BFA_STATUS_DEVBUSY;
3410 ablk->pcifn = pcifn;
3411 ablk->cbfn = cbfn;
3412 ablk->cbarg = cbarg;
3413 ablk->busy = BFA_TRUE;
3415 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3416 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3417 bfa_ioc_portid(ablk->ioc));
3418 m->pers = cpu_to_be16((u16)personality);
3419 m->bw_min = cpu_to_be16(bw_min);
3420 m->bw_max = cpu_to_be16(bw_max);
3421 m->port = port;
3422 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3424 return BFA_STATUS_OK;
3427 bfa_status_t
3428 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3429 bfa_ablk_cbfn_t cbfn, void *cbarg)
3431 struct bfi_ablk_h2i_pf_req_s *m;
3433 if (!bfa_ioc_is_operational(ablk->ioc)) {
3434 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3435 return BFA_STATUS_IOC_FAILURE;
3438 if (ablk->busy) {
3439 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3440 return BFA_STATUS_DEVBUSY;
3443 ablk->cbfn = cbfn;
3444 ablk->cbarg = cbarg;
3445 ablk->busy = BFA_TRUE;
3447 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3448 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3449 bfa_ioc_portid(ablk->ioc));
3450 m->pcifn = (u8)pcifn;
3451 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3453 return BFA_STATUS_OK;
3456 bfa_status_t
3457 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3458 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3460 struct bfi_ablk_h2i_cfg_req_s *m;
3462 if (!bfa_ioc_is_operational(ablk->ioc)) {
3463 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3464 return BFA_STATUS_IOC_FAILURE;
3467 if (ablk->busy) {
3468 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3469 return BFA_STATUS_DEVBUSY;
3472 ablk->cbfn = cbfn;
3473 ablk->cbarg = cbarg;
3474 ablk->busy = BFA_TRUE;
3476 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3477 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3478 bfa_ioc_portid(ablk->ioc));
3479 m->mode = (u8)mode;
3480 m->max_pf = (u8)max_pf;
3481 m->max_vf = (u8)max_vf;
3482 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3484 return BFA_STATUS_OK;
3487 bfa_status_t
3488 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3489 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3491 struct bfi_ablk_h2i_cfg_req_s *m;
3493 if (!bfa_ioc_is_operational(ablk->ioc)) {
3494 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3495 return BFA_STATUS_IOC_FAILURE;
3498 if (ablk->busy) {
3499 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3500 return BFA_STATUS_DEVBUSY;
3503 ablk->cbfn = cbfn;
3504 ablk->cbarg = cbarg;
3505 ablk->busy = BFA_TRUE;
3507 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3508 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3509 bfa_ioc_portid(ablk->ioc));
3510 m->port = (u8)port;
3511 m->mode = (u8)mode;
3512 m->max_pf = (u8)max_pf;
3513 m->max_vf = (u8)max_vf;
3514 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3516 return BFA_STATUS_OK;
3519 bfa_status_t
3520 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3521 u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3523 struct bfi_ablk_h2i_pf_req_s *m;
3525 if (!bfa_ioc_is_operational(ablk->ioc)) {
3526 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3527 return BFA_STATUS_IOC_FAILURE;
3530 if (ablk->busy) {
3531 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3532 return BFA_STATUS_DEVBUSY;
3535 ablk->cbfn = cbfn;
3536 ablk->cbarg = cbarg;
3537 ablk->busy = BFA_TRUE;
3539 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3540 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3541 bfa_ioc_portid(ablk->ioc));
3542 m->pcifn = (u8)pcifn;
3543 m->bw_min = cpu_to_be16(bw_min);
3544 m->bw_max = cpu_to_be16(bw_max);
3545 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3547 return BFA_STATUS_OK;
3550 bfa_status_t
3551 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3553 struct bfi_ablk_h2i_optrom_s *m;
3555 if (!bfa_ioc_is_operational(ablk->ioc)) {
3556 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3557 return BFA_STATUS_IOC_FAILURE;
3560 if (ablk->busy) {
3561 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3562 return BFA_STATUS_DEVBUSY;
3565 ablk->cbfn = cbfn;
3566 ablk->cbarg = cbarg;
3567 ablk->busy = BFA_TRUE;
3569 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3570 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3571 bfa_ioc_portid(ablk->ioc));
3572 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3574 return BFA_STATUS_OK;
3577 bfa_status_t
3578 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3580 struct bfi_ablk_h2i_optrom_s *m;
3582 if (!bfa_ioc_is_operational(ablk->ioc)) {
3583 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3584 return BFA_STATUS_IOC_FAILURE;
3587 if (ablk->busy) {
3588 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3589 return BFA_STATUS_DEVBUSY;
3592 ablk->cbfn = cbfn;
3593 ablk->cbarg = cbarg;
3594 ablk->busy = BFA_TRUE;
3596 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3597 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3598 bfa_ioc_portid(ablk->ioc));
3599 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3601 return BFA_STATUS_OK;
3605 * SFP module specific
3608 /* forward declarations */
3609 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3610 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3611 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3612 enum bfa_port_speed portspeed);
3614 static void
3615 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3617 bfa_trc(sfp, sfp->lock);
3618 if (sfp->cbfn)
3619 sfp->cbfn(sfp->cbarg, sfp->status);
3620 sfp->lock = 0;
3621 sfp->cbfn = NULL;
3624 static void
3625 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3627 bfa_trc(sfp, sfp->portspeed);
3628 if (sfp->media) {
3629 bfa_sfp_media_get(sfp);
3630 if (sfp->state_query_cbfn)
3631 sfp->state_query_cbfn(sfp->state_query_cbarg,
3632 sfp->status);
3633 sfp->media = NULL;
3636 if (sfp->portspeed) {
3637 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3638 if (sfp->state_query_cbfn)
3639 sfp->state_query_cbfn(sfp->state_query_cbarg,
3640 sfp->status);
3641 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3644 sfp->state_query_lock = 0;
3645 sfp->state_query_cbfn = NULL;
3649 * IOC event handler.
3651 static void
3652 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3654 struct bfa_sfp_s *sfp = sfp_arg;
3656 bfa_trc(sfp, event);
3657 bfa_trc(sfp, sfp->lock);
3658 bfa_trc(sfp, sfp->state_query_lock);
3660 switch (event) {
3661 case BFA_IOC_E_DISABLED:
3662 case BFA_IOC_E_FAILED:
3663 if (sfp->lock) {
3664 sfp->status = BFA_STATUS_IOC_FAILURE;
3665 bfa_cb_sfp_show(sfp);
3668 if (sfp->state_query_lock) {
3669 sfp->status = BFA_STATUS_IOC_FAILURE;
3670 bfa_cb_sfp_state_query(sfp);
3672 break;
3674 default:
3675 break;
3680 * Post an SFP State Change Notification (SCN) to the AEN.
3682 static void
3683 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3685 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3686 struct bfa_aen_entry_s *aen_entry;
3687 enum bfa_port_aen_event aen_evt = 0;
3689 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3690 ((u64)rsp->event));
3692 bfad_get_aen_entry(bfad, aen_entry);
3693 if (!aen_entry)
3694 return;
3696 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3697 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3698 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3700 switch (rsp->event) {
3701 case BFA_SFP_SCN_INSERTED:
3702 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3703 break;
3704 case BFA_SFP_SCN_REMOVED:
3705 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3706 break;
3707 case BFA_SFP_SCN_FAILED:
3708 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3709 break;
3710 case BFA_SFP_SCN_UNSUPPORT:
3711 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3712 break;
3713 case BFA_SFP_SCN_POM:
3714 aen_evt = BFA_PORT_AEN_SFP_POM;
3715 aen_entry->aen_data.port.level = rsp->pomlvl;
3716 break;
3717 default:
3718 bfa_trc(sfp, rsp->event);
3719 WARN_ON(1);
3722 /* Send the AEN notification */
3723 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3724 BFA_AEN_CAT_PORT, aen_evt);
3728 * Send the SFP get-data request to firmware.
3730 static void
3731 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3733 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3735 bfa_trc(sfp, req->memtype);
3737 /* build host command */
3738 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3739 bfa_ioc_portid(sfp->ioc));
3741 /* send mbox cmd */
3742 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3746 * SFP is valid, read sfp data
3748 static void
3749 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3751 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3753 WARN_ON(sfp->lock != 0);
3754 bfa_trc(sfp, sfp->state);
3756 sfp->lock = 1;
3757 sfp->memtype = memtype;
3758 req->memtype = memtype;
3760 /* Setup SG list */
3761 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3763 bfa_sfp_getdata_send(sfp);
3767 * SFP scn handler
3769 static void
3770 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3772 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3774 switch (rsp->event) {
3775 case BFA_SFP_SCN_INSERTED:
3776 sfp->state = BFA_SFP_STATE_INSERTED;
3777 sfp->data_valid = 0;
3778 bfa_sfp_scn_aen_post(sfp, rsp);
3779 break;
3780 case BFA_SFP_SCN_REMOVED:
3781 sfp->state = BFA_SFP_STATE_REMOVED;
3782 sfp->data_valid = 0;
3783 bfa_sfp_scn_aen_post(sfp, rsp);
3784 break;
3785 case BFA_SFP_SCN_FAILED:
3786 sfp->state = BFA_SFP_STATE_FAILED;
3787 sfp->data_valid = 0;
3788 bfa_sfp_scn_aen_post(sfp, rsp);
3789 break;
3790 case BFA_SFP_SCN_UNSUPPORT:
3791 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3792 bfa_sfp_scn_aen_post(sfp, rsp);
3793 if (!sfp->lock)
3794 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3795 break;
3796 case BFA_SFP_SCN_POM:
3797 bfa_sfp_scn_aen_post(sfp, rsp);
3798 break;
3799 case BFA_SFP_SCN_VALID:
3800 sfp->state = BFA_SFP_STATE_VALID;
3801 if (!sfp->lock)
3802 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3803 break;
3804 default:
3805 bfa_trc(sfp, rsp->event);
3806 WARN_ON(1);
3811 * SFP show complete
3813 static void
3814 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3816 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3818 if (!sfp->lock) {
3820 * receiving response after ioc failure
3822 bfa_trc(sfp, sfp->lock);
3823 return;
3826 bfa_trc(sfp, rsp->status);
3827 if (rsp->status == BFA_STATUS_OK) {
3828 sfp->data_valid = 1;
3829 if (sfp->state == BFA_SFP_STATE_VALID)
3830 sfp->status = BFA_STATUS_OK;
3831 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3832 sfp->status = BFA_STATUS_SFP_UNSUPP;
3833 else
3834 bfa_trc(sfp, sfp->state);
3835 } else {
3836 sfp->data_valid = 0;
3837 sfp->status = rsp->status;
3838 /* sfpshow shouldn't change sfp state */
3841 bfa_trc(sfp, sfp->memtype);
3842 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3843 bfa_trc(sfp, sfp->data_valid);
3844 if (sfp->data_valid) {
3845 u32 size = sizeof(struct sfp_mem_s);
3846 u8 *des = (u8 *)(sfp->sfpmem);
3847 memcpy(des, sfp->dbuf_kva, size);
3850 * Queue completion callback.
3852 bfa_cb_sfp_show(sfp);
3853 } else
3854 sfp->lock = 0;
3856 bfa_trc(sfp, sfp->state_query_lock);
3857 if (sfp->state_query_lock) {
3858 sfp->state = rsp->state;
3859 /* Complete callback */
3860 bfa_cb_sfp_state_query(sfp);
3865 * Query the firmware for the current SFP state.
3867 static void
3868 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3870 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3872 /* Should not be doing query if not in _INIT state */
3873 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3874 WARN_ON(sfp->state_query_lock != 0);
3875 bfa_trc(sfp, sfp->state);
3877 sfp->state_query_lock = 1;
3878 req->memtype = 0;
3880 if (!sfp->lock)
3881 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3884 static void
3885 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3887 enum bfa_defs_sfp_media_e *media = sfp->media;
3889 *media = BFA_SFP_MEDIA_UNKNOWN;
3891 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3892 *media = BFA_SFP_MEDIA_UNSUPPORT;
3893 else if (sfp->state == BFA_SFP_STATE_VALID) {
3894 union sfp_xcvr_e10g_code_u e10g;
3895 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3896 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3897 (sfpmem->srlid_base.xcvr[5] >> 1);
3899 e10g.b = sfpmem->srlid_base.xcvr[0];
3900 bfa_trc(sfp, e10g.b);
3901 bfa_trc(sfp, xmtr_tech);
3902 /* check fc transmitter tech */
3903 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3904 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3905 (xmtr_tech & SFP_XMTR_TECH_CA))
3906 *media = BFA_SFP_MEDIA_CU;
3907 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3908 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3909 *media = BFA_SFP_MEDIA_EL;
3910 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3911 (xmtr_tech & SFP_XMTR_TECH_LC))
3912 *media = BFA_SFP_MEDIA_LW;
3913 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3914 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3915 (xmtr_tech & SFP_XMTR_TECH_SA))
3916 *media = BFA_SFP_MEDIA_SW;
3917 /* check 10G Ethernet compliance code */
3918 else if (e10g.r.e10g_sr)
3919 *media = BFA_SFP_MEDIA_SW;
3920 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3921 *media = BFA_SFP_MEDIA_LW;
3922 else if (e10g.r.e10g_unall)
3923 *media = BFA_SFP_MEDIA_UNKNOWN;
3924 else
3925 bfa_trc(sfp, 0);
3926 } else
3927 bfa_trc(sfp, sfp->state);
3930 static bfa_status_t
3931 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3933 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3934 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3935 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3936 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3938 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3939 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3940 return BFA_STATUS_OK;
3941 else {
3942 bfa_trc(sfp, e10g.b);
3943 return BFA_STATUS_UNSUPP_SPEED;
3946 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3947 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3948 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3949 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3950 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3951 return BFA_STATUS_OK;
3952 else {
3953 bfa_trc(sfp, portspeed);
3954 bfa_trc(sfp, fc3.b);
3955 bfa_trc(sfp, e10g.b);
3956 return BFA_STATUS_UNSUPP_SPEED;
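
/*
 * Editor-added example: a BFA_PORT_SPEED_8GBPS request is accepted only
 * when the transceiver's FC-3 speed code advertises 800 MB/s
 * (fc3.r.mb800), while a 10 Gbps request is validated against the 10G
 * Ethernet compliance codes instead.
 */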
3961 * SFP mailbox message (i2h) handler.
3963 void
3964 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3966 struct bfa_sfp_s *sfp = sfparg;
3968 switch (msg->mh.msg_id) {
3969 case BFI_SFP_I2H_SHOW:
3970 bfa_sfp_show_comp(sfp, msg);
3971 break;
3973 case BFI_SFP_I2H_SCN:
3974 bfa_sfp_scn(sfp, msg);
3975 break;
3977 default:
3978 bfa_trc(sfp, msg->mh.msg_id);
3979 WARN_ON(1);
3984 * Return the size of DMA memory needed by the SFP module.
3987 bfa_sfp_meminfo(void)
3989 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3993 * Attach virtual and physical memory for SFP.
3995 void
3996 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3997 struct bfa_trc_mod_s *trcmod)
3999 sfp->dev = dev;
4000 sfp->ioc = ioc;
4001 sfp->trcmod = trcmod;
4003 sfp->cbfn = NULL;
4004 sfp->cbarg = NULL;
4005 sfp->sfpmem = NULL;
4006 sfp->lock = 0;
4007 sfp->data_valid = 0;
4008 sfp->state = BFA_SFP_STATE_INIT;
4009 sfp->state_query_lock = 0;
4010 sfp->state_query_cbfn = NULL;
4011 sfp->state_query_cbarg = NULL;
4012 sfp->media = NULL;
4013 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4014 sfp->is_elb = BFA_FALSE;
4016 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4017 bfa_q_qe_init(&sfp->ioc_notify);
4018 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4019 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4023 * Claim Memory for SFP
4025 void
4026 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4028 sfp->dbuf_kva = dm_kva;
4029 sfp->dbuf_pa = dm_pa;
4030 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4032 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4033 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4037 * Show SFP eeprom content
4039 * @param[in] sfp - bfa sfp module
4041 * @param[out] sfpmem - sfp eeprom data
4044 bfa_status_t
4045 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4046 bfa_cb_sfp_t cbfn, void *cbarg)
4049 if (!bfa_ioc_is_operational(sfp->ioc)) {
4050 bfa_trc(sfp, 0);
4051 return BFA_STATUS_IOC_NON_OP;
4054 if (sfp->lock) {
4055 bfa_trc(sfp, 0);
4056 return BFA_STATUS_DEVBUSY;
4059 sfp->cbfn = cbfn;
4060 sfp->cbarg = cbarg;
4061 sfp->sfpmem = sfpmem;
4063 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4064 return BFA_STATUS_OK;
4068 * Return SFP Media type
4070 * @param[in] sfp - bfa sfp module
4072 * @param[out] media - detected SFP media type
4075 bfa_status_t
4076 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4077 bfa_cb_sfp_t cbfn, void *cbarg)
4079 if (!bfa_ioc_is_operational(sfp->ioc)) {
4080 bfa_trc(sfp, 0);
4081 return BFA_STATUS_IOC_NON_OP;
4084 sfp->media = media;
4085 if (sfp->state == BFA_SFP_STATE_INIT) {
4086 if (sfp->state_query_lock) {
4087 bfa_trc(sfp, 0);
4088 return BFA_STATUS_DEVBUSY;
4089 } else {
4090 sfp->state_query_cbfn = cbfn;
4091 sfp->state_query_cbarg = cbarg;
4092 bfa_sfp_state_query(sfp);
4093 return BFA_STATUS_SFP_NOT_READY;
4097 bfa_sfp_media_get(sfp);
4098 return BFA_STATUS_OK;
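
/*
 * Editor-added sketch: callers must handle the deferred case, where the
 * result arrives through the callback instead of the return value.
 * my_cb/my_arg are hypothetical.
 *
 *	status = bfa_sfp_media(sfp, &media, my_cb, my_arg);
 *	if (status == BFA_STATUS_OK)
 *		(media already holds the detected type)
 *	else if (status == BFA_STATUS_SFP_NOT_READY)
 *		(media is filled in before my_cb runs)
 */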
4102 * Check if user set port speed is allowed by the SFP
4104 * @param[in] sfp - bfa sfp module
4105 * @param[in] portspeed - port speed from user
4108 bfa_status_t
4109 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4110 bfa_cb_sfp_t cbfn, void *cbarg)
4112 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4114 if (!bfa_ioc_is_operational(sfp->ioc))
4115 return BFA_STATUS_IOC_NON_OP;
4117 /* For Mezz cards, all speeds are allowed */
4118 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4119 return BFA_STATUS_OK;
4121 /* Check SFP state */
4122 sfp->portspeed = portspeed;
4123 if (sfp->state == BFA_SFP_STATE_INIT) {
4124 if (sfp->state_query_lock) {
4125 bfa_trc(sfp, 0);
4126 return BFA_STATUS_DEVBUSY;
4127 } else {
4128 sfp->state_query_cbfn = cbfn;
4129 sfp->state_query_cbarg = cbarg;
4130 bfa_sfp_state_query(sfp);
4131 return BFA_STATUS_SFP_NOT_READY;
4135 if (sfp->state == BFA_SFP_STATE_REMOVED ||
4136 sfp->state == BFA_SFP_STATE_FAILED) {
4137 bfa_trc(sfp, sfp->state);
4138 return BFA_STATUS_NO_SFP_DEV;
4141 if (sfp->state == BFA_SFP_STATE_INSERTED) {
4142 bfa_trc(sfp, sfp->state);
4143 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
4146 /* For electrical loopback (eloopback), all speeds are allowed */
4147 if (sfp->is_elb)
4148 return BFA_STATUS_OK;
4150 return bfa_sfp_speed_valid(sfp, portspeed);
4154 * Flash module specific
4158 * The flash DMA buffer should be big enough to hold both the MFG block
4159 * and the ASIC block (64KB) at the same time, and should be 2KB aligned
4160 * to keep a write segment from crossing a sector boundary.
4162 #define BFA_FLASH_SEG_SZ 2048
4163 #define BFA_FLASH_DMA_BUF_SZ \
4164 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
4166 static void
4167 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4168 int inst, int type)
4170 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4171 struct bfa_aen_entry_s *aen_entry;
4173 bfad_get_aen_entry(bfad, aen_entry);
4174 if (!aen_entry)
4175 return;
4177 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4178 aen_entry->aen_data.audit.partition_inst = inst;
4179 aen_entry->aen_data.audit.partition_type = type;
4181 /* Send the AEN notification */
4182 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4183 BFA_AEN_CAT_AUDIT, event);
4186 static void
4187 bfa_flash_cb(struct bfa_flash_s *flash)
4189 flash->op_busy = 0;
4190 if (flash->cbfn)
4191 flash->cbfn(flash->cbarg, flash->status);
4194 static void
4195 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4197 struct bfa_flash_s *flash = cbarg;
4199 bfa_trc(flash, event);
4200 switch (event) {
4201 case BFA_IOC_E_DISABLED:
4202 case BFA_IOC_E_FAILED:
4203 if (flash->op_busy) {
4204 flash->status = BFA_STATUS_IOC_FAILURE;
4205 flash->cbfn(flash->cbarg, flash->status);
4206 flash->op_busy = 0;
4208 break;
4210 default:
4211 break;
4216 * Send flash attribute query request.
4218 * @param[in] cbarg - callback argument
4220 static void
4221 bfa_flash_query_send(void *cbarg)
4223 struct bfa_flash_s *flash = cbarg;
4224 struct bfi_flash_query_req_s *msg =
4225 (struct bfi_flash_query_req_s *) flash->mb.msg;
4227 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4228 bfa_ioc_portid(flash->ioc));
4229 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4230 flash->dbuf_pa);
4231 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4235 * Send flash write request.
4237 * @param[in] cbarg - callback argument
4239 static void
4240 bfa_flash_write_send(struct bfa_flash_s *flash)
4242 struct bfi_flash_write_req_s *msg =
4243 (struct bfi_flash_write_req_s *) flash->mb.msg;
4244 u32 len;
4246 msg->type = be32_to_cpu(flash->type);
4247 msg->instance = flash->instance;
4248 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4249 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4250 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4251 msg->length = be32_to_cpu(len);
4253 /* indicate if it's the last msg of the whole write operation */
4254 msg->last = (len == flash->residue) ? 1 : 0;
4256 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4257 bfa_ioc_portid(flash->ioc));
4258 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4259 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4260 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4262 flash->residue -= len;
4263 flash->offset += len;
4267 * Send flash read request.
4269 * @param[in] cbarg - callback argument
4271 static void
4272 bfa_flash_read_send(void *cbarg)
4274 struct bfa_flash_s *flash = cbarg;
4275 struct bfi_flash_read_req_s *msg =
4276 (struct bfi_flash_read_req_s *) flash->mb.msg;
4277 u32 len;
4279 msg->type = be32_to_cpu(flash->type);
4280 msg->instance = flash->instance;
4281 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4282 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4283 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4284 msg->length = be32_to_cpu(len);
4285 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4286 bfa_ioc_portid(flash->ioc));
4287 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4288 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4292 * Send flash erase request.
4294 * @param[in] cbarg - callback argument
4296 static void
4297 bfa_flash_erase_send(void *cbarg)
4299 struct bfa_flash_s *flash = cbarg;
4300 struct bfi_flash_erase_req_s *msg =
4301 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4303 msg->type = be32_to_cpu(flash->type);
4304 msg->instance = flash->instance;
4305 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4306 bfa_ioc_portid(flash->ioc));
4307 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4311 * Process flash response messages upon receiving interrupts.
4313 * @param[in] flasharg - flash structure
4314 * @param[in] msg - message structure
4316 static void
4317 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4319 struct bfa_flash_s *flash = flasharg;
4320 u32 status;
4322 union {
4323 struct bfi_flash_query_rsp_s *query;
4324 struct bfi_flash_erase_rsp_s *erase;
4325 struct bfi_flash_write_rsp_s *write;
4326 struct bfi_flash_read_rsp_s *read;
4327 struct bfi_flash_event_s *event;
4328 struct bfi_mbmsg_s *msg;
4329 } m;
4331 m.msg = msg;
4332 bfa_trc(flash, msg->mh.msg_id);
4334 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4335 /* receiving response after ioc failure */
4336 bfa_trc(flash, 0x9999);
4337 return;
4340 switch (msg->mh.msg_id) {
4341 case BFI_FLASH_I2H_QUERY_RSP:
4342 status = be32_to_cpu(m.query->status);
4343 bfa_trc(flash, status);
4344 if (status == BFA_STATUS_OK) {
4345 u32 i;
4346 struct bfa_flash_attr_s *attr, *f;
4348 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4349 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4350 attr->status = be32_to_cpu(f->status);
4351 attr->npart = be32_to_cpu(f->npart);
4352 bfa_trc(flash, attr->status);
4353 bfa_trc(flash, attr->npart);
4354 for (i = 0; i < attr->npart; i++) {
4355 attr->part[i].part_type =
4356 be32_to_cpu(f->part[i].part_type);
4357 attr->part[i].part_instance =
4358 be32_to_cpu(f->part[i].part_instance);
4359 attr->part[i].part_off =
4360 be32_to_cpu(f->part[i].part_off);
4361 attr->part[i].part_size =
4362 be32_to_cpu(f->part[i].part_size);
4363 attr->part[i].part_len =
4364 be32_to_cpu(f->part[i].part_len);
4365 attr->part[i].part_status =
4366 be32_to_cpu(f->part[i].part_status);
4369 flash->status = status;
4370 bfa_flash_cb(flash);
4371 break;
4372 case BFI_FLASH_I2H_ERASE_RSP:
4373 status = be32_to_cpu(m.erase->status);
4374 bfa_trc(flash, status);
4375 flash->status = status;
4376 bfa_flash_cb(flash);
4377 break;
4378 case BFI_FLASH_I2H_WRITE_RSP:
4379 status = be32_to_cpu(m.write->status);
4380 bfa_trc(flash, status);
4381 if (status != BFA_STATUS_OK || flash->residue == 0) {
4382 flash->status = status;
4383 bfa_flash_cb(flash);
4384 } else {
4385 bfa_trc(flash, flash->offset);
4386 bfa_flash_write_send(flash);
4388 break;
4389 case BFI_FLASH_I2H_READ_RSP:
4390 status = be32_to_cpu(m.read->status);
4391 bfa_trc(flash, status);
4392 if (status != BFA_STATUS_OK) {
4393 flash->status = status;
4394 bfa_flash_cb(flash);
4395 } else {
4396 u32 len = be32_to_cpu(m.read->length);
4397 bfa_trc(flash, flash->offset);
4398 bfa_trc(flash, len);
4399 memcpy(flash->ubuf + flash->offset,
4400 flash->dbuf_kva, len);
4401 flash->residue -= len;
4402 flash->offset += len;
4403 if (flash->residue == 0) {
4404 flash->status = status;
4405 bfa_flash_cb(flash);
4406 } else
4407 bfa_flash_read_send(flash);
4409 break;
4410 case BFI_FLASH_I2H_BOOT_VER_RSP:
4411 break;
4412 case BFI_FLASH_I2H_EVENT:
4413 status = be32_to_cpu(m.event->status);
4414 bfa_trc(flash, status);
4415 if (status == BFA_STATUS_BAD_FWCFG)
4416 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4417 else if (status == BFA_STATUS_INVALID_VENDOR) {
4418 u32 param;
4419 param = be32_to_cpu(m.event->param);
4420 bfa_trc(flash, param);
4421 bfa_ioc_aen_post(flash->ioc,
4422 BFA_IOC_AEN_INVALID_VENDOR);
4424 break;
4426 default:
4427 WARN_ON(1);
4432 * Flash memory info API.
4434 * @param[in] mincfg - minimal cfg variable
4437 bfa_flash_meminfo(bfa_boolean_t mincfg)
4439 /* min driver doesn't need flash */
4440 if (mincfg)
4441 return 0;
4442 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4446 * Flash attach API.
4448 * @param[in] flash - flash structure
4449 * @param[in] ioc - ioc structure
4450 * @param[in] dev - device structure
4451 * @param[in] trcmod - trace module
4452 * @param[in] mincfg - minimal cfg variable
4454 void
4455 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4456 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4458 flash->ioc = ioc;
4459 flash->trcmod = trcmod;
4460 flash->cbfn = NULL;
4461 flash->cbarg = NULL;
4462 flash->op_busy = 0;
4464 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4465 bfa_q_qe_init(&flash->ioc_notify);
4466 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4467 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4469 /* min driver doesn't need flash */
4470 if (mincfg) {
4471 flash->dbuf_kva = NULL;
4472 flash->dbuf_pa = 0;
4477 * Claim memory for flash
4479 * @param[in] flash - flash structure
4480 * @param[in] dm_kva - pointer to virtual memory address
4481 * @param[in] dm_pa - physical memory address
4482 * @param[in] mincfg - minimal cfg variable
4484 void
4485 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4486 bfa_boolean_t mincfg)
4488 if (mincfg)
4489 return;
4491 flash->dbuf_kva = dm_kva;
4492 flash->dbuf_pa = dm_pa;
4493 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4494 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4495 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4499 * Get flash attribute.
4501 * @param[in] flash - flash structure
4502 * @param[in] attr - flash attribute structure
4503 * @param[in] cbfn - callback function
4504 * @param[in] cbarg - callback argument
4506 * Return status.
4508 bfa_status_t
4509 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4510 bfa_cb_flash_t cbfn, void *cbarg)
4512 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4514 if (!bfa_ioc_is_operational(flash->ioc))
4515 return BFA_STATUS_IOC_NON_OP;
4517 if (flash->op_busy) {
4518 bfa_trc(flash, flash->op_busy);
4519 return BFA_STATUS_DEVBUSY;
4522 flash->op_busy = 1;
4523 flash->cbfn = cbfn;
4524 flash->cbarg = cbarg;
4525 flash->ubuf = (u8 *) attr;
4526 bfa_flash_query_send(flash);
4528 return BFA_STATUS_OK;
4532 * Erase flash partition.
4534 * @param[in] flash - flash structure
4535 * @param[in] type - flash partition type
4536 * @param[in] instance - flash partition instance
4537 * @param[in] cbfn - callback function
4538 * @param[in] cbarg - callback argument
4540 * Return status.
4542 bfa_status_t
4543 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4544 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4546 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4547 bfa_trc(flash, type);
4548 bfa_trc(flash, instance);
4550 if (!bfa_ioc_is_operational(flash->ioc))
4551 return BFA_STATUS_IOC_NON_OP;
4553 if (flash->op_busy) {
4554 bfa_trc(flash, flash->op_busy);
4555 return BFA_STATUS_DEVBUSY;
4558 flash->op_busy = 1;
4559 flash->cbfn = cbfn;
4560 flash->cbarg = cbarg;
4561 flash->type = type;
4562 flash->instance = instance;
4564 bfa_flash_erase_send(flash);
4565 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4566 instance, type);
4567 return BFA_STATUS_OK;
4571 * Update flash partition.
4573 * @param[in] flash - flash structure
4574 * @param[in] type - flash partition type
4575 * @param[in] instance - flash partition instance
4576 * @param[in] buf - update data buffer
4577 * @param[in] len - data buffer length
4578 * @param[in] offset - offset relative to the partition starting address
4579 * @param[in] cbfn - callback function
4580 * @param[in] cbarg - callback argument
4582 * Return status.
4584 bfa_status_t
4585 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4586 u8 instance, void *buf, u32 len, u32 offset,
4587 bfa_cb_flash_t cbfn, void *cbarg)
4589 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4590 bfa_trc(flash, type);
4591 bfa_trc(flash, instance);
4592 bfa_trc(flash, len);
4593 bfa_trc(flash, offset);
4595 if (!bfa_ioc_is_operational(flash->ioc))
4596 return BFA_STATUS_IOC_NON_OP;
4599 * 'len' must be on a word (4-byte) boundary
4600 * 'offset' must be on a sector (16KB) boundary
4602 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4603 return BFA_STATUS_FLASH_BAD_LEN;
4605 if (type == BFA_FLASH_PART_MFG)
4606 return BFA_STATUS_EINVAL;
4608 if (flash->op_busy) {
4609 bfa_trc(flash, flash->op_busy);
4610 return BFA_STATUS_DEVBUSY;
4613 flash->op_busy = 1;
4614 flash->cbfn = cbfn;
4615 flash->cbarg = cbarg;
4616 flash->type = type;
4617 flash->instance = instance;
4618 flash->residue = len;
4619 flash->offset = 0;
4620 flash->addr_off = offset;
4621 flash->ubuf = buf;
4623 bfa_flash_write_send(flash);
4624 return BFA_STATUS_OK;
4628 * Read flash partition.
4630 * @param[in] flash - flash structure
4631 * @param[in] type - flash partition type
4632 * @param[in] instance - flash partition instance
4633 * @param[in] buf - read data buffer
4634 * @param[in] len - data buffer length
4635 * @param[in] offset - offset relative to the partition starting address
4636 * @param[in] cbfn - callback function
4637 * @param[in] cbarg - callback argument
4639 * Return status.
4641 bfa_status_t
4642 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4643 u8 instance, void *buf, u32 len, u32 offset,
4644 bfa_cb_flash_t cbfn, void *cbarg)
4646 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4647 bfa_trc(flash, type);
4648 bfa_trc(flash, instance);
4649 bfa_trc(flash, len);
4650 bfa_trc(flash, offset);
4652 if (!bfa_ioc_is_operational(flash->ioc))
4653 return BFA_STATUS_IOC_NON_OP;
4656 * 'len' must be on a word (4-byte) boundary
4657 * 'offset' must be on a sector (16KB) boundary
4659 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4660 return BFA_STATUS_FLASH_BAD_LEN;
4662 if (flash->op_busy) {
4663 bfa_trc(flash, flash->op_busy);
4664 return BFA_STATUS_DEVBUSY;
4667 flash->op_busy = 1;
4668 flash->cbfn = cbfn;
4669 flash->cbarg = cbarg;
4670 flash->type = type;
4671 flash->instance = instance;
4672 flash->residue = len;
4673 flash->offset = 0;
4674 flash->addr_off = offset;
4675 flash->ubuf = buf;
4676 bfa_flash_read_send(flash);
4678 return BFA_STATUS_OK;
4682 * DIAG module specific
4685 #define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4686 #define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
4688 /* IOC event handler */
4689 static void
4690 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4692 struct bfa_diag_s *diag = diag_arg;
4694 bfa_trc(diag, event);
4695 bfa_trc(diag, diag->block);
4696 bfa_trc(diag, diag->fwping.lock);
4697 bfa_trc(diag, diag->tsensor.lock);
4699 switch (event) {
4700 case BFA_IOC_E_DISABLED:
4701 case BFA_IOC_E_FAILED:
4702 if (diag->fwping.lock) {
4703 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4704 diag->fwping.cbfn(diag->fwping.cbarg,
4705 diag->fwping.status);
4706 diag->fwping.lock = 0;
4709 if (diag->tsensor.lock) {
4710 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4711 diag->tsensor.cbfn(diag->tsensor.cbarg,
4712 diag->tsensor.status);
4713 diag->tsensor.lock = 0;
4716 if (diag->block) {
4717 if (diag->timer_active) {
4718 bfa_timer_stop(&diag->timer);
4719 diag->timer_active = 0;
4722 diag->status = BFA_STATUS_IOC_FAILURE;
4723 diag->cbfn(diag->cbarg, diag->status);
4724 diag->block = 0;
4726 break;
4728 default:
4729 break;
4733 static void
4734 bfa_diag_memtest_done(void *cbarg)
4736 struct bfa_diag_s *diag = cbarg;
4737 struct bfa_ioc_s *ioc = diag->ioc;
4738 struct bfa_diag_memtest_result *res = diag->result;
4739 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4740 u32 pgnum, i;
4742 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4743 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4745 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4746 sizeof(u32)); i++) {
4747 /* read test result from smem */
4748 *((u32 *) res + i) =
4749 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4750 loff += sizeof(u32);
4753 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4754 bfa_ioc_reset_fwstate(ioc);
4756 res->status = swab32(res->status);
4757 bfa_trc(diag, res->status);
4759 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4760 diag->status = BFA_STATUS_OK;
4761 else {
4762 diag->status = BFA_STATUS_MEMTEST_FAILED;
4763 res->addr = swab32(res->addr);
4764 res->exp = swab32(res->exp);
4765 res->act = swab32(res->act);
4766 res->err_status = swab32(res->err_status);
4767 res->err_status1 = swab32(res->err_status1);
4768 res->err_addr = swab32(res->err_addr);
4769 bfa_trc(diag, res->addr);
4770 bfa_trc(diag, res->exp);
4771 bfa_trc(diag, res->act);
4772 bfa_trc(diag, res->err_status);
4773 bfa_trc(diag, res->err_status1);
4774 bfa_trc(diag, res->err_addr);
4776 diag->timer_active = 0;
4777 diag->cbfn(diag->cbarg, diag->status);
4778 diag->block = 0;
4782 * Firmware ping
4786 * Perform DMA test directly
4788 static void
4789 diag_fwping_send(struct bfa_diag_s *diag)
4791 struct bfi_diag_fwping_req_s *fwping_req;
4792 u32 i;
4794 bfa_trc(diag, diag->fwping.dbuf_pa);
4796 /* fill DMA area with pattern */
4797 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4798 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4800 /* Fill mbox msg */
4801 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4803 /* Setup SG list */
4804 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4805 diag->fwping.dbuf_pa);
4806 /* Set up dma count */
4807 fwping_req->count = cpu_to_be32(diag->fwping.count);
4808 /* Set up data pattern */
4809 fwping_req->data = diag->fwping.data;
4811 /* build host command */
4812 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4813 bfa_ioc_portid(diag->ioc));
4815 /* send mbox cmd */
4816 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4819 static void
4820 diag_fwping_comp(struct bfa_diag_s *diag,
4821 struct bfi_diag_fwping_rsp_s *diag_rsp)
4823 u32 rsp_data = diag_rsp->data;
4824 u8 rsp_dma_status = diag_rsp->dma_status;
4826 bfa_trc(diag, rsp_data);
4827 bfa_trc(diag, rsp_dma_status);
4829 if (rsp_dma_status == BFA_STATUS_OK) {
4830 u32 i, pat;
4831 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4832 diag->fwping.data;
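/*
 * The count parity determines the expected pattern: the firmware
 * presumably flips the pattern on each DMA pass, so an odd count
 * leaves ~data in the buffer and an even count leaves the original
 * data.
 */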
4833 /* Check mbox data */
4834 if (diag->fwping.data != rsp_data) {
4835 bfa_trc(diag, rsp_data);
4836 diag->fwping.result->dmastatus =
4837 BFA_STATUS_DATACORRUPTED;
4838 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4839 diag->fwping.cbfn(diag->fwping.cbarg,
4840 diag->fwping.status);
4841 diag->fwping.lock = 0;
4842 return;
4844 /* Check dma pattern */
4845 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4846 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4847 bfa_trc(diag, i);
4848 bfa_trc(diag, pat);
4849 bfa_trc(diag,
4850 *((u32 *)diag->fwping.dbuf_kva + i));
4851 diag->fwping.result->dmastatus =
4852 BFA_STATUS_DATACORRUPTED;
4853 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4854 diag->fwping.cbfn(diag->fwping.cbarg,
4855 diag->fwping.status);
4856 diag->fwping.lock = 0;
4857 return;
4860 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4861 diag->fwping.status = BFA_STATUS_OK;
4862 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4863 diag->fwping.lock = 0;
4864 } else {
4865 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4866 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4867 diag->fwping.lock = 0;
4872 * Temperature Sensor
4875 static void
4876 diag_tempsensor_send(struct bfa_diag_s *diag)
4878 struct bfi_diag_ts_req_s *msg;
4880 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4881 bfa_trc(diag, msg->temp);
4882 /* build host command */
4883 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4884 bfa_ioc_portid(diag->ioc));
4885 /* send mbox cmd */
4886 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4889 static void
4890 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4892 if (!diag->tsensor.lock) {
4893 /* receiving response after ioc failure */
4894 bfa_trc(diag, diag->tsensor.lock);
4895 return;
4899 * The ASIC junction tempsensor is a register read operation,
4900 * so it will always return OK.
4902 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4903 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4904 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4906 if (rsp->ts_brd) {
4907 /* tsensor.temp->status is brd_temp status */
4908 diag->tsensor.temp->status = rsp->status;
4909 if (rsp->status == BFA_STATUS_OK) {
4910 diag->tsensor.temp->brd_temp =
4911 be16_to_cpu(rsp->brd_temp);
4912 } else
4913 diag->tsensor.temp->brd_temp = 0;
4916 bfa_trc(diag, rsp->status);
4917 bfa_trc(diag, rsp->ts_junc);
4918 bfa_trc(diag, rsp->temp);
4919 bfa_trc(diag, rsp->ts_brd);
4920 bfa_trc(diag, rsp->brd_temp);
4922 /* tsensor status is always good because we always have the junction temp */
4923 diag->tsensor.status = BFA_STATUS_OK;
4924 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4925 diag->tsensor.lock = 0;
4929 * LED Test command
4931 static void
4932 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4934 struct bfi_diag_ledtest_req_s *msg;
4936 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4937 /* build host command */
4938 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4939 bfa_ioc_portid(diag->ioc));
4942 * convert the freq from N blinks per 10 sec to
4943 * crossbow ontime value. We do it here because division is needed.
4945 if (ledtest->freq)
4946 ledtest->freq = 500 / ledtest->freq;
4948 if (ledtest->freq == 0)
4949 ledtest->freq = 1;
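/*
 * Worked example (illustrative, not in the original source): 'freq'
 * is requested as N blinks per 10 seconds, and 500 / N gives the
 * crossbow ontime value.  A request of 10 blinks/10s becomes
 * 500 / 10 = 50; anything above 500 blinks/10s divides down to 0 and
 * is clamped to 1 above.
 */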
4951 bfa_trc(diag, ledtest->freq);
4952 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4953 msg->cmd = (u8) ledtest->cmd;
4954 msg->color = (u8) ledtest->color;
4955 msg->portid = bfa_ioc_portid(diag->ioc);
4956 msg->led = ledtest->led;
4957 msg->freq = cpu_to_be16(ledtest->freq);
4959 /* send mbox cmd */
4960 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4963 static void
4964 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4966 bfa_trc(diag, diag->ledtest.lock);
4967 diag->ledtest.lock = BFA_FALSE;
4968 /* no bfa_cb_queue is needed because the driver is not waiting */
4972 * Port beaconing
4974 static void
4975 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4977 struct bfi_diag_portbeacon_req_s *msg;
4979 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4980 /* build host command */
4981 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4982 bfa_ioc_portid(diag->ioc));
4983 msg->beacon = beacon;
4984 msg->period = cpu_to_be32(sec);
4985 /* send mbox cmd */
4986 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4989 static void
4990 diag_portbeacon_comp(struct bfa_diag_s *diag)
4992 bfa_trc(diag, diag->beacon.state);
4993 diag->beacon.state = BFA_FALSE;
4994 if (diag->cbfn_beacon)
4995 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4999 * Diag mbox message handler
5001 static void
5002 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5004 struct bfa_diag_s *diag = diagarg;
5006 switch (msg->mh.msg_id) {
5007 case BFI_DIAG_I2H_PORTBEACON:
5008 diag_portbeacon_comp(diag);
5009 break;
5010 case BFI_DIAG_I2H_FWPING:
5011 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5012 break;
5013 case BFI_DIAG_I2H_TEMPSENSOR:
5014 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5015 break;
5016 case BFI_DIAG_I2H_LEDTEST:
5017 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5018 break;
5019 default:
5020 bfa_trc(diag, msg->mh.msg_id);
5021 WARN_ON(1);
5026 * Gen RAM Test
5028 * @param[in] *diag - diag data struct
5029 * @param[in] *memtest - mem test params input from upper layer
5030 * @param[in] pattern - mem test pattern
5031 * @param[in] *result - mem test result
5032 * @param[in] cbfn - mem test callback function
5033 * @param[in] cbarg - callback function arg
5035 * @param[out]
5037 bfa_status_t
5038 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5039 u32 pattern, struct bfa_diag_memtest_result *result,
5040 bfa_cb_diag_t cbfn, void *cbarg)
5042 u32 memtest_tov;
5044 bfa_trc(diag, pattern);
5046 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5047 return BFA_STATUS_ADAPTER_ENABLED;
5049 /* check to see if there is another destructive diag cmd running */
5050 if (diag->block) {
5051 bfa_trc(diag, diag->block);
5052 return BFA_STATUS_DEVBUSY;
5053 } else
5054 diag->block = 1;
5056 diag->result = result;
5057 diag->cbfn = cbfn;
5058 diag->cbarg = cbarg;
5060 /* download memtest code and take LPU0 out of reset */
5061 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5063 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5064 CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5065 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5066 bfa_diag_memtest_done, diag, memtest_tov);
5067 diag->timer_active = 1;
5068 return BFA_STATUS_OK;
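/*
 * Illustrative usage sketch, not part of the original source.  The
 * memtest is destructive, so the adapter must already be disabled;
 * results land in 'res' once bfa_diag_memtest_done() has pulled them
 * out of smem and the (hypothetical) my_memtest_cb has been called.
 *
 *	struct bfa_diag_memtest_s params = { 0 };
 *	struct bfa_diag_memtest_result res;
 *
 *	if (bfa_diag_memtest(diag, &params, 0xa5a5a5a5, &res,
 *			     my_memtest_cb, my_arg) != BFA_STATUS_OK)
 *		return;
 */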
5072 * DIAG firmware ping command
5074 * @param[in] *diag - diag data struct
5075 * @param[in] cnt - dma loop count for testing PCIE
5076 * @param[in] data - data pattern to pass in fw
5077 * @param[in] *result - pointer to bfa_diag_fwping_result_t data struct
5078 * @param[in] cbfn - callback function
5079 * @param[in] *cbarg - callback function arg
5081 * @param[out]
5083 bfa_status_t
5084 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5085 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5086 void *cbarg)
5088 bfa_trc(diag, cnt);
5089 bfa_trc(diag, data);
5091 if (!bfa_ioc_is_operational(diag->ioc))
5092 return BFA_STATUS_IOC_NON_OP;
5094 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5095 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5096 return BFA_STATUS_CMD_NOTSUPP;
5098 /* check to see if there is another destructive diag cmd running */
5099 if (diag->block || diag->fwping.lock) {
5100 bfa_trc(diag, diag->block);
5101 bfa_trc(diag, diag->fwping.lock);
5102 return BFA_STATUS_DEVBUSY;
5105 /* Initialization */
5106 diag->fwping.lock = 1;
5107 diag->fwping.cbfn = cbfn;
5108 diag->fwping.cbarg = cbarg;
5109 diag->fwping.result = result;
5110 diag->fwping.data = data;
5111 diag->fwping.count = cnt;
5113 /* Init test results */
5114 diag->fwping.result->data = 0;
5115 diag->fwping.result->status = BFA_STATUS_OK;
5117 /* kick off the first ping */
5118 diag_fwping_send(diag);
5119 return BFA_STATUS_OK;
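/*
 * Illustrative usage sketch, not part of the original source (the
 * callback name my_fwping_cb is hypothetical): ping the firmware 8
 * times with a recognizable pattern; 'result' is filled in before the
 * callback fires.
 *
 *	struct bfa_diag_results_fwping result;
 *
 *	bfa_diag_fwping(diag, 8, 0xdeadbeef, &result, my_fwping_cb, my_arg);
 */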
5123 * Read Temperature Sensor
5125 * @param[in] *diag - diag data struct
5126 * @param[in] *result - pointer to bfa_diag_temp_t data struct
5127 * @param[in] cbfn - callback function
5128 * @param[in] *cbarg - callback function arg
5130 * @param[out]
5132 bfa_status_t
5133 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5134 struct bfa_diag_results_tempsensor_s *result,
5135 bfa_cb_diag_t cbfn, void *cbarg)
5137 /* check to see if there is a destructive diag cmd running */
5138 if (diag->block || diag->tsensor.lock) {
5139 bfa_trc(diag, diag->block);
5140 bfa_trc(diag, diag->tsensor.lock);
5141 return BFA_STATUS_DEVBUSY;
5144 if (!bfa_ioc_is_operational(diag->ioc))
5145 return BFA_STATUS_IOC_NON_OP;
5147 /* Init diag mod params */
5148 diag->tsensor.lock = 1;
5149 diag->tsensor.temp = result;
5150 diag->tsensor.cbfn = cbfn;
5151 diag->tsensor.cbarg = cbarg;
5152 diag->tsensor.status = BFA_STATUS_OK;
5154 /* Send msg to fw */
5155 diag_tempsensor_send(diag);
5157 return BFA_STATUS_OK;
5161 * LED Test command
5163 * @param[in] *diag - diag data struct
5164 * @param[in] *ledtest - pointer to ledtest data structure
5166 * @param[out]
5168 bfa_status_t
5169 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5171 bfa_trc(diag, ledtest->cmd);
5173 if (!bfa_ioc_is_operational(diag->ioc))
5174 return BFA_STATUS_IOC_NON_OP;
5176 if (diag->beacon.state)
5177 return BFA_STATUS_BEACON_ON;
5179 if (diag->ledtest.lock)
5180 return BFA_STATUS_LEDTEST_OP;
5182 /* Send msg to fw */
5183 diag->ledtest.lock = BFA_TRUE;
5184 diag_ledtest_send(diag, ledtest);
5186 return BFA_STATUS_OK;
5190 * Port beaconing command
5192 * @param[in] *diag - diag data struct
5193 * @param[in] beacon - port beaconing 1:ON 0:OFF
5194 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5195 * @param[in] sec - beaconing duration in seconds
5197 * @param[out]
5199 bfa_status_t
5200 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5201 bfa_boolean_t link_e2e_beacon, uint32_t sec)
5203 bfa_trc(diag, beacon);
5204 bfa_trc(diag, link_e2e_beacon);
5205 bfa_trc(diag, sec);
5207 if (!bfa_ioc_is_operational(diag->ioc))
5208 return BFA_STATUS_IOC_NON_OP;
5210 if (diag->ledtest.lock)
5211 return BFA_STATUS_LEDTEST_OP;
5213 if (diag->beacon.state && beacon) /* beacon already on */
5214 return BFA_STATUS_BEACON_ON;
5216 diag->beacon.state = beacon;
5217 diag->beacon.link_e2e = link_e2e_beacon;
5218 if (diag->cbfn_beacon)
5219 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5221 /* Send msg to fw */
5222 diag_portbeacon_send(diag, beacon, sec);
5224 return BFA_STATUS_OK;
5228 * Return DMA memory needed by diag module.
5231 bfa_diag_meminfo(void)
5233 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5237 * Attach virtual and physical memory for Diag.
5239 void
5240 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5241 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5243 diag->dev = dev;
5244 diag->ioc = ioc;
5245 diag->trcmod = trcmod;
5247 diag->block = 0;
5248 diag->cbfn = NULL;
5249 diag->cbarg = NULL;
5250 diag->result = NULL;
5251 diag->cbfn_beacon = cbfn_beacon;
5253 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5254 bfa_q_qe_init(&diag->ioc_notify);
5255 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5256 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5259 void
5260 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5262 diag->fwping.dbuf_kva = dm_kva;
5263 diag->fwping.dbuf_pa = dm_pa;
5264 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5268 * PHY module specific
5270 #define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5271 #define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5273 static void
5274 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5276 int i, m = sz >> 2;
5278 for (i = 0; i < m; i++)
5279 obuf[i] = be32_to_cpu(ibuf[i]);
5282 static bfa_boolean_t
5283 bfa_phy_present(struct bfa_phy_s *phy)
5285 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5288 static void
5289 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5291 struct bfa_phy_s *phy = cbarg;
5293 bfa_trc(phy, event);
5295 switch (event) {
5296 case BFA_IOC_E_DISABLED:
5297 case BFA_IOC_E_FAILED:
5298 if (phy->op_busy) {
5299 phy->status = BFA_STATUS_IOC_FAILURE;
5300 phy->cbfn(phy->cbarg, phy->status);
5301 phy->op_busy = 0;
5303 break;
5305 default:
5306 break;
5311 * Send phy attribute query request.
5313 * @param[in] cbarg - callback argument
5315 static void
5316 bfa_phy_query_send(void *cbarg)
5318 struct bfa_phy_s *phy = cbarg;
5319 struct bfi_phy_query_req_s *msg =
5320 (struct bfi_phy_query_req_s *) phy->mb.msg;
5322 msg->instance = phy->instance;
5323 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5324 bfa_ioc_portid(phy->ioc));
5325 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5326 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5330 * Send phy write request.
5332 * @param[in] cbarg - callback argument
5334 static void
5335 bfa_phy_write_send(void *cbarg)
5337 struct bfa_phy_s *phy = cbarg;
5338 struct bfi_phy_write_req_s *msg =
5339 (struct bfi_phy_write_req_s *) phy->mb.msg;
5340 u32 len;
5341 u16 *buf, *dbuf;
5342 int i, sz;
5344 msg->instance = phy->instance;
5345 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5346 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5347 phy->residue : BFA_PHY_DMA_BUF_SZ;
5348 msg->length = cpu_to_be32(len);
5350 /* indicate if it's the last msg of the whole write operation */
5351 msg->last = (len == phy->residue) ? 1 : 0;
5353 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5354 bfa_ioc_portid(phy->ioc));
5355 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5357 buf = (u16 *) (phy->ubuf + phy->offset);
5358 dbuf = (u16 *)phy->dbuf_kva;
5359 sz = len >> 1;
5360 for (i = 0; i < sz; i++)
5361 buf[i] = cpu_to_be16(dbuf[i]);
5363 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5365 phy->residue -= len;
5366 phy->offset += len;
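/*
 * Worked example of the chunking above (illustrative, not in the
 * original source): with BFA_PHY_DMA_BUF_SZ = 8KB, a 20KB update is
 * sent as three requests of 8KB, 8KB and 4KB; only the last one has
 * msg->last set, while residue goes 20K -> 12K -> 4K -> 0 and offset
 * goes 0 -> 8K -> 16K -> 20K.
 */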
5370 * Send phy read request.
5372 * @param[in] cbarg - callback argument
5374 static void
5375 bfa_phy_read_send(void *cbarg)
5377 struct bfa_phy_s *phy = cbarg;
5378 struct bfi_phy_read_req_s *msg =
5379 (struct bfi_phy_read_req_s *) phy->mb.msg;
5380 u32 len;
5382 msg->instance = phy->instance;
5383 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5384 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5385 phy->residue : BFA_PHY_DMA_BUF_SZ;
5386 msg->length = cpu_to_be32(len);
5387 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5388 bfa_ioc_portid(phy->ioc));
5389 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5390 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5394 * Send phy stats request.
5396 * @param[in] cbarg - callback argument
5398 static void
5399 bfa_phy_stats_send(void *cbarg)
5401 struct bfa_phy_s *phy = cbarg;
5402 struct bfi_phy_stats_req_s *msg =
5403 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5405 msg->instance = phy->instance;
5406 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5407 bfa_ioc_portid(phy->ioc));
5408 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5409 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5413 * Phy memory info API.
5415 * @param[in] mincfg - minimal cfg variable
5418 bfa_phy_meminfo(bfa_boolean_t mincfg)
5420 /* min driver doesn't need phy */
5421 if (mincfg)
5422 return 0;
5424 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5428 * Phy attach API.
5430 * @param[in] phy - phy structure
5431 * @param[in] ioc - ioc structure
5432 * @param[in] dev - device structure
5433 * @param[in] trcmod - trace module
5434 * @param[in] mincfg - minimal cfg variable
5436 void
5437 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5438 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5440 phy->ioc = ioc;
5441 phy->trcmod = trcmod;
5442 phy->cbfn = NULL;
5443 phy->cbarg = NULL;
5444 phy->op_busy = 0;
5446 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5447 bfa_q_qe_init(&phy->ioc_notify);
5448 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5449 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5451 /* min driver doesn't need phy */
5452 if (mincfg) {
5453 phy->dbuf_kva = NULL;
5454 phy->dbuf_pa = 0;
5459 * Claim memory for phy
5461 * @param[in] phy - phy structure
5462 * @param[in] dm_kva - pointer to virtual memory address
5463 * @param[in] dm_pa - physical memory address
5464 * @param[in] mincfg - minimal cfg variable
5466 void
5467 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5468 bfa_boolean_t mincfg)
5470 if (mincfg)
5471 return;
5473 phy->dbuf_kva = dm_kva;
5474 phy->dbuf_pa = dm_pa;
5475 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5476 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5477 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5480 bfa_boolean_t
5481 bfa_phy_busy(struct bfa_ioc_s *ioc)
5483 void __iomem *rb;
5485 rb = bfa_ioc_bar0(ioc);
5486 return readl(rb + BFA_PHY_LOCK_STATUS);
5490 * Get phy attribute.
5492 * @param[in] phy - phy structure
5493 * @param[in] attr - phy attribute structure
5494 * @param[in] cbfn - callback function
5495 * @param[in] cbarg - callback argument
5497 * Return status.
5499 bfa_status_t
5500 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5501 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5503 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5504 bfa_trc(phy, instance);
5506 if (!bfa_phy_present(phy))
5507 return BFA_STATUS_PHY_NOT_PRESENT;
5509 if (!bfa_ioc_is_operational(phy->ioc))
5510 return BFA_STATUS_IOC_NON_OP;
5512 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5513 bfa_trc(phy, phy->op_busy);
5514 return BFA_STATUS_DEVBUSY;
5517 phy->op_busy = 1;
5518 phy->cbfn = cbfn;
5519 phy->cbarg = cbarg;
5520 phy->instance = instance;
5521 phy->ubuf = (uint8_t *) attr;
5522 bfa_phy_query_send(phy);
5524 return BFA_STATUS_OK;
5528 * Get phy stats.
5530 * @param[in] phy - phy structure
5531 * @param[in] instance - phy image instance
5532 * @param[in] stats - pointer to phy stats
5533 * @param[in] cbfn - callback function
5534 * @param[in] cbarg - callback argument
5536 * Return status.
5538 bfa_status_t
5539 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5540 struct bfa_phy_stats_s *stats,
5541 bfa_cb_phy_t cbfn, void *cbarg)
5543 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5544 bfa_trc(phy, instance);
5546 if (!bfa_phy_present(phy))
5547 return BFA_STATUS_PHY_NOT_PRESENT;
5549 if (!bfa_ioc_is_operational(phy->ioc))
5550 return BFA_STATUS_IOC_NON_OP;
5552 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5553 bfa_trc(phy, phy->op_busy);
5554 return BFA_STATUS_DEVBUSY;
5557 phy->op_busy = 1;
5558 phy->cbfn = cbfn;
5559 phy->cbarg = cbarg;
5560 phy->instance = instance;
5561 phy->ubuf = (u8 *) stats;
5562 bfa_phy_stats_send(phy);
5564 return BFA_STATUS_OK;
5568 * Update phy image.
5570 * @param[in] phy - phy structure
5571 * @param[in] instance - phy image instance
5572 * @param[in] buf - update data buffer
5573 * @param[in] len - data buffer length
5574 * @param[in] offset - offset relative to starting address
5575 * @param[in] cbfn - callback function
5576 * @param[in] cbarg - callback argument
5578 * Return status.
5580 bfa_status_t
5581 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5582 void *buf, u32 len, u32 offset,
5583 bfa_cb_phy_t cbfn, void *cbarg)
5585 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5586 bfa_trc(phy, instance);
5587 bfa_trc(phy, len);
5588 bfa_trc(phy, offset);
5590 if (!bfa_phy_present(phy))
5591 return BFA_STATUS_PHY_NOT_PRESENT;
5593 if (!bfa_ioc_is_operational(phy->ioc))
5594 return BFA_STATUS_IOC_NON_OP;
5596 /* 'len' must be on a word (4-byte) boundary */
5597 if (!len || (len & 0x03))
5598 return BFA_STATUS_FAILED;
5600 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5601 bfa_trc(phy, phy->op_busy);
5602 return BFA_STATUS_DEVBUSY;
5605 phy->op_busy = 1;
5606 phy->cbfn = cbfn;
5607 phy->cbarg = cbarg;
5608 phy->instance = instance;
5609 phy->residue = len;
5610 phy->offset = 0;
5611 phy->addr_off = offset;
5612 phy->ubuf = buf;
5614 bfa_phy_write_send(phy);
5615 return BFA_STATUS_OK;
5619 * Read phy image.
5621 * @param[in] phy - phy structure
5622 * @param[in] instance - phy image instance
5623 * @param[in] buf - read data buffer
5624 * @param[in] len - data buffer length
5625 * @param[in] offset - offset relative to starting address
5626 * @param[in] cbfn - callback function
5627 * @param[in] cbarg - callback argument
5629 * Return status.
5631 bfa_status_t
5632 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5633 void *buf, u32 len, u32 offset,
5634 bfa_cb_phy_t cbfn, void *cbarg)
5636 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5637 bfa_trc(phy, instance);
5638 bfa_trc(phy, len);
5639 bfa_trc(phy, offset);
5641 if (!bfa_phy_present(phy))
5642 return BFA_STATUS_PHY_NOT_PRESENT;
5644 if (!bfa_ioc_is_operational(phy->ioc))
5645 return BFA_STATUS_IOC_NON_OP;
5647 /* 'len' must be on a word (4-byte) boundary */
5648 if (!len || (len & 0x03))
5649 return BFA_STATUS_FAILED;
5651 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5652 bfa_trc(phy, phy->op_busy);
5653 return BFA_STATUS_DEVBUSY;
5656 phy->op_busy = 1;
5657 phy->cbfn = cbfn;
5658 phy->cbarg = cbarg;
5659 phy->instance = instance;
5660 phy->residue = len;
5661 phy->offset = 0;
5662 phy->addr_off = offset;
5663 phy->ubuf = buf;
5664 bfa_phy_read_send(phy);
5666 return BFA_STATUS_OK;
5670 * Process phy response messages upon receiving interrupts.
5672 * @param[in] phyarg - phy structure
5673 * @param[in] msg - message structure
5675 void
5676 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5678 struct bfa_phy_s *phy = phyarg;
5679 u32 status;
5681 union {
5682 struct bfi_phy_query_rsp_s *query;
5683 struct bfi_phy_stats_rsp_s *stats;
5684 struct bfi_phy_write_rsp_s *write;
5685 struct bfi_phy_read_rsp_s *read;
5686 struct bfi_mbmsg_s *msg;
5687 } m;
5689 m.msg = msg;
5690 bfa_trc(phy, msg->mh.msg_id);
5692 if (!phy->op_busy) {
5693 /* receiving response after ioc failure */
5694 bfa_trc(phy, 0x9999);
5695 return;
5698 switch (msg->mh.msg_id) {
5699 case BFI_PHY_I2H_QUERY_RSP:
5700 status = be32_to_cpu(m.query->status);
5701 bfa_trc(phy, status);
5703 if (status == BFA_STATUS_OK) {
5704 struct bfa_phy_attr_s *attr =
5705 (struct bfa_phy_attr_s *) phy->ubuf;
5706 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5707 sizeof(struct bfa_phy_attr_s));
5708 bfa_trc(phy, attr->status);
5709 bfa_trc(phy, attr->length);
5712 phy->status = status;
5713 phy->op_busy = 0;
5714 if (phy->cbfn)
5715 phy->cbfn(phy->cbarg, phy->status);
5716 break;
5717 case BFI_PHY_I2H_STATS_RSP:
5718 status = be32_to_cpu(m.stats->status);
5719 bfa_trc(phy, status);
5721 if (status == BFA_STATUS_OK) {
5722 struct bfa_phy_stats_s *stats =
5723 (struct bfa_phy_stats_s *) phy->ubuf;
5724 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5725 sizeof(struct bfa_phy_stats_s));
5726 bfa_trc(phy, stats->status);
5729 phy->status = status;
5730 phy->op_busy = 0;
5731 if (phy->cbfn)
5732 phy->cbfn(phy->cbarg, phy->status);
5733 break;
5734 case BFI_PHY_I2H_WRITE_RSP:
5735 status = be32_to_cpu(m.write->status);
5736 bfa_trc(phy, status);
5738 if (status != BFA_STATUS_OK || phy->residue == 0) {
5739 phy->status = status;
5740 phy->op_busy = 0;
5741 if (phy->cbfn)
5742 phy->cbfn(phy->cbarg, phy->status);
5743 } else {
5744 bfa_trc(phy, phy->offset);
5745 bfa_phy_write_send(phy);
5747 break;
5748 case BFI_PHY_I2H_READ_RSP:
5749 status = be32_to_cpu(m.read->status);
5750 bfa_trc(phy, status);
5752 if (status != BFA_STATUS_OK) {
5753 phy->status = status;
5754 phy->op_busy = 0;
5755 if (phy->cbfn)
5756 phy->cbfn(phy->cbarg, phy->status);
5757 } else {
5758 u32 len = be32_to_cpu(m.read->length);
5759 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5760 u16 *dbuf = (u16 *)phy->dbuf_kva;
5761 int i, sz = len >> 1;
5763 bfa_trc(phy, phy->offset);
5764 bfa_trc(phy, len);
5766 for (i = 0; i < sz; i++)
5767 buf[i] = be16_to_cpu(dbuf[i]);
5769 phy->residue -= len;
5770 phy->offset += len;
5772 if (phy->residue == 0) {
5773 phy->status = status;
5774 phy->op_busy = 0;
5775 if (phy->cbfn)
5776 phy->cbfn(phy->cbarg, phy->status);
5777 } else
5778 bfa_phy_read_send(phy);
5780 break;
5781 default:
5782 WARN_ON(1);
5786 /* forward declaration of DCONF state machine */
5787 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5788 enum bfa_dconf_event event);
5789 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5790 enum bfa_dconf_event event);
5791 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5792 enum bfa_dconf_event event);
5793 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5794 enum bfa_dconf_event event);
5795 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5796 enum bfa_dconf_event event);
5797 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5798 enum bfa_dconf_event event);
5799 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5800 enum bfa_dconf_event event);
5802 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5803 static void bfa_dconf_timer(void *cbarg);
5804 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5805 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
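/*
 * Rough dconf state diagram, reconstructed from the handlers below
 * (informational only):
 *
 *	uninit --INIT--> flash_read --FLASH_COMP--> ready --WR--> dirty
 *	dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *	dirty/sync --EXIT--> final_sync --TIMEOUT/FLASH_COMP--> uninit
 *	dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 */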
5808 * Beginning state of dconf module. Waiting for an event to start.
5810 static void
5811 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5813 bfa_status_t bfa_status;
5814 bfa_trc(dconf->bfa, event);
5816 switch (event) {
5817 case BFA_DCONF_SM_INIT:
5818 if (dconf->min_cfg) {
5819 bfa_trc(dconf->bfa, dconf->min_cfg);
5820 bfa_fsm_send_event(&dconf->bfa->iocfc,
5821 IOCFC_E_DCONF_DONE);
5822 return;
5824 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5825 bfa_timer_start(dconf->bfa, &dconf->timer,
5826 bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5827 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5828 BFA_FLASH_PART_DRV, dconf->instance,
5829 dconf->dconf,
5830 sizeof(struct bfa_dconf_s), 0,
5831 bfa_dconf_init_cb, dconf->bfa);
5832 if (bfa_status != BFA_STATUS_OK) {
5833 bfa_timer_stop(&dconf->timer);
5834 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5835 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5836 return;
5838 break;
5839 case BFA_DCONF_SM_EXIT:
5840 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5841 break;
5842 case BFA_DCONF_SM_IOCDISABLE:
5843 case BFA_DCONF_SM_WR:
5844 case BFA_DCONF_SM_FLASH_COMP:
5845 break;
5846 default:
5847 bfa_sm_fault(dconf->bfa, event);
5852 * Read flash for dconf entries and make a call back to the driver once done.
5854 static void
5855 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5856 enum bfa_dconf_event event)
5858 bfa_trc(dconf->bfa, event);
5860 switch (event) {
5861 case BFA_DCONF_SM_FLASH_COMP:
5862 bfa_timer_stop(&dconf->timer);
5863 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5864 break;
5865 case BFA_DCONF_SM_TIMEOUT:
5866 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5867 bfa_ioc_suspend(&dconf->bfa->ioc);
5868 break;
5869 case BFA_DCONF_SM_EXIT:
5870 bfa_timer_stop(&dconf->timer);
5871 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5872 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5873 break;
5874 case BFA_DCONF_SM_IOCDISABLE:
5875 bfa_timer_stop(&dconf->timer);
5876 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5877 break;
5878 default:
5879 bfa_sm_fault(dconf->bfa, event);
5884 * DCONF Module is in ready state. Has completed the initialization.
5886 static void
5887 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5889 bfa_trc(dconf->bfa, event);
5891 switch (event) {
5892 case BFA_DCONF_SM_WR:
5893 bfa_timer_start(dconf->bfa, &dconf->timer,
5894 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5895 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5896 break;
5897 case BFA_DCONF_SM_EXIT:
5898 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5899 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5900 break;
5901 case BFA_DCONF_SM_INIT:
5902 case BFA_DCONF_SM_IOCDISABLE:
5903 break;
5904 default:
5905 bfa_sm_fault(dconf->bfa, event);
5910 * Entries are dirty; write them back to the flash.
5913 static void
5914 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5916 bfa_trc(dconf->bfa, event);
5918 switch (event) {
5919 case BFA_DCONF_SM_TIMEOUT:
5920 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5921 bfa_dconf_flash_write(dconf);
5922 break;
5923 case BFA_DCONF_SM_WR:
5924 bfa_timer_stop(&dconf->timer);
5925 bfa_timer_start(dconf->bfa, &dconf->timer,
5926 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5927 break;
5928 case BFA_DCONF_SM_EXIT:
5929 bfa_timer_stop(&dconf->timer);
5930 bfa_timer_start(dconf->bfa, &dconf->timer,
5931 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5932 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5933 bfa_dconf_flash_write(dconf);
5934 break;
5935 case BFA_DCONF_SM_FLASH_COMP:
5936 break;
5937 case BFA_DCONF_SM_IOCDISABLE:
5938 bfa_timer_stop(&dconf->timer);
5939 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5940 break;
5941 default:
5942 bfa_sm_fault(dconf->bfa, event);
5947 * Sync the dconf entries to the flash.
5949 static void
5950 bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5951 enum bfa_dconf_event event)
5953 bfa_trc(dconf->bfa, event);
5955 switch (event) {
5956 case BFA_DCONF_SM_IOCDISABLE:
5957 case BFA_DCONF_SM_FLASH_COMP:
5958 bfa_timer_stop(&dconf->timer);
5959 fallthrough;
5960 case BFA_DCONF_SM_TIMEOUT:
5961 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5962 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5963 break;
5964 default:
5965 bfa_sm_fault(dconf->bfa, event);
5969 static void
5970 bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5972 bfa_trc(dconf->bfa, event);
5974 switch (event) {
5975 case BFA_DCONF_SM_FLASH_COMP:
5976 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5977 break;
5978 case BFA_DCONF_SM_WR:
5979 bfa_timer_start(dconf->bfa, &dconf->timer,
5980 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5981 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5982 break;
5983 case BFA_DCONF_SM_EXIT:
5984 bfa_timer_start(dconf->bfa, &dconf->timer,
5985 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5986 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5987 break;
5988 case BFA_DCONF_SM_IOCDISABLE:
5989 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5990 break;
5991 default:
5992 bfa_sm_fault(dconf->bfa, event);
5996 static void
5997 bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5998 enum bfa_dconf_event event)
6000 bfa_trc(dconf->bfa, event);
6002 switch (event) {
6003 case BFA_DCONF_SM_INIT:
6004 bfa_timer_start(dconf->bfa, &dconf->timer,
6005 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6006 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6007 break;
6008 case BFA_DCONF_SM_EXIT:
6009 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6010 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6011 break;
6012 case BFA_DCONF_SM_IOCDISABLE:
6013 break;
6014 default:
6015 bfa_sm_fault(dconf->bfa, event);
6020 * Compute and return memory needed by DRV_CFG module.
6022 void
6023 bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6024 struct bfa_s *bfa)
6026 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6028 if (cfg->drvcfg.min_cfg)
6029 bfa_mem_kva_setup(meminfo, dconf_kva,
6030 sizeof(struct bfa_dconf_hdr_s));
6031 else
6032 bfa_mem_kva_setup(meminfo, dconf_kva,
6033 sizeof(struct bfa_dconf_s));
6036 void
6037 bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6039 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6041 dconf->bfad = bfad;
6042 dconf->bfa = bfa;
6043 dconf->instance = bfa->ioc.port_id;
6044 bfa_trc(bfa, dconf->instance);
6046 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6047 if (cfg->drvcfg.min_cfg) {
6048 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6049 dconf->min_cfg = BFA_TRUE;
6050 } else {
6051 dconf->min_cfg = BFA_FALSE;
6052 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6055 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6056 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6059 static void
6060 bfa_dconf_init_cb(void *arg, bfa_status_t status)
6062 struct bfa_s *bfa = arg;
6063 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6065 if (status == BFA_STATUS_OK) {
6066 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6067 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6068 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6069 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6070 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6072 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6073 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6076 void
6077 bfa_dconf_modinit(struct bfa_s *bfa)
6079 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6080 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6083 static void bfa_dconf_timer(void *cbarg)
6085 struct bfa_dconf_mod_s *dconf = cbarg;
6086 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6089 void
6090 bfa_dconf_iocdisable(struct bfa_s *bfa)
6092 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6093 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6096 static bfa_status_t
6097 bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6099 bfa_status_t bfa_status;
6100 bfa_trc(dconf->bfa, 0);
6102 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6103 BFA_FLASH_PART_DRV, dconf->instance,
6104 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
6105 bfa_dconf_cbfn, dconf);
6106 if (bfa_status != BFA_STATUS_OK)
6107 WARN_ON(bfa_status);
6108 bfa_trc(dconf->bfa, bfa_status);
6110 return bfa_status;
6113 bfa_status_t
6114 bfa_dconf_update(struct bfa_s *bfa)
6116 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6117 bfa_trc(dconf->bfa, 0);
6118 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6119 return BFA_STATUS_FAILED;
6121 if (dconf->min_cfg) {
6122 bfa_trc(dconf->bfa, dconf->min_cfg);
6123 return BFA_STATUS_FAILED;
6126 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6127 return BFA_STATUS_OK;
6130 static void
6131 bfa_dconf_cbfn(void *arg, bfa_status_t status)
6133 struct bfa_dconf_mod_s *dconf = arg;
6134 WARN_ON(status);
6135 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6138 void
6139 bfa_dconf_modexit(struct bfa_s *bfa)
6141 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6142 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6146 * FRU specific functions
6149 #define BFA_FRU_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
6150 #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6151 #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6153 static void
6154 bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6156 struct bfa_fru_s *fru = cbarg;
6158 bfa_trc(fru, event);
6160 switch (event) {
6161 case BFA_IOC_E_DISABLED:
6162 case BFA_IOC_E_FAILED:
6163 if (fru->op_busy) {
6164 fru->status = BFA_STATUS_IOC_FAILURE;
6165 fru->cbfn(fru->cbarg, fru->status);
6166 fru->op_busy = 0;
6168 break;
6170 default:
6171 break;
6176 * Send fru write request.
6178 * @param[in] cbarg - callback argument
6180 static void
6181 bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6183 struct bfa_fru_s *fru = cbarg;
6184 struct bfi_fru_write_req_s *msg =
6185 (struct bfi_fru_write_req_s *) fru->mb.msg;
6186 u32 len;
6188 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6189 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6190 fru->residue : BFA_FRU_DMA_BUF_SZ;
6191 msg->length = cpu_to_be32(len);
6194 * indicate if it's the last msg of the whole write operation
6196 msg->last = (len == fru->residue) ? 1 : 0;
6198 msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6199 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6200 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6202 memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6203 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6205 fru->residue -= len;
6206 fru->offset += len;
6210 * Send fru read request.
6212 * @param[in] cbarg - callback argument
6214 static void
6215 bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6217 struct bfa_fru_s *fru = cbarg;
6218 struct bfi_fru_read_req_s *msg =
6219 (struct bfi_fru_read_req_s *) fru->mb.msg;
6220 u32 len;
6222 msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6223 len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6224 fru->residue : BFA_FRU_DMA_BUF_SZ;
6225 msg->length = cpu_to_be32(len);
6226 bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6227 bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6228 bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6232 * FRU memory info API.
6234 * @param[in] mincfg - minimal cfg variable
6237 bfa_fru_meminfo(bfa_boolean_t mincfg)
6239 /* min driver doesn't need fru */
6240 if (mincfg)
6241 return 0;
6243 return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6247 * FRU attach API.
6249 * @param[in] fru - fru structure
6250 * @param[in] ioc - ioc structure
6251 * @param[in] dev - device structure
6252 * @param[in] trcmod - trace module
6253 * @param[in] mincfg - minimal cfg variable
6255 void
6256 bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6257 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6259 fru->ioc = ioc;
6260 fru->trcmod = trcmod;
6261 fru->cbfn = NULL;
6262 fru->cbarg = NULL;
6263 fru->op_busy = 0;
6265 bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6266 bfa_q_qe_init(&fru->ioc_notify);
6267 bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6268 list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6270 /* min driver doesn't need fru */
6271 if (mincfg) {
6272 fru->dbuf_kva = NULL;
6273 fru->dbuf_pa = 0;
6278 * Claim memory for fru
6280 * @param[in] fru - fru structure
6281 * @param[in] dm_kva - pointer to virtual memory address
6282 * @param[in] dm_pa - physical memory address
6283 * @param[in] mincfg - minimal cfg variable
6285 void
6286 bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6287 bfa_boolean_t mincfg)
6289 if (mincfg)
6290 return;
6292 fru->dbuf_kva = dm_kva;
6293 fru->dbuf_pa = dm_pa;
6294 memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6295 dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6296 dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6300 * Update fru vpd image.
6302 * @param[in] fru - fru structure
6303 * @param[in] buf - update data buffer
6304 * @param[in] len - data buffer length
6305 * @param[in] offset - offset relative to starting address
6306 * @param[in] cbfn - callback function
6307 * @param[in] cbarg - callback argument
6309 * Return status.
6311 bfa_status_t
6312 bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6313 bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6315 bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6316 bfa_trc(fru, len);
6317 bfa_trc(fru, offset);
6319 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6320 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6321 return BFA_STATUS_FRU_NOT_PRESENT;
6323 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6324 return BFA_STATUS_CMD_NOTSUPP;
6326 if (!bfa_ioc_is_operational(fru->ioc))
6327 return BFA_STATUS_IOC_NON_OP;
6329 if (fru->op_busy) {
6330 bfa_trc(fru, fru->op_busy);
6331 return BFA_STATUS_DEVBUSY;
6334 fru->op_busy = 1;
6336 fru->cbfn = cbfn;
6337 fru->cbarg = cbarg;
6338 fru->residue = len;
6339 fru->offset = 0;
6340 fru->addr_off = offset;
6341 fru->ubuf = buf;
6342 fru->trfr_cmpl = trfr_cmpl;
6344 bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6346 return BFA_STATUS_OK;
6350 * Read fru vpd image.
6352 * @param[in] fru - fru structure
6353 * @param[in] buf - read data buffer
6354 * @param[in] len - data buffer length
6355 * @param[in] offset - offset relative to starting address
6356 * @param[in] cbfn - callback function
6357 * @param[in] cbarg - callback argument
6359 * Return status.
6361 bfa_status_t
6362 bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6363 bfa_cb_fru_t cbfn, void *cbarg)
6365 bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6366 bfa_trc(fru, len);
6367 bfa_trc(fru, offset);
6369 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6370 return BFA_STATUS_FRU_NOT_PRESENT;
6372 if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6373 fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6374 return BFA_STATUS_CMD_NOTSUPP;
6376 if (!bfa_ioc_is_operational(fru->ioc))
6377 return BFA_STATUS_IOC_NON_OP;
6379 if (fru->op_busy) {
6380 bfa_trc(fru, fru->op_busy);
6381 return BFA_STATUS_DEVBUSY;
6384 fru->op_busy = 1;
6386 fru->cbfn = cbfn;
6387 fru->cbarg = cbarg;
6388 fru->residue = len;
6389 fru->offset = 0;
6390 fru->addr_off = offset;
6391 fru->ubuf = buf;
6392 bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6394 return BFA_STATUS_OK;
6398 * Get the maximum size of the fru vpd image.
6400 * @param[in] fru - fru structure
6401 * @param[out] max_size - maximum size of fru vpd data
6403 * Return status.
6405 bfa_status_t
6406 bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6408 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6409 return BFA_STATUS_FRU_NOT_PRESENT;
6411 if (!bfa_ioc_is_operational(fru->ioc))
6412 return BFA_STATUS_IOC_NON_OP;
6414 if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6415 fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6416 *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6417 else
6418 return BFA_STATUS_CMD_NOTSUPP;
6419 return BFA_STATUS_OK;
6422 * tfru write.
6424 * @param[in] fru - fru structure
6425 * @param[in] buf - update data buffer
6426 * @param[in] len - data buffer length
6427 * @param[in] offset - offset relative to starting address
6428 * @param[in] cbfn - callback function
6429 * @param[in] cbarg - callback argument
6431 * Return status.
6433 bfa_status_t
6434 bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6435 bfa_cb_fru_t cbfn, void *cbarg)
6437 bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6438 bfa_trc(fru, len);
6439 bfa_trc(fru, offset);
6440 bfa_trc(fru, *((u8 *) buf));
6442 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6443 return BFA_STATUS_FRU_NOT_PRESENT;
6445 if (!bfa_ioc_is_operational(fru->ioc))
6446 return BFA_STATUS_IOC_NON_OP;
6448 if (fru->op_busy) {
6449 bfa_trc(fru, fru->op_busy);
6450 return BFA_STATUS_DEVBUSY;
6453 fru->op_busy = 1;
6455 fru->cbfn = cbfn;
6456 fru->cbarg = cbarg;
6457 fru->residue = len;
6458 fru->offset = 0;
6459 fru->addr_off = offset;
6460 fru->ubuf = buf;
6462 bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6464 return BFA_STATUS_OK;
6468 * tfru read.
6470 * @param[in] fru - fru structure
6471 * @param[in] buf - read data buffer
6472 * @param[in] len - data buffer length
6473 * @param[in] offset - offset relative to starting address
6474 * @param[in] cbfn - callback function
6475 * @param[in] cbarg - callback argument
6477 * Return status.
6479 bfa_status_t
6480 bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6481 bfa_cb_fru_t cbfn, void *cbarg)
6483 bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6484 bfa_trc(fru, len);
6485 bfa_trc(fru, offset);
6487 if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6488 return BFA_STATUS_FRU_NOT_PRESENT;
6490 if (!bfa_ioc_is_operational(fru->ioc))
6491 return BFA_STATUS_IOC_NON_OP;
6493 if (fru->op_busy) {
6494 bfa_trc(fru, fru->op_busy);
6495 return BFA_STATUS_DEVBUSY;
6498 fru->op_busy = 1;
6500 fru->cbfn = cbfn;
6501 fru->cbarg = cbarg;
6502 fru->residue = len;
6503 fru->offset = 0;
6504 fru->addr_off = offset;
6505 fru->ubuf = buf;
6506 bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6508 return BFA_STATUS_OK;
6512 * Process fru response messages upon receiving interrupts.
6514 * @param[in] fruarg - fru structure
6515 * @param[in] msg - message structure
6517 void
6518 bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6520 struct bfa_fru_s *fru = fruarg;
6521 struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6522 u32 status;
6524 bfa_trc(fru, msg->mh.msg_id);
6526 if (!fru->op_busy) {
6528 * receiving response after ioc failure
6530 bfa_trc(fru, 0x9999);
6531 return;
6534 switch (msg->mh.msg_id) {
6535 case BFI_FRUVPD_I2H_WRITE_RSP:
6536 case BFI_TFRU_I2H_WRITE_RSP:
6537 status = be32_to_cpu(rsp->status);
6538 bfa_trc(fru, status);
6540 if (status != BFA_STATUS_OK || fru->residue == 0) {
6541 fru->status = status;
6542 fru->op_busy = 0;
6543 if (fru->cbfn)
6544 fru->cbfn(fru->cbarg, fru->status);
6545 } else {
6546 bfa_trc(fru, fru->offset);
6547 if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6548 bfa_fru_write_send(fru,
6549 BFI_FRUVPD_H2I_WRITE_REQ);
6550 else
6551 bfa_fru_write_send(fru,
6552 BFI_TFRU_H2I_WRITE_REQ);
6554 break;
6555 case BFI_FRUVPD_I2H_READ_RSP:
6556 case BFI_TFRU_I2H_READ_RSP:
6557 status = be32_to_cpu(rsp->status);
6558 bfa_trc(fru, status);
6560 if (status != BFA_STATUS_OK) {
6561 fru->status = status;
6562 fru->op_busy = 0;
6563 if (fru->cbfn)
6564 fru->cbfn(fru->cbarg, fru->status);
6565 } else {
6566 u32 len = be32_to_cpu(rsp->length);
6568 bfa_trc(fru, fru->offset);
6569 bfa_trc(fru, len);
6571 memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6572 fru->residue -= len;
6573 fru->offset += len;
6575 if (fru->residue == 0) {
6576 fru->status = status;
6577 fru->op_busy = 0;
6578 if (fru->cbfn)
6579 fru->cbfn(fru->cbarg, fru->status);
6580 } else {
6581 if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6582 bfa_fru_read_send(fru,
6583 BFI_FRUVPD_H2I_READ_REQ);
6584 else
6585 bfa_fru_read_send(fru,
6586 BFI_TFRU_H2I_READ_REQ);
6589 break;
6590 default:
6591 WARN_ON(1);
6596 * register definitions
6598 #define FLI_CMD_REG 0x0001d000
6599 #define FLI_RDDATA_REG 0x0001d010
6600 #define FLI_ADDR_REG 0x0001d004
6601 #define FLI_DEV_STATUS_REG 0x0001d014
6603 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
6604 #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
6605 #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
6606 #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
6608 enum bfa_flash_cmd {
6609 BFA_FLASH_FAST_READ = 0x0b, /* fast read */
6610 BFA_FLASH_READ_STATUS = 0x05, /* read status */
6614 * Hardware error definition
6616 enum bfa_flash_err {
6617 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
6618 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
6619 BFA_FLASH_BAD = -3, /*!< flash bad */
6620 BFA_FLASH_BUSY = -4, /*!< flash busy */
6621 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
6622 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
6623 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
6624 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
6625 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
6629 * Flash command register data structure
6631 union bfa_flash_cmd_reg_u {
6632 struct {
6633 #ifdef __BIG_ENDIAN
6634 u32 act:1;
6635 u32 rsv:1;
6636 u32 write_cnt:9;
6637 u32 read_cnt:9;
6638 u32 addr_cnt:4;
6639 u32 cmd:8;
6640 #else
6641 u32 cmd:8;
6642 u32 addr_cnt:4;
6643 u32 read_cnt:9;
6644 u32 write_cnt:9;
6645 u32 rsv:1;
6646 u32 act:1;
6647 #endif
6648 } r;
6649 u32 i;
6653 * Flash device status register data structure
6655 union bfa_flash_dev_status_reg_u {
6656 struct {
6657 #ifdef __BIG_ENDIAN
6658 u32 rsv:21;
6659 u32 fifo_cnt:6;
6660 u32 busy:1;
6661 u32 init_status:1;
6662 u32 present:1;
6663 u32 bad:1;
6664 u32 good:1;
6665 #else
6666 u32 good:1;
6667 u32 bad:1;
6668 u32 present:1;
6669 u32 init_status:1;
6670 u32 busy:1;
6671 u32 fifo_cnt:6;
6672 u32 rsv:21;
6673 #endif
6674 } r;
6675 u32 i;
6679 * Flash address register data structure
6681 union bfa_flash_addr_reg_u {
6682 struct {
6683 #ifdef __BIG_ENDIAN
6684 u32 addr:24;
6685 u32 dummy:8;
6686 #else
6687 u32 dummy:8;
6688 u32 addr:24;
6689 #endif
6690 } r;
6691 u32 i;
6695 * Flash raw private functions
6697 static void
6698 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6699 u8 rd_cnt, u8 ad_cnt, u8 op)
6701 union bfa_flash_cmd_reg_u cmd;
6703 cmd.i = 0;
6704 cmd.r.act = 1;
6705 cmd.r.write_cnt = wr_cnt;
6706 cmd.r.read_cnt = rd_cnt;
6707 cmd.r.addr_cnt = ad_cnt;
6708 cmd.r.cmd = op;
6709 writel(cmd.i, (pci_bar + FLI_CMD_REG));
6712 static void
6713 bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6715 union bfa_flash_addr_reg_u addr;
6717 addr.r.addr = address & 0x00ffffff;
6718 addr.r.dummy = 0;
6719 writel(addr.i, (pci_bar + FLI_ADDR_REG));
6722 static int
6723 bfa_flash_cmd_act_check(void __iomem *pci_bar)
6725 union bfa_flash_cmd_reg_u cmd;
6727 cmd.i = readl(pci_bar + FLI_CMD_REG);
6729 if (cmd.r.act)
6730 return BFA_FLASH_ERR_CMD_ACT;
6732 return 0;
6736 * @brief
6737 * Flush FLI data fifo.
6739 * @param[in] pci_bar - pci bar address
6742 * Return 0 on success, negative error number on error.
6744 static u32
6745 bfa_flash_fifo_flush(void __iomem *pci_bar)
6747 u32 i;
6748 union bfa_flash_dev_status_reg_u dev_status;
6750 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6752 if (!dev_status.r.fifo_cnt)
6753 return 0;
6755 /* fifo counter in terms of words */
6756 for (i = 0; i < dev_status.r.fifo_cnt; i++)
6757 readl(pci_bar + FLI_RDDATA_REG);
6760 * Check the device status. It may take some time.
6762 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6763 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6764 if (!dev_status.r.fifo_cnt)
6765 break;
6768 if (dev_status.r.fifo_cnt)
6769 return BFA_FLASH_ERR_FIFO_CNT;
6771 return 0;
6775 * @brief
6776 * Read flash status.
6778 * @param[in] pci_bar - pci bar address
6780 * Return 0 on success, negative error number on error.
6782 static u32
6783 bfa_flash_status_read(void __iomem *pci_bar)
6785 union bfa_flash_dev_status_reg_u dev_status;
6786 int status;
6787 u32 ret_status;
6788 int i;
6790 status = bfa_flash_fifo_flush(pci_bar);
6791 if (status < 0)
6792 return status;
6794 bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6796 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6797 status = bfa_flash_cmd_act_check(pci_bar);
6798 if (!status)
6799 break;
6802 if (status)
6803 return status;
6805 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6806 if (!dev_status.r.fifo_cnt)
6807 return BFA_FLASH_BUSY;
6809 ret_status = readl(pci_bar + FLI_RDDATA_REG);
6810 ret_status >>= 24;
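/*
 * The 8-bit flash status presumably arrives in the most significant
 * byte of the fifo word, hence the shift above.
 */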
6812 status = bfa_flash_fifo_flush(pci_bar);
6813 if (status < 0)
6814 return status;
6816 return ret_status;
6820 * @brief
6821 * Start flash read operation.
6823 * @param[in] pci_bar - pci bar address
6824 * @param[in] offset - flash address offset
6825 * @param[in] len - read data length
6826 * @param[in] buf - read data buffer
6828 * Return 0 on success, negative error number on error.
6830 static u32
6831 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6832 char *buf)
6834 int status;
6837 * len must be a multiple of 4 and must not exceed the fifo size
6839 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6840 return BFA_FLASH_ERR_LEN;
6843 * check status
6845 status = bfa_flash_status_read(pci_bar);
6846 if (status == BFA_FLASH_BUSY)
6847 status = bfa_flash_status_read(pci_bar);
6849 if (status < 0)
6850 return status;
6853 * check if write-in-progress bit is cleared
6855 if (status & BFA_FLASH_WIP_MASK)
6856 return BFA_FLASH_ERR_WIP;
6858 bfa_flash_set_addr(pci_bar, offset);
6860 bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6862 return 0;
6866 * @brief
6867 * Check flash read operation.
6869 * @param[in] pci_bar - pci bar address
6871 * Return flash device status, 1 if busy, 0 if not.
6873 static u32
6874 bfa_flash_read_check(void __iomem *pci_bar)
6876 if (bfa_flash_cmd_act_check(pci_bar))
6877 return 1;
6879 return 0;
6883 * @brief
6884 * End flash read operation.
6886 * @param[in] pci_bar - pci bar address
6887 * @param[in] len - read data length
6888 * @param[in] buf - read data buffer
6891 static void
6892 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6895 u32 i;
6898 * read data fifo up to 32 words
6900 for (i = 0; i < len; i += 4) {
6901 u32 w = readl(pci_bar + FLI_RDDATA_REG);
6902 *((u32 *) (buf + i)) = swab32(w);
6905 bfa_flash_fifo_flush(pci_bar);
6909 * @brief
6910 * Perform flash raw read.
6912 * @param[in] pci_bar - pci bar address
6913 * @param[in] offset - flash partition address offset
6914 * @param[in] buf - read data buffer
6915 * @param[in] len - read data length
6917 * Return status.
6921 #define FLASH_BLOCKING_OP_MAX 500
6922 #define FLASH_SEM_LOCK_REG 0x18820
6924 static int
6925 bfa_raw_sem_get(void __iomem *bar)
6927 int locked;
6929 locked = readl((bar + FLASH_SEM_LOCK_REG));
6930 return !locked;
6934 static bfa_status_t
6935 bfa_flash_sem_get(void __iomem *bar)
6937 u32 n = FLASH_BLOCKING_OP_MAX;
6939 while (!bfa_raw_sem_get(bar)) {
6940 if (--n <= 0)
6941 return BFA_STATUS_BADFLASH;
6942 mdelay(10);
6944 return BFA_STATUS_OK;
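/*
 * Arithmetic note (illustrative, not in the original source): the loop
 * above retries up to FLASH_BLOCKING_OP_MAX (500) times with
 * mdelay(10) between tries, so semaphore acquisition is bounded at
 * roughly 500 * 10ms = 5s before BFA_STATUS_BADFLASH is returned.
 */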
6947 static void
6948 bfa_flash_sem_put(void __iomem *bar)
6950 writel(0, (bar + FLASH_SEM_LOCK_REG));
6953 bfa_status_t
6954 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
6955 u32 len)
6957 u32 n;
6958 int status;
6959 u32 off, l, s, residue, fifo_sz;
6961 residue = len;
6962 off = 0;
6963 fifo_sz = BFA_FLASH_FIFO_SIZE;
6964 status = bfa_flash_sem_get(pci_bar);
6965 if (status != BFA_STATUS_OK)
6966 return status;
6968 while (residue) {
6969 s = offset + off;
6970 n = s / fifo_sz;
6971 l = (n + 1) * fifo_sz - s;
6972 if (l > residue)
6973 l = residue;
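/*
 * Worked example (illustrative, not in the original source): the
 * three lines above clip each read to the next fifo-size boundary.
 * With fifo_sz = 128, offset = 100 and len = 300, the chunks are 28,
 * 128, 128 and 16 bytes, so no single bfa_flash_read_start() ever
 * crosses a 128-byte fifo window.
 */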
6975 status = bfa_flash_read_start(pci_bar, offset + off, l,
6976 &buf[off]);
6977 if (status < 0) {
6978 bfa_flash_sem_put(pci_bar);
6979 return BFA_STATUS_FAILED;
6982 n = BFA_FLASH_BLOCKING_OP_MAX;
6983 while (bfa_flash_read_check(pci_bar)) {
6984 if (--n <= 0) {
6985 bfa_flash_sem_put(pci_bar);
6986 return BFA_STATUS_FAILED;
6990 bfa_flash_read_end(pci_bar, l, &buf[off]);
6992 residue -= l;
6993 off += l;
6995 bfa_flash_sem_put(pci_bar);
6997 return BFA_STATUS_OK;