mei: me: add cannon point device ids
[linux/fpc-iii.git] / drivers / scsi / bfa / bfa_svc.c
blobe640223bab3c6845fff6d686c7f894c0a900d15e
1 /*
2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3 * Copyright (c) 2014- QLogic Corporation.
4 * All rights reserved
5 * www.qlogic.com
7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License (GPL) Version 2 as
11 * published by the Free Software Foundation
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
19 #include "bfad_drv.h"
20 #include "bfad_im.h"
21 #include "bfa_plog.h"
22 #include "bfa_cs.h"
23 #include "bfa_modules.h"
25 BFA_TRC_FILE(HAL, FCXP);
28 * LPS related definitions
30 #define BFA_LPS_MIN_LPORTS (1)
31 #define BFA_LPS_MAX_LPORTS (256)
34 * Maximum Vports supported per physical port or vf.
36 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
37 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
41 * FC PORT related definitions
44 * The port is considered disabled if corresponding physical port or IOC are
45 * disabled explicitly
/*
 * A port counts as disabled when either the FC port itself or its IOC
 * has been explicitly disabled.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
52 * BFA port state machine events
54 enum bfa_fcport_sm_event {
55 BFA_FCPORT_SM_START = 1, /* start port state machine */
56 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
57 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
58 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
59 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
60 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
61 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
62 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
63 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
64 BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
65 BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
66 BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguration */
67 BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */
68 BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */
72 * BFA port link notification state machine events
75 enum bfa_fcport_ln_sm_event {
76 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
77 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
78 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
82 * RPORT related definitions
/*
 * Deliver the rport offline notification: synchronously when the FCS
 * layer is attached, otherwise deferred through the HAL callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)
/*
 * Deliver the rport online notification: synchronously when the FCS
 * layer is attached, otherwise deferred through the HAL callback queue.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
103 * forward declarations FCXP related functions
105 static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
106 static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
107 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
108 static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
109 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
110 static void bfa_fcxp_qresume(void *cbarg);
111 static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
112 struct bfi_fcxp_send_req_s *send_req);
115 * forward declarations for LPS functions
117 static void bfa_lps_login_rsp(struct bfa_s *bfa,
118 struct bfi_lps_login_rsp_s *rsp);
119 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
120 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
121 struct bfi_lps_logout_rsp_s *rsp);
122 static void bfa_lps_reqq_resume(void *lps_arg);
123 static void bfa_lps_free(struct bfa_lps_s *lps);
124 static void bfa_lps_send_login(struct bfa_lps_s *lps);
125 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
126 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
127 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
128 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
129 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
132 * forward declaration for LPS state machine
134 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
135 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
136 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
137 event);
138 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
139 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
140 enum bfa_lps_event event);
141 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
142 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
143 event);
146 * forward declaration for FC Port functions
148 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
149 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
150 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
151 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
152 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
153 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
154 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
155 enum bfa_port_linkstate event, bfa_boolean_t trunk);
156 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
157 enum bfa_port_linkstate event);
158 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
159 static void bfa_fcport_stats_get_timeout(void *cbarg);
160 static void bfa_fcport_stats_clr_timeout(void *cbarg);
161 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
164 * forward declaration for FC PORT state machine
166 static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
167 enum bfa_fcport_sm_event event);
168 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
169 enum bfa_fcport_sm_event event);
170 static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
171 enum bfa_fcport_sm_event event);
172 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
173 enum bfa_fcport_sm_event event);
174 static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
175 enum bfa_fcport_sm_event event);
176 static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
177 enum bfa_fcport_sm_event event);
178 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
179 enum bfa_fcport_sm_event event);
180 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
181 enum bfa_fcport_sm_event event);
182 static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
183 enum bfa_fcport_sm_event event);
184 static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
185 enum bfa_fcport_sm_event event);
186 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
187 enum bfa_fcport_sm_event event);
188 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
189 enum bfa_fcport_sm_event event);
190 static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
191 enum bfa_fcport_sm_event event);
192 static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
193 enum bfa_fcport_sm_event event);
194 static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event);
197 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
198 enum bfa_fcport_ln_sm_event event);
199 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
200 enum bfa_fcport_ln_sm_event event);
201 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event);
203 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
204 enum bfa_fcport_ln_sm_event event);
205 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
206 enum bfa_fcport_ln_sm_event event);
207 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
208 enum bfa_fcport_ln_sm_event event);
209 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
210 enum bfa_fcport_ln_sm_event event);
212 static struct bfa_sm_table_s hal_port_sm_table[] = {
213 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
214 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
215 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
216 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
217 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
218 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
219 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
220 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
221 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
222 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
223 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
224 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
225 {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
226 {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
227 {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
232 * forward declaration for RPORT related functions
234 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
235 static void bfa_rport_free(struct bfa_rport_s *rport);
236 static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
237 static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
238 static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
239 static void __bfa_cb_rport_online(void *cbarg,
240 bfa_boolean_t complete);
241 static void __bfa_cb_rport_offline(void *cbarg,
242 bfa_boolean_t complete);
245 * forward declaration for RPORT state machine
247 static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
248 enum bfa_rport_event event);
249 static void bfa_rport_sm_created(struct bfa_rport_s *rp,
250 enum bfa_rport_event event);
251 static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
252 enum bfa_rport_event event);
253 static void bfa_rport_sm_online(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255 static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257 static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259 static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261 static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263 static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265 static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
270 enum bfa_rport_event event);
271 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
275 * PLOG related definitions
277 static int
278 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
280 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
282 return 1;
284 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
285 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
286 return 1;
288 return 0;
291 static u64
292 bfa_get_log_time(void)
294 u64 system_time = 0;
295 struct timeval tv;
296 do_gettimeofday(&tv);
298 /* We are interested in seconds only. */
299 system_time = tv.tv_sec;
300 return system_time;
303 static void
304 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
306 u16 tail;
307 struct bfa_plog_rec_s *pl_recp;
309 if (plog->plog_enabled == 0)
310 return;
312 if (plkd_validate_logrec(pl_rec)) {
313 WARN_ON(1);
314 return;
317 tail = plog->tail;
319 pl_recp = &(plog->plog_recs[tail]);
321 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
323 pl_recp->tv = bfa_get_log_time();
324 BFA_PL_LOG_REC_INCR(plog->tail);
326 if (plog->head == plog->tail)
327 BFA_PL_LOG_REC_INCR(plog->head);
330 void
331 bfa_plog_init(struct bfa_plog_s *plog)
333 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
335 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
336 plog->head = plog->tail = 0;
337 plog->plog_enabled = 1;
340 void
341 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
342 enum bfa_plog_eid event,
343 u16 misc, char *log_str)
345 struct bfa_plog_rec_s lp;
347 if (plog->plog_enabled) {
348 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
349 lp.mid = mid;
350 lp.eid = event;
351 lp.log_type = BFA_PL_LOG_TYPE_STRING;
352 lp.misc = misc;
353 strncpy(lp.log_entry.string_log, log_str,
354 BFA_PL_STRING_LOG_SZ - 1);
355 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
356 bfa_plog_add(plog, &lp);
360 void
361 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
362 enum bfa_plog_eid event,
363 u16 misc, u32 *intarr, u32 num_ints)
365 struct bfa_plog_rec_s lp;
366 u32 i;
368 if (num_ints > BFA_PL_INT_LOG_SZ)
369 num_ints = BFA_PL_INT_LOG_SZ;
371 if (plog->plog_enabled) {
372 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
373 lp.mid = mid;
374 lp.eid = event;
375 lp.log_type = BFA_PL_LOG_TYPE_INT;
376 lp.misc = misc;
378 for (i = 0; i < num_ints; i++)
379 lp.log_entry.int_log[i] = intarr[i];
381 lp.log_num_ints = (u8) num_ints;
383 bfa_plog_add(plog, &lp);
387 void
388 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
389 enum bfa_plog_eid event,
390 u16 misc, struct fchs_s *fchdr)
392 struct bfa_plog_rec_s lp;
393 u32 *tmp_int = (u32 *) fchdr;
394 u32 ints[BFA_PL_INT_LOG_SZ];
396 if (plog->plog_enabled) {
397 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
399 ints[0] = tmp_int[0];
400 ints[1] = tmp_int[1];
401 ints[2] = tmp_int[4];
403 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
407 void
408 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
409 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
410 u32 pld_w0)
412 struct bfa_plog_rec_s lp;
413 u32 *tmp_int = (u32 *) fchdr;
414 u32 ints[BFA_PL_INT_LOG_SZ];
416 if (plog->plog_enabled) {
417 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
419 ints[0] = tmp_int[0];
420 ints[1] = tmp_int[1];
421 ints[2] = tmp_int[4];
422 ints[3] = pld_w0;
424 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
430 * fcxp_pvt BFA FCXP private functions
433 static void
434 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
436 u16 i;
437 struct bfa_fcxp_s *fcxp;
439 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
440 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
442 INIT_LIST_HEAD(&mod->fcxp_req_free_q);
443 INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
444 INIT_LIST_HEAD(&mod->fcxp_active_q);
445 INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
446 INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
448 mod->fcxp_list = fcxp;
450 for (i = 0; i < mod->num_fcxps; i++) {
451 fcxp->fcxp_mod = mod;
452 fcxp->fcxp_tag = i;
454 if (i < (mod->num_fcxps / 2)) {
455 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
456 fcxp->req_rsp = BFA_TRUE;
457 } else {
458 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
459 fcxp->req_rsp = BFA_FALSE;
462 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
463 fcxp->reqq_waiting = BFA_FALSE;
465 fcxp = fcxp + 1;
468 bfa_mem_kva_curp(mod) = (void *)fcxp;
471 void
472 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
473 struct bfa_s *bfa)
475 struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
476 struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
477 struct bfa_mem_dma_s *seg_ptr;
478 u16 nsegs, idx, per_seg_fcxp;
479 u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
480 u32 per_fcxp_sz;
482 if (num_fcxps == 0)
483 return;
485 if (cfg->drvcfg.min_cfg)
486 per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
487 else
488 per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
490 /* dma memory */
491 nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
492 per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
494 bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
495 if (num_fcxps >= per_seg_fcxp) {
496 num_fcxps -= per_seg_fcxp;
497 bfa_mem_dma_setup(minfo, seg_ptr,
498 per_seg_fcxp * per_fcxp_sz);
499 } else
500 bfa_mem_dma_setup(minfo, seg_ptr,
501 num_fcxps * per_fcxp_sz);
504 /* kva memory */
505 bfa_mem_kva_setup(minfo, fcxp_kva,
506 cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
509 void
510 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
511 struct bfa_pcidev_s *pcidev)
513 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
515 mod->bfa = bfa;
516 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
519 * Initialize FCXP request and response payload sizes.
521 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
522 if (!cfg->drvcfg.min_cfg)
523 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
525 INIT_LIST_HEAD(&mod->req_wait_q);
526 INIT_LIST_HEAD(&mod->rsp_wait_q);
528 claim_fcxps_mem(mod);
531 void
532 bfa_fcxp_iocdisable(struct bfa_s *bfa)
534 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
535 struct bfa_fcxp_s *fcxp;
536 struct list_head *qe, *qen;
538 /* Enqueue unused fcxp resources to free_q */
539 list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
540 list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
542 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
543 fcxp = (struct bfa_fcxp_s *) qe;
544 if (fcxp->caller == NULL) {
545 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
546 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
547 bfa_fcxp_free(fcxp);
548 } else {
549 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
550 bfa_cb_queue(bfa, &fcxp->hcb_qe,
551 __bfa_fcxp_send_cbfn, fcxp);
556 static struct bfa_fcxp_s *
557 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
559 struct bfa_fcxp_s *fcxp;
561 if (req)
562 bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
563 else
564 bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
566 if (fcxp)
567 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
569 return fcxp;
572 static void
573 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
574 struct bfa_s *bfa,
575 u8 *use_ibuf,
576 u32 *nr_sgles,
577 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
578 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
579 struct list_head *r_sgpg_q,
580 int n_sgles,
581 bfa_fcxp_get_sgaddr_t sga_cbfn,
582 bfa_fcxp_get_sglen_t sglen_cbfn)
585 WARN_ON(bfa == NULL);
587 bfa_trc(bfa, fcxp->fcxp_tag);
589 if (n_sgles == 0) {
590 *use_ibuf = 1;
591 } else {
592 WARN_ON(*sga_cbfn == NULL);
593 WARN_ON(*sglen_cbfn == NULL);
595 *use_ibuf = 0;
596 *r_sga_cbfn = sga_cbfn;
597 *r_sglen_cbfn = sglen_cbfn;
599 *nr_sgles = n_sgles;
602 * alloc required sgpgs
604 if (n_sgles > BFI_SGE_INLINE)
605 WARN_ON(1);
610 static void
611 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
612 void *caller, struct bfa_s *bfa, int nreq_sgles,
613 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
614 bfa_fcxp_get_sglen_t req_sglen_cbfn,
615 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
616 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
619 WARN_ON(bfa == NULL);
621 bfa_trc(bfa, fcxp->fcxp_tag);
623 fcxp->caller = caller;
625 bfa_fcxp_init_reqrsp(fcxp, bfa,
626 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
627 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
628 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
630 bfa_fcxp_init_reqrsp(fcxp, bfa,
631 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
632 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
633 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
637 static void
638 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
640 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
641 struct bfa_fcxp_wqe_s *wqe;
643 if (fcxp->req_rsp)
644 bfa_q_deq(&mod->req_wait_q, &wqe);
645 else
646 bfa_q_deq(&mod->rsp_wait_q, &wqe);
648 if (wqe) {
649 bfa_trc(mod->bfa, fcxp->fcxp_tag);
651 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
652 wqe->nrsp_sgles, wqe->req_sga_cbfn,
653 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
654 wqe->rsp_sglen_cbfn);
656 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
657 return;
660 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
661 list_del(&fcxp->qe);
663 if (fcxp->req_rsp)
664 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
665 else
666 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
669 static void
670 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
671 bfa_status_t req_status, u32 rsp_len,
672 u32 resid_len, struct fchs_s *rsp_fchs)
674 /* discarded fcxp completion */
677 static void
678 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
680 struct bfa_fcxp_s *fcxp = cbarg;
682 if (complete) {
683 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
684 fcxp->rsp_status, fcxp->rsp_len,
685 fcxp->residue_len, &fcxp->rsp_fchs);
686 } else {
687 bfa_fcxp_free(fcxp);
691 static void
692 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
694 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
695 struct bfa_fcxp_s *fcxp;
696 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
698 bfa_trc(bfa, fcxp_tag);
700 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
703 * @todo f/w should not set residue to non-0 when everything
704 * is received.
706 if (fcxp_rsp->req_status == BFA_STATUS_OK)
707 fcxp_rsp->residue_len = 0;
708 else
709 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
711 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
713 WARN_ON(fcxp->send_cbfn == NULL);
715 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
717 if (fcxp->send_cbfn != NULL) {
718 bfa_trc(mod->bfa, (NULL == fcxp->caller));
719 if (fcxp->caller == NULL) {
720 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
721 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
722 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
724 * fcxp automatically freed on return from the callback
726 bfa_fcxp_free(fcxp);
727 } else {
728 fcxp->rsp_status = fcxp_rsp->req_status;
729 fcxp->rsp_len = fcxp_rsp->rsp_len;
730 fcxp->residue_len = fcxp_rsp->residue_len;
731 fcxp->rsp_fchs = fcxp_rsp->fchs;
733 bfa_cb_queue(bfa, &fcxp->hcb_qe,
734 __bfa_fcxp_send_cbfn, fcxp);
736 } else {
737 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
741 static void
742 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
743 struct fchs_s *fchs)
746 * TODO: TX ox_id
748 if (reqlen > 0) {
749 if (fcxp->use_ireqbuf) {
750 u32 pld_w0 =
751 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
753 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
754 BFA_PL_EID_TX,
755 reqlen + sizeof(struct fchs_s), fchs,
756 pld_w0);
757 } else {
758 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
759 BFA_PL_EID_TX,
760 reqlen + sizeof(struct fchs_s),
761 fchs);
763 } else {
764 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
765 reqlen + sizeof(struct fchs_s), fchs);
769 static void
770 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
771 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
773 if (fcxp_rsp->rsp_len > 0) {
774 if (fcxp->use_irspbuf) {
775 u32 pld_w0 =
776 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
778 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
779 BFA_PL_EID_RX,
780 (u16) fcxp_rsp->rsp_len,
781 &fcxp_rsp->fchs, pld_w0);
782 } else {
783 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
784 BFA_PL_EID_RX,
785 (u16) fcxp_rsp->rsp_len,
786 &fcxp_rsp->fchs);
788 } else {
789 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
790 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
795 * Handler to resume sending fcxp when space is available in the CPE queue.
797 static void
798 bfa_fcxp_qresume(void *cbarg)
800 struct bfa_fcxp_s *fcxp = cbarg;
801 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
802 struct bfi_fcxp_send_req_s *send_req;
804 fcxp->reqq_waiting = BFA_FALSE;
805 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
806 bfa_fcxp_queue(fcxp, send_req);
810 * Queue fcxp send request to firmware.
812 static void
813 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
815 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
816 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
817 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
818 struct bfa_rport_s *rport = reqi->bfa_rport;
820 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
821 bfa_fn_lpu(bfa));
823 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
824 if (rport) {
825 send_req->rport_fw_hndl = rport->fw_handle;
826 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
827 if (send_req->max_frmsz == 0)
828 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
829 } else {
830 send_req->rport_fw_hndl = 0;
831 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
834 send_req->vf_id = cpu_to_be16(reqi->vf_id);
835 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
836 send_req->class = reqi->class;
837 send_req->rsp_timeout = rspi->rsp_timeout;
838 send_req->cts = reqi->cts;
839 send_req->fchs = reqi->fchs;
841 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
842 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
845 * setup req sgles
847 if (fcxp->use_ireqbuf == 1) {
848 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
849 BFA_FCXP_REQ_PLD_PA(fcxp));
850 } else {
851 if (fcxp->nreq_sgles > 0) {
852 WARN_ON(fcxp->nreq_sgles != 1);
853 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
854 fcxp->req_sga_cbfn(fcxp->caller, 0));
855 } else {
856 WARN_ON(reqi->req_tot_len != 0);
857 bfa_alen_set(&send_req->rsp_alen, 0, 0);
862 * setup rsp sgles
864 if (fcxp->use_irspbuf == 1) {
865 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
867 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
868 BFA_FCXP_RSP_PLD_PA(fcxp));
869 } else {
870 if (fcxp->nrsp_sgles > 0) {
871 WARN_ON(fcxp->nrsp_sgles != 1);
872 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
873 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
875 } else {
876 WARN_ON(rspi->rsp_maxlen != 0);
877 bfa_alen_set(&send_req->rsp_alen, 0, 0);
881 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
883 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
885 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
886 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
890 * Allocate an FCXP instance to send a response or to send a request
891 * that has a response. Request/response buffers are allocated by caller.
893 * @param[in] bfa BFA bfa instance
894 * @param[in] nreq_sgles Number of SG elements required for request
895 * buffer. 0, if fcxp internal buffers are used.
896 * Use bfa_fcxp_get_reqbuf() to get the
897 * internal req buffer.
898 * @param[in] req_sgles SG elements describing request buffer. Will be
899 * copied in by BFA and hence can be freed on
900 * return from this function.
901 * @param[in] get_req_sga function ptr to be called to get a request SG
902 * Address (given the sge index).
903 * @param[in] get_req_sglen function ptr to be called to get a request SG
904 * len (given the sge index).
905 * @param[in] get_rsp_sga function ptr to be called to get a response SG
906 * Address (given the sge index).
907 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
908 * len (given the sge index).
909 * @param[in] req Allocated FCXP is used to send req or rsp?
910 * request - BFA_TRUE, response - BFA_FALSE
912 * @return FCXP instance. NULL on failure.
914 struct bfa_fcxp_s *
915 bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
916 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
917 bfa_fcxp_get_sglen_t req_sglen_cbfn,
918 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
919 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
921 struct bfa_fcxp_s *fcxp = NULL;
923 WARN_ON(bfa == NULL);
925 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
926 if (fcxp == NULL)
927 return NULL;
929 bfa_trc(bfa, fcxp->fcxp_tag);
931 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
932 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
934 return fcxp;
938 * Get the internal request buffer pointer
940 * @param[in] fcxp BFA fcxp pointer
942 * @return pointer to the internal request buffer
944 void *
945 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
947 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
948 void *reqbuf;
950 WARN_ON(fcxp->use_ireqbuf != 1);
951 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
952 mod->req_pld_sz + mod->rsp_pld_sz);
953 return reqbuf;
957 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
959 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
961 return mod->req_pld_sz;
965 * Get the internal response buffer pointer
967 * @param[in] fcxp BFA fcxp pointer
969 * @return pointer to the internal response buffer
971 void *
972 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
974 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
975 void *fcxp_buf;
977 WARN_ON(fcxp->use_irspbuf != 1);
979 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
980 mod->req_pld_sz + mod->rsp_pld_sz);
982 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
983 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
987 * Free the BFA FCXP
989 * @param[in] fcxp BFA fcxp pointer
991 * @return void
993 void
994 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
996 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
998 WARN_ON(fcxp == NULL);
999 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1000 bfa_fcxp_put(fcxp);
1004 * Send a FCXP request
1006 * @param[in] fcxp BFA fcxp pointer
1007 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1008 * @param[in] vf_id virtual Fabric ID
1009 * @param[in] lp_tag lport tag
1010 * @param[in] cts use Continuous sequence
1011 * @param[in] cos fc Class of Service
1012 * @param[in] reqlen request length, does not include FCHS length
1013 * @param[in] fchs fc Header Pointer. The header content will be copied
1014 * in by BFA.
1016 * @param[in] cbfn call back function to be called on receiving
1017 * the response
1018 * @param[in] cbarg arg for cbfn
1019 * @param[in] rsp_timeout
1020 * response timeout
1022 * @return bfa_status_t
1024 void
1025 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1026 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1027 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1028 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1030 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1031 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1032 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1033 struct bfi_fcxp_send_req_s *send_req;
1035 bfa_trc(bfa, fcxp->fcxp_tag);
1038 * setup request/response info
1040 reqi->bfa_rport = rport;
1041 reqi->vf_id = vf_id;
1042 reqi->lp_tag = lp_tag;
1043 reqi->class = cos;
1044 rspi->rsp_timeout = rsp_timeout;
1045 reqi->cts = cts;
1046 reqi->fchs = *fchs;
1047 reqi->req_tot_len = reqlen;
1048 rspi->rsp_maxlen = rsp_maxlen;
1049 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1050 fcxp->send_cbarg = cbarg;
1053 * If no room in CPE queue, wait for space in request queue
1055 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1056 if (!send_req) {
1057 bfa_trc(bfa, fcxp->fcxp_tag);
1058 fcxp->reqq_waiting = BFA_TRUE;
1059 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1060 return;
1063 bfa_fcxp_queue(fcxp, send_req);
1067 * Abort a BFA FCXP
1069 * @param[in] fcxp BFA fcxp pointer
1071 * @return void
1073 bfa_status_t
1074 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1076 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1077 WARN_ON(1);
1078 return BFA_STATUS_OK;
1081 void
1082 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1083 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1084 void *caller, int nreq_sgles,
1085 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1086 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1087 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1088 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1090 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1092 if (req)
1093 WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1094 else
1095 WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1097 wqe->alloc_cbfn = alloc_cbfn;
1098 wqe->alloc_cbarg = alloc_cbarg;
1099 wqe->caller = caller;
1100 wqe->bfa = bfa;
1101 wqe->nreq_sgles = nreq_sgles;
1102 wqe->nrsp_sgles = nrsp_sgles;
1103 wqe->req_sga_cbfn = req_sga_cbfn;
1104 wqe->req_sglen_cbfn = req_sglen_cbfn;
1105 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1106 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1108 if (req)
1109 list_add_tail(&wqe->qe, &mod->req_wait_q);
1110 else
1111 list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1114 void
1115 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1117 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1119 WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1120 !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1121 list_del(&wqe->qe);
1124 void
1125 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1128 * If waiting for room in request queue, cancel reqq wait
1129 * and free fcxp.
1131 if (fcxp->reqq_waiting) {
1132 fcxp->reqq_waiting = BFA_FALSE;
1133 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1134 bfa_fcxp_free(fcxp);
1135 return;
1138 fcxp->send_cbfn = bfa_fcxp_null_comp;
1141 void
1142 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1144 switch (msg->mhdr.msg_id) {
1145 case BFI_FCXP_I2H_SEND_RSP:
1146 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1147 break;
1149 default:
1150 bfa_trc(bfa, msg->mhdr.msg_id);
1151 WARN_ON(1);
1156 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1158 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1160 return mod->rsp_pld_sz;
1163 void
1164 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1166 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1167 struct list_head *qe;
1168 int i;
1170 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1171 if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1172 bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1173 list_add_tail(qe, &mod->fcxp_req_unused_q);
1174 } else {
1175 bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1176 list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1182 * BFA LPS state machine functions
1186 * Init state -- no login
1188 static void
1189 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1191 bfa_trc(lps->bfa, lps->bfa_tag);
1192 bfa_trc(lps->bfa, event);
1194 switch (event) {
1195 case BFA_LPS_SM_LOGIN:
1196 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1197 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1198 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1199 } else {
1200 bfa_sm_set_state(lps, bfa_lps_sm_login);
1201 bfa_lps_send_login(lps);
1204 if (lps->fdisc)
1205 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1206 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1207 else
1208 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1209 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1210 break;
1212 case BFA_LPS_SM_LOGOUT:
1213 bfa_lps_logout_comp(lps);
1214 break;
1216 case BFA_LPS_SM_DELETE:
1217 bfa_lps_free(lps);
1218 break;
1220 case BFA_LPS_SM_RX_CVL:
1221 case BFA_LPS_SM_OFFLINE:
1222 break;
1224 case BFA_LPS_SM_FWRSP:
1226 * Could happen when fabric detects loopback and discards
1227 * the lps request. Fw will eventually sent out the timeout
1228 * Just ignore
1230 break;
1231 case BFA_LPS_SM_SET_N2N_PID:
1233 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
1234 * this event. Ignore this event.
1236 break;
1238 default:
1239 bfa_sm_fault(lps->bfa, event);
1244 * login is in progress -- awaiting response from firmware
1246 static void
1247 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1249 bfa_trc(lps->bfa, lps->bfa_tag);
1250 bfa_trc(lps->bfa, event);
1252 switch (event) {
1253 case BFA_LPS_SM_FWRSP:
1254 if (lps->status == BFA_STATUS_OK) {
1255 bfa_sm_set_state(lps, bfa_lps_sm_online);
1256 if (lps->fdisc)
1257 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1258 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1259 else
1260 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1261 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1262 /* If N2N, send the assigned PID to FW */
1263 bfa_trc(lps->bfa, lps->fport);
1264 bfa_trc(lps->bfa, lps->lp_pid);
1266 if (!lps->fport && lps->lp_pid)
1267 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1268 } else {
1269 bfa_sm_set_state(lps, bfa_lps_sm_init);
1270 if (lps->fdisc)
1271 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1272 BFA_PL_EID_LOGIN, 0,
1273 "FDISC Fail (RJT or timeout)");
1274 else
1275 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1276 BFA_PL_EID_LOGIN, 0,
1277 "FLOGI Fail (RJT or timeout)");
1279 bfa_lps_login_comp(lps);
1280 break;
1282 case BFA_LPS_SM_OFFLINE:
1283 case BFA_LPS_SM_DELETE:
1284 bfa_sm_set_state(lps, bfa_lps_sm_init);
1285 break;
1287 case BFA_LPS_SM_SET_N2N_PID:
1288 bfa_trc(lps->bfa, lps->fport);
1289 bfa_trc(lps->bfa, lps->lp_pid);
1290 break;
1292 default:
1293 bfa_sm_fault(lps->bfa, event);
1298 * login pending - awaiting space in request queue
1300 static void
1301 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1303 bfa_trc(lps->bfa, lps->bfa_tag);
1304 bfa_trc(lps->bfa, event);
1306 switch (event) {
1307 case BFA_LPS_SM_RESUME:
1308 bfa_sm_set_state(lps, bfa_lps_sm_login);
1309 bfa_lps_send_login(lps);
1310 break;
1312 case BFA_LPS_SM_OFFLINE:
1313 case BFA_LPS_SM_DELETE:
1314 bfa_sm_set_state(lps, bfa_lps_sm_init);
1315 bfa_reqq_wcancel(&lps->wqe);
1316 break;
1318 case BFA_LPS_SM_RX_CVL:
1320 * Login was not even sent out; so when getting out
1321 * of this state, it will appear like a login retry
1322 * after Clear virtual link
1324 break;
1326 default:
1327 bfa_sm_fault(lps->bfa, event);
1332 * login complete
1334 static void
1335 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1337 bfa_trc(lps->bfa, lps->bfa_tag);
1338 bfa_trc(lps->bfa, event);
1340 switch (event) {
1341 case BFA_LPS_SM_LOGOUT:
1342 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1343 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1344 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1345 } else {
1346 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1347 bfa_lps_send_logout(lps);
1349 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1350 BFA_PL_EID_LOGO, 0, "Logout");
1351 break;
1353 case BFA_LPS_SM_RX_CVL:
1354 bfa_sm_set_state(lps, bfa_lps_sm_init);
1356 /* Let the vport module know about this event */
1357 bfa_lps_cvl_event(lps);
1358 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1359 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1360 break;
1362 case BFA_LPS_SM_SET_N2N_PID:
1363 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1364 bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1365 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1366 } else
1367 bfa_lps_send_set_n2n_pid(lps);
1368 break;
1370 case BFA_LPS_SM_OFFLINE:
1371 case BFA_LPS_SM_DELETE:
1372 bfa_sm_set_state(lps, bfa_lps_sm_init);
1373 break;
1375 default:
1376 bfa_sm_fault(lps->bfa, event);
1381 * login complete
1383 static void
1384 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1386 bfa_trc(lps->bfa, lps->bfa_tag);
1387 bfa_trc(lps->bfa, event);
1389 switch (event) {
1390 case BFA_LPS_SM_RESUME:
1391 bfa_sm_set_state(lps, bfa_lps_sm_online);
1392 bfa_lps_send_set_n2n_pid(lps);
1393 break;
1395 case BFA_LPS_SM_LOGOUT:
1396 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1397 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1398 BFA_PL_EID_LOGO, 0, "Logout");
1399 break;
1401 case BFA_LPS_SM_RX_CVL:
1402 bfa_sm_set_state(lps, bfa_lps_sm_init);
1403 bfa_reqq_wcancel(&lps->wqe);
1405 /* Let the vport module know about this event */
1406 bfa_lps_cvl_event(lps);
1407 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1408 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1409 break;
1411 case BFA_LPS_SM_OFFLINE:
1412 case BFA_LPS_SM_DELETE:
1413 bfa_sm_set_state(lps, bfa_lps_sm_init);
1414 bfa_reqq_wcancel(&lps->wqe);
1415 break;
1417 default:
1418 bfa_sm_fault(lps->bfa, event);
1423 * logout in progress - awaiting firmware response
1425 static void
1426 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1428 bfa_trc(lps->bfa, lps->bfa_tag);
1429 bfa_trc(lps->bfa, event);
1431 switch (event) {
1432 case BFA_LPS_SM_FWRSP:
1433 case BFA_LPS_SM_OFFLINE:
1434 bfa_sm_set_state(lps, bfa_lps_sm_init);
1435 bfa_lps_logout_comp(lps);
1436 break;
1438 case BFA_LPS_SM_DELETE:
1439 bfa_sm_set_state(lps, bfa_lps_sm_init);
1440 break;
1442 default:
1443 bfa_sm_fault(lps->bfa, event);
1448 * logout pending -- awaiting space in request queue
1450 static void
1451 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1453 bfa_trc(lps->bfa, lps->bfa_tag);
1454 bfa_trc(lps->bfa, event);
1456 switch (event) {
1457 case BFA_LPS_SM_RESUME:
1458 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1459 bfa_lps_send_logout(lps);
1460 break;
1462 case BFA_LPS_SM_OFFLINE:
1463 case BFA_LPS_SM_DELETE:
1464 bfa_sm_set_state(lps, bfa_lps_sm_init);
1465 bfa_reqq_wcancel(&lps->wqe);
1466 break;
1468 default:
1469 bfa_sm_fault(lps->bfa, event);
1476 * lps_pvt BFA LPS private functions
1480 * return memory requirement
1482 void
1483 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1484 struct bfa_s *bfa)
1486 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1488 if (cfg->drvcfg.min_cfg)
1489 bfa_mem_kva_setup(minfo, lps_kva,
1490 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1491 else
1492 bfa_mem_kva_setup(minfo, lps_kva,
1493 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1497 * bfa module attach at initialization time
1499 void
1500 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1501 struct bfa_pcidev_s *pcidev)
1503 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1504 struct bfa_lps_s *lps;
1505 int i;
1507 mod->num_lps = BFA_LPS_MAX_LPORTS;
1508 if (cfg->drvcfg.min_cfg)
1509 mod->num_lps = BFA_LPS_MIN_LPORTS;
1510 else
1511 mod->num_lps = BFA_LPS_MAX_LPORTS;
1512 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1514 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1516 INIT_LIST_HEAD(&mod->lps_free_q);
1517 INIT_LIST_HEAD(&mod->lps_active_q);
1518 INIT_LIST_HEAD(&mod->lps_login_q);
1520 for (i = 0; i < mod->num_lps; i++, lps++) {
1521 lps->bfa = bfa;
1522 lps->bfa_tag = (u8) i;
1523 lps->reqq = BFA_REQQ_LPS;
1524 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1525 list_add_tail(&lps->qe, &mod->lps_free_q);
1530 * IOC in disabled state -- consider all lps offline
1532 void
1533 bfa_lps_iocdisable(struct bfa_s *bfa)
1535 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1536 struct bfa_lps_s *lps;
1537 struct list_head *qe, *qen;
1539 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1540 lps = (struct bfa_lps_s *) qe;
1541 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1543 list_for_each_safe(qe, qen, &mod->lps_login_q) {
1544 lps = (struct bfa_lps_s *) qe;
1545 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1547 list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1551 * Firmware login response
1553 static void
1554 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1556 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1557 struct bfa_lps_s *lps;
1559 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1560 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1562 lps->status = rsp->status;
1563 switch (rsp->status) {
1564 case BFA_STATUS_OK:
1565 lps->fw_tag = rsp->fw_tag;
1566 lps->fport = rsp->f_port;
1567 if (lps->fport)
1568 lps->lp_pid = rsp->lp_pid;
1569 lps->npiv_en = rsp->npiv_en;
1570 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
1571 lps->pr_pwwn = rsp->port_name;
1572 lps->pr_nwwn = rsp->node_name;
1573 lps->auth_req = rsp->auth_req;
1574 lps->lp_mac = rsp->lp_mac;
1575 lps->brcd_switch = rsp->brcd_switch;
1576 lps->fcf_mac = rsp->fcf_mac;
1578 break;
1580 case BFA_STATUS_FABRIC_RJT:
1581 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1582 lps->lsrjt_expl = rsp->lsrjt_expl;
1584 break;
1586 case BFA_STATUS_EPROTOCOL:
1587 lps->ext_status = rsp->ext_status;
1589 break;
1591 case BFA_STATUS_VPORT_MAX:
1592 if (rsp->ext_status)
1593 bfa_lps_no_res(lps, rsp->ext_status);
1594 break;
1596 default:
1597 /* Nothing to do with other status */
1598 break;
1601 list_del(&lps->qe);
1602 list_add_tail(&lps->qe, &mod->lps_active_q);
1603 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1606 static void
1607 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1609 struct bfa_s *bfa = first_lps->bfa;
1610 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1611 struct list_head *qe, *qe_next;
1612 struct bfa_lps_s *lps;
1614 bfa_trc(bfa, count);
1616 qe = bfa_q_next(first_lps);
1618 while (count && qe) {
1619 qe_next = bfa_q_next(qe);
1620 lps = (struct bfa_lps_s *)qe;
1621 bfa_trc(bfa, lps->bfa_tag);
1622 lps->status = first_lps->status;
1623 list_del(&lps->qe);
1624 list_add_tail(&lps->qe, &mod->lps_active_q);
1625 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1626 qe = qe_next;
1627 count--;
1632 * Firmware logout response
1634 static void
1635 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1637 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1638 struct bfa_lps_s *lps;
1640 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1641 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1643 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1647 * Firmware received a Clear virtual link request (for FCoE)
1649 static void
1650 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1652 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1653 struct bfa_lps_s *lps;
1655 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1657 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1661 * Space is available in request queue, resume queueing request to firmware.
1663 static void
1664 bfa_lps_reqq_resume(void *lps_arg)
1666 struct bfa_lps_s *lps = lps_arg;
1668 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1672 * lps is freed -- triggered by vport delete
1674 static void
1675 bfa_lps_free(struct bfa_lps_s *lps)
1677 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1679 lps->lp_pid = 0;
1680 list_del(&lps->qe);
1681 list_add_tail(&lps->qe, &mod->lps_free_q);
1685 * send login request to firmware
1687 static void
1688 bfa_lps_send_login(struct bfa_lps_s *lps)
1690 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1691 struct bfi_lps_login_req_s *m;
1693 m = bfa_reqq_next(lps->bfa, lps->reqq);
1694 WARN_ON(!m);
1696 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1697 bfa_fn_lpu(lps->bfa));
1699 m->bfa_tag = lps->bfa_tag;
1700 m->alpa = lps->alpa;
1701 m->pdu_size = cpu_to_be16(lps->pdusz);
1702 m->pwwn = lps->pwwn;
1703 m->nwwn = lps->nwwn;
1704 m->fdisc = lps->fdisc;
1705 m->auth_en = lps->auth_en;
1707 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1708 list_del(&lps->qe);
1709 list_add_tail(&lps->qe, &mod->lps_login_q);
1713 * send logout request to firmware
1715 static void
1716 bfa_lps_send_logout(struct bfa_lps_s *lps)
1718 struct bfi_lps_logout_req_s *m;
1720 m = bfa_reqq_next(lps->bfa, lps->reqq);
1721 WARN_ON(!m);
1723 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1724 bfa_fn_lpu(lps->bfa));
1726 m->fw_tag = lps->fw_tag;
1727 m->port_name = lps->pwwn;
1728 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1732 * send n2n pid set request to firmware
1734 static void
1735 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1737 struct bfi_lps_n2n_pid_req_s *m;
1739 m = bfa_reqq_next(lps->bfa, lps->reqq);
1740 WARN_ON(!m);
1742 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1743 bfa_fn_lpu(lps->bfa));
1745 m->fw_tag = lps->fw_tag;
1746 m->lp_pid = lps->lp_pid;
1747 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1751 * Indirect login completion handler for non-fcs
1753 static void
1754 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1756 struct bfa_lps_s *lps = arg;
1758 if (!complete)
1759 return;
1761 if (lps->fdisc)
1762 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1763 else
1764 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1768 * Login completion handler -- direct call for fcs, queue for others
1770 static void
1771 bfa_lps_login_comp(struct bfa_lps_s *lps)
1773 if (!lps->bfa->fcs) {
1774 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1775 lps);
1776 return;
1779 if (lps->fdisc)
1780 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1781 else
1782 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1786 * Indirect logout completion handler for non-fcs
1788 static void
1789 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1791 struct bfa_lps_s *lps = arg;
1793 if (!complete)
1794 return;
1796 if (lps->fdisc)
1797 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1798 else
1799 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1803 * Logout completion handler -- direct call for fcs, queue for others
1805 static void
1806 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1808 if (!lps->bfa->fcs) {
1809 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1810 lps);
1811 return;
1813 if (lps->fdisc)
1814 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1818 * Clear virtual link completion handler for non-fcs
1820 static void
1821 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1823 struct bfa_lps_s *lps = arg;
1825 if (!complete)
1826 return;
1828 /* Clear virtual link to base port will result in link down */
1829 if (lps->fdisc)
1830 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1834 * Received Clear virtual link event --direct call for fcs,
1835 * queue for others
1837 static void
1838 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1840 if (!lps->bfa->fcs) {
1841 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1842 lps);
1843 return;
1846 /* Clear virtual link to base port will result in link down */
1847 if (lps->fdisc)
1848 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1854 * lps_public BFA LPS public functions
1858 bfa_lps_get_max_vport(struct bfa_s *bfa)
1860 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1861 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1862 else
1863 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1867 * Allocate a lport srvice tag.
1869 struct bfa_lps_s *
1870 bfa_lps_alloc(struct bfa_s *bfa)
1872 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1873 struct bfa_lps_s *lps = NULL;
1875 bfa_q_deq(&mod->lps_free_q, &lps);
1877 if (lps == NULL)
1878 return NULL;
1880 list_add_tail(&lps->qe, &mod->lps_active_q);
1882 bfa_sm_set_state(lps, bfa_lps_sm_init);
1883 return lps;
1887 * Free lport service tag. This can be called anytime after an alloc.
1888 * No need to wait for any pending login/logout completions.
1890 void
1891 bfa_lps_delete(struct bfa_lps_s *lps)
1893 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1897 * Initiate a lport login.
1899 void
1900 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1901 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1903 lps->uarg = uarg;
1904 lps->alpa = alpa;
1905 lps->pdusz = pdusz;
1906 lps->pwwn = pwwn;
1907 lps->nwwn = nwwn;
1908 lps->fdisc = BFA_FALSE;
1909 lps->auth_en = auth_en;
1910 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1914 * Initiate a lport fdisc login.
1916 void
1917 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1918 wwn_t nwwn)
1920 lps->uarg = uarg;
1921 lps->alpa = 0;
1922 lps->pdusz = pdusz;
1923 lps->pwwn = pwwn;
1924 lps->nwwn = nwwn;
1925 lps->fdisc = BFA_TRUE;
1926 lps->auth_en = BFA_FALSE;
1927 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1932 * Initiate a lport FDSIC logout.
1934 void
1935 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1937 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1941 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1943 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1945 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1949 * Return lport services tag given the pid
1952 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1954 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1955 struct bfa_lps_s *lps;
1956 int i;
1958 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1959 if (lps->lp_pid == pid)
1960 return lps->bfa_tag;
1963 /* Return base port tag anyway */
1964 return 0;
1969 * return port id assigned to the base lport
1972 bfa_lps_get_base_pid(struct bfa_s *bfa)
1974 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1976 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1980 * Set PID in case of n2n (which is assigned during PLOGI)
1982 void
1983 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1985 bfa_trc(lps->bfa, lps->bfa_tag);
1986 bfa_trc(lps->bfa, n2n_pid);
1988 lps->lp_pid = n2n_pid;
1989 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1993 * LPS firmware message class handler.
1995 void
1996 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1998 union bfi_lps_i2h_msg_u msg;
2000 bfa_trc(bfa, m->mhdr.msg_id);
2001 msg.msg = m;
2003 switch (m->mhdr.msg_id) {
2004 case BFI_LPS_I2H_LOGIN_RSP:
2005 bfa_lps_login_rsp(bfa, msg.login_rsp);
2006 break;
2008 case BFI_LPS_I2H_LOGOUT_RSP:
2009 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2010 break;
2012 case BFI_LPS_I2H_CVL_EVENT:
2013 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2014 break;
2016 default:
2017 bfa_trc(bfa, m->mhdr.msg_id);
2018 WARN_ON(1);
2022 static void
2023 bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2025 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2026 struct bfa_aen_entry_s *aen_entry;
2028 bfad_get_aen_entry(bfad, aen_entry);
2029 if (!aen_entry)
2030 return;
2032 aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2033 aen_entry->aen_data.port.pwwn = fcport->pwwn;
2035 /* Send the AEN notification */
2036 bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2037 BFA_AEN_CAT_PORT, event);
2041 * FC PORT state machine functions
2043 static void
2044 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2045 enum bfa_fcport_sm_event event)
2047 bfa_trc(fcport->bfa, event);
2049 switch (event) {
2050 case BFA_FCPORT_SM_START:
2052 * Start event after IOC is configured and BFA is started.
2054 fcport->use_flash_cfg = BFA_TRUE;
2056 if (bfa_fcport_send_enable(fcport)) {
2057 bfa_trc(fcport->bfa, BFA_TRUE);
2058 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2059 } else {
2060 bfa_trc(fcport->bfa, BFA_FALSE);
2061 bfa_sm_set_state(fcport,
2062 bfa_fcport_sm_enabling_qwait);
2064 break;
2066 case BFA_FCPORT_SM_ENABLE:
2068 * Port is persistently configured to be in enabled state. Do
2069 * not change state. Port enabling is done when START event is
2070 * received.
2072 break;
2074 case BFA_FCPORT_SM_DISABLE:
2076 * If a port is persistently configured to be disabled, the
2077 * first event will a port disable request.
2079 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2080 break;
2082 case BFA_FCPORT_SM_HWFAIL:
2083 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2084 break;
2086 default:
2087 bfa_sm_fault(fcport->bfa, event);
2091 static void
2092 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2093 enum bfa_fcport_sm_event event)
2095 char pwwn_buf[BFA_STRING_32];
2096 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2097 bfa_trc(fcport->bfa, event);
2099 switch (event) {
2100 case BFA_FCPORT_SM_QRESUME:
2101 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2102 bfa_fcport_send_enable(fcport);
2103 break;
2105 case BFA_FCPORT_SM_STOP:
2106 bfa_reqq_wcancel(&fcport->reqq_wait);
2107 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2108 break;
2110 case BFA_FCPORT_SM_ENABLE:
2112 * Already enable is in progress.
2114 break;
2116 case BFA_FCPORT_SM_DISABLE:
2118 * Just send disable request to firmware when room becomes
2119 * available in request queue.
2121 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2122 bfa_reqq_wcancel(&fcport->reqq_wait);
2123 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2124 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2125 wwn2str(pwwn_buf, fcport->pwwn);
2126 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2127 "Base port disabled: WWN = %s\n", pwwn_buf);
2128 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2129 break;
2131 case BFA_FCPORT_SM_LINKUP:
2132 case BFA_FCPORT_SM_LINKDOWN:
2134 * Possible to get link events when doing back-to-back
2135 * enable/disables.
2137 break;
2139 case BFA_FCPORT_SM_HWFAIL:
2140 bfa_reqq_wcancel(&fcport->reqq_wait);
2141 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2142 break;
2144 case BFA_FCPORT_SM_FAA_MISCONFIG:
2145 bfa_fcport_reset_linkinfo(fcport);
2146 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2147 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2148 break;
2150 default:
2151 bfa_sm_fault(fcport->bfa, event);
2155 static void
2156 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2157 enum bfa_fcport_sm_event event)
2159 char pwwn_buf[BFA_STRING_32];
2160 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2161 bfa_trc(fcport->bfa, event);
2163 switch (event) {
2164 case BFA_FCPORT_SM_FWRSP:
2165 case BFA_FCPORT_SM_LINKDOWN:
2166 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2167 break;
2169 case BFA_FCPORT_SM_LINKUP:
2170 bfa_fcport_update_linkinfo(fcport);
2171 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2173 WARN_ON(!fcport->event_cbfn);
2174 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2175 break;
2177 case BFA_FCPORT_SM_ENABLE:
2179 * Already being enabled.
2181 break;
2183 case BFA_FCPORT_SM_DISABLE:
2184 if (bfa_fcport_send_disable(fcport))
2185 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2186 else
2187 bfa_sm_set_state(fcport,
2188 bfa_fcport_sm_disabling_qwait);
2190 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2191 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2192 wwn2str(pwwn_buf, fcport->pwwn);
2193 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2194 "Base port disabled: WWN = %s\n", pwwn_buf);
2195 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2196 break;
2198 case BFA_FCPORT_SM_STOP:
2199 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2200 break;
2202 case BFA_FCPORT_SM_HWFAIL:
2203 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2204 break;
2206 case BFA_FCPORT_SM_FAA_MISCONFIG:
2207 bfa_fcport_reset_linkinfo(fcport);
2208 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2209 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2210 break;
2212 default:
2213 bfa_sm_fault(fcport->bfa, event);
2217 static void
2218 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2219 enum bfa_fcport_sm_event event)
2221 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2222 char pwwn_buf[BFA_STRING_32];
2223 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2225 bfa_trc(fcport->bfa, event);
2227 switch (event) {
2228 case BFA_FCPORT_SM_LINKUP:
2229 bfa_fcport_update_linkinfo(fcport);
2230 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2231 WARN_ON(!fcport->event_cbfn);
2232 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2233 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2234 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2236 bfa_trc(fcport->bfa,
2237 pevent->link_state.attr.vc_fcf.fcf.fipenabled);
2238 bfa_trc(fcport->bfa,
2239 pevent->link_state.attr.vc_fcf.fcf.fipfailed);
2241 if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
2242 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2243 BFA_PL_EID_FIP_FCF_DISC, 0,
2244 "FIP FCF Discovery Failed");
2245 else
2246 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2247 BFA_PL_EID_FIP_FCF_DISC, 0,
2248 "FIP FCF Discovered");
2251 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2252 wwn2str(pwwn_buf, fcport->pwwn);
2253 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2254 "Base port online: WWN = %s\n", pwwn_buf);
2255 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2257 /* If QoS is enabled and it is not online, send AEN */
2258 if (fcport->cfg.qos_enabled &&
2259 fcport->qos_attr.state != BFA_QOS_ONLINE)
2260 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2261 break;
2263 case BFA_FCPORT_SM_LINKDOWN:
2265 * Possible to get link down event.
2267 break;
2269 case BFA_FCPORT_SM_ENABLE:
2271 * Already enabled.
2273 break;
2275 case BFA_FCPORT_SM_DISABLE:
2276 if (bfa_fcport_send_disable(fcport))
2277 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2278 else
2279 bfa_sm_set_state(fcport,
2280 bfa_fcport_sm_disabling_qwait);
2282 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2283 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2284 wwn2str(pwwn_buf, fcport->pwwn);
2285 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2286 "Base port disabled: WWN = %s\n", pwwn_buf);
2287 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2288 break;
2290 case BFA_FCPORT_SM_STOP:
2291 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2292 break;
2294 case BFA_FCPORT_SM_HWFAIL:
2295 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2296 break;
2298 case BFA_FCPORT_SM_FAA_MISCONFIG:
2299 bfa_fcport_reset_linkinfo(fcport);
2300 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2301 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2302 break;
2304 default:
2305 bfa_sm_fault(fcport->bfa, event);
2309 static void
2310 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2311 enum bfa_fcport_sm_event event)
2313 char pwwn_buf[BFA_STRING_32];
2314 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2316 bfa_trc(fcport->bfa, event);
2318 switch (event) {
2319 case BFA_FCPORT_SM_ENABLE:
2321 * Already enabled.
2323 break;
2325 case BFA_FCPORT_SM_DISABLE:
2326 if (bfa_fcport_send_disable(fcport))
2327 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2328 else
2329 bfa_sm_set_state(fcport,
2330 bfa_fcport_sm_disabling_qwait);
2332 bfa_fcport_reset_linkinfo(fcport);
2333 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2334 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2335 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2336 wwn2str(pwwn_buf, fcport->pwwn);
2337 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2338 "Base port offline: WWN = %s\n", pwwn_buf);
2339 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2340 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2341 "Base port disabled: WWN = %s\n", pwwn_buf);
2342 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2343 break;
2345 case BFA_FCPORT_SM_LINKDOWN:
2346 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2347 bfa_fcport_reset_linkinfo(fcport);
2348 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2349 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2350 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2351 wwn2str(pwwn_buf, fcport->pwwn);
2352 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2353 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2354 "Base port offline: WWN = %s\n", pwwn_buf);
2355 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2356 } else {
2357 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2358 "Base port (WWN = %s) "
2359 "lost fabric connectivity\n", pwwn_buf);
2360 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2362 break;
2364 case BFA_FCPORT_SM_STOP:
2365 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2366 bfa_fcport_reset_linkinfo(fcport);
2367 wwn2str(pwwn_buf, fcport->pwwn);
2368 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2369 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2370 "Base port offline: WWN = %s\n", pwwn_buf);
2371 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2372 } else {
2373 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2374 "Base port (WWN = %s) "
2375 "lost fabric connectivity\n", pwwn_buf);
2376 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2378 break;
2380 case BFA_FCPORT_SM_HWFAIL:
2381 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2382 bfa_fcport_reset_linkinfo(fcport);
2383 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2384 wwn2str(pwwn_buf, fcport->pwwn);
2385 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2386 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2387 "Base port offline: WWN = %s\n", pwwn_buf);
2388 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2389 } else {
2390 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2391 "Base port (WWN = %s) "
2392 "lost fabric connectivity\n", pwwn_buf);
2393 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2395 break;
2397 case BFA_FCPORT_SM_FAA_MISCONFIG:
2398 bfa_fcport_reset_linkinfo(fcport);
2399 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2400 bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
2401 break;
2403 default:
2404 bfa_sm_fault(fcport->bfa, event);
/*
 * Port disable has been requested but there was no request-queue space;
 * waiting for queue space to send the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space is now available: send the disable request. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Stop overrides the pending disable; drop the queue wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Enable arrived while a disable is still queued: go to the
		 * toggling state so both get sent once queue space resumes.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC died while waiting; cancel the queue wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * A disable followed by an enable is pending; waiting for request-queue
 * space to send both messages (disable first, then enable) to firmware.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the queued disable, then immediately try the enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* An enable is already pending; nothing more to record. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; fall back to plain disable wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Disable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested mid-disable: queue an enable now. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is administratively disabled; firmware has confirmed the disable.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Send (or queue) the enable request to firmware. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* Enter diagnostic (D-port) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		/* Enter dynamic diagnostic (DD-port) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port state machine has been stopped; only a fresh start is honoured.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Restart by sending (or queueing) an enable to firmware. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		break;
	}
}
/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC recovered: re-issue the enable request to firmware. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		break;
	}
}
/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC back; port stays administratively disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Remember the enable; it takes effect once IOC recovers. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		break;
	}
}
/*
 * Port is in diagnostic (D-port) mode.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leaving dport mode lands the port in disabled state. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is in dynamic diagnostic (DD-port) mode.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		/* Leaving ddport mode lands the port in disabled state. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is held down because firmware reported an FAA misconfiguration.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send (or queue) a disable and tear the link down. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed; report link down and wait for IOC recovery. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Link state is down
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Queue a link-up callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Remember the link-up; deliver it after the pending down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered; settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by a new down; back to plain down-nf. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down delivered; now queue the pending link-up callback. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is up
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Queue a link-down callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Remember the down; deliver it after the pending up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered; settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* A further up arrives; track both pending transitions. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; now queue the pending link-down callback. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up is cancelled by the newer down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; queue the down, with an up still pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2960 static void
2961 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2963 struct bfa_fcport_ln_s *ln = cbarg;
2965 if (complete)
2966 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2967 else
2968 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
/*
 * Send SCN notification to upper layers.
 * trunk - false if caller is fcport to ignore fcport event in trunked mode
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	/* In trunked mode only trunk-originated events are forwarded. */
	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Deliver a link-state event to the registered callback: directly when the
 * FCS is present, otherwise deferred through the completion-callback queue.
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		/* Synchronous delivery; notify the ln SM immediately. */
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		/* Defer: remember the event and queue the callback. */
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
/* DMA area for firmware-maintained port stats, cache-line aligned. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))

/*
 * Report the DMA memory this module requires.
 */
void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		   struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
3021 static void
3022 bfa_fcport_qresume(void *cbarg)
3024 struct bfa_fcport_s *fcport = cbarg;
3026 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
/*
 * Claim the stats DMA area set up in bfa_fcport_meminfo() and record its
 * kernel-virtual and physical addresses.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
	fcport->stats	  = (union bfa_fcport_stats_u *)
				bfa_mem_dma_virt(fcport_dma);
}
3041 * Memory initialization.
3043 void
3044 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3045 struct bfa_pcidev_s *pcidev)
3047 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3048 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3049 struct bfa_fcport_ln_s *ln = &fcport->ln;
3050 struct timeval tv;
3052 fcport->bfa = bfa;
3053 ln->fcport = fcport;
3055 bfa_fcport_mem_claim(fcport);
3057 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
3058 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
3061 * initialize time stamp for stats reset
3063 do_gettimeofday(&tv);
3064 fcport->stats_reset_time = tv.tv_sec;
3065 fcport->stats_dma_ready = BFA_FALSE;
3068 * initialize and set default configuration
3070 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
3071 port_cfg->speed = BFA_PORT_SPEED_AUTO;
3072 port_cfg->trunked = BFA_FALSE;
3073 port_cfg->maxfrsize = 0;
3075 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3076 port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3077 port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3078 port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
3080 fcport->fec_state = BFA_FEC_OFFLINE;
3082 INIT_LIST_HEAD(&fcport->stats_pending_q);
3083 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3085 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
/*
 * Kick off the port state machine.
 */
void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
/*
 * Called when IOC failure is detected.
 */
void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	/* Also push trunk attributes to their offline defaults. */
	bfa_trunk_iocdisable(bfa);
}
/*
 * Update loop info in fcport for SCN online
 */
static void
bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
			struct bfa_fcport_loop_info_s *loop_info)
{
	fcport->myalpa = loop_info->myalpa;
	fcport->alpabm_valid =
		loop_info->alpabm_val;
	/* Copy the firmware-reported ALPA bitmap. */
	memcpy(fcport->alpabm.alpa_bm,
		loop_info->alpabm.alpa_bm,
		sizeof(struct fc_alpabm_s));
}
/*
 * Snapshot link attributes from the last firmware link-state event into
 * the fcport structure (speed, topology, QoS/BB-CR/FEC and FCoE details).
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* Loop topology carries only loop info; nothing else applies. */
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
/*
 * Clear cached link attributes when the link goes away.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->fec_state = BFA_FEC_OFFLINE;
}
/*
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued to firmware, BFA_FALSE if
 * there was no request-queue space (a queue-resume callback is armed).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize travels big-endian on the wire. */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
/*
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued to firmware, BFA_FALSE if
 * there was no request-queue space (a queue-resume callback is armed).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
/*
 * Load the port's world-wide names from the IOC attributes.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3256 static void
3257 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3258 struct bfa_qos_stats_s *s)
3260 u32 *dip = (u32 *) d;
3261 __be32 *sip = (__be32 *) s;
3262 int i;
3264 /* Now swap the 32 bit fields */
3265 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3266 dip[i] = be32_to_cpu(sip[i]);
/*
 * Convert a big-endian FCoE statistics block into host order.  The block
 * is processed as pairs of 32-bit words; on little-endian hosts the two
 * words of each pair are also exchanged (presumably because each pair
 * forms one 64-bit counter - inferred from the layout, not visible here).
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Byte-swap and exchange word order within the pair. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3289 static void
3290 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3292 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3293 struct bfa_cb_pending_q_s *cb;
3294 struct list_head *qe, *qen;
3295 union bfa_fcport_stats_u *ret;
3297 if (complete) {
3298 struct timeval tv;
3299 if (fcport->stats_status == BFA_STATUS_OK)
3300 do_gettimeofday(&tv);
3302 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3303 bfa_q_deq(&fcport->stats_pending_q, &qe);
3304 cb = (struct bfa_cb_pending_q_s *)qe;
3305 if (fcport->stats_status == BFA_STATUS_OK) {
3306 ret = (union bfa_fcport_stats_u *)cb->data;
3307 /* Swap FC QoS or FCoE stats */
3308 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3309 bfa_fcport_qos_stats_swap(&ret->fcqos,
3310 &fcport->stats->fcqos);
3311 else {
3312 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3313 &fcport->stats->fcoe);
3314 ret->fcoe.secs_reset =
3315 tv.tv_sec - fcport->stats_reset_time;
3318 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3319 fcport->stats_status);
3321 fcport->stats_status = BFA_STATUS_OK;
3322 } else {
3323 INIT_LIST_HEAD(&fcport->stats_pending_q);
3324 fcport->stats_status = BFA_STATUS_OK;
3328 static void
3329 bfa_fcport_stats_get_timeout(void *cbarg)
3331 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3333 bfa_trc(fcport->bfa, fcport->stats_qfull);
3335 if (fcport->stats_qfull) {
3336 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3337 fcport->stats_qfull = BFA_FALSE;
3340 fcport->stats_status = BFA_STATUS_ETIMER;
3341 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
/*
 * Issue a stats-get request to firmware; if the request queue is full,
 * arm a queue-resume callback that retries this function.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* No queue space: retry from the queue-resume callback. */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3368 static void
3369 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3371 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3372 struct bfa_cb_pending_q_s *cb;
3373 struct list_head *qe, *qen;
3375 if (complete) {
3376 struct timeval tv;
3379 * re-initialize time stamp for stats reset
3381 do_gettimeofday(&tv);
3382 fcport->stats_reset_time = tv.tv_sec;
3383 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3384 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3385 cb = (struct bfa_cb_pending_q_s *)qe;
3386 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3387 fcport->stats_status);
3389 fcport->stats_status = BFA_STATUS_OK;
3390 } else {
3391 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3392 fcport->stats_status = BFA_STATUS_OK;
/*
 * Stats-clear request timed out: cancel any pending queue wait and
 * complete all waiters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
/*
 * Issue a stats-clear request to firmware; if the request queue is full,
 * arm a queue-resume callback that retries this function.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* No queue space: retry from the queue-resume callback. */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
 * Handle trunk SCN event from firmware.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of trunk links that are up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		lattr->deskew	  = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			/* Any up link defines the port's speed/topology. */
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log which of the two trunk links are up. */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE))
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			       BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
}
/*
 * IOC went down: reset trunk attributes to offline defaults and, if the
 * trunk was online, report link down to upper layers.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	/* These must be non-zero after IOC initialization. */
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
/*
 * Firmware message handler.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* Stale responses (msgtag mismatch) are dropped. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			fcport->stats_dma_ready = BFA_TRUE;
			if (fcport->use_flash_cfg) {
				/* Adopt the flash-stored port config. */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				/*
				 * NOTE(review): these look like wire-to-host
				 * conversions yet use cpu_to_be16(); confirm
				 * against the BFI message definitions.
				 */
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->qos_attr.qos_bw =
					i2hmsg.penable_rsp->port_cfg.qos_bw;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			if (fcport->cfg.qos_enabled)
				fcport->qos_attr.state = BFA_QOS_OFFLINE;
			else
				fcport->qos_attr.state = BFA_QOS_DISABLED;

			fcport->qos_attr.qos_bw_op =
					i2hmsg.penable_rsp->port_cfg.qos_bw;

			if (fcport->cfg.bb_cr_enabled)
				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
			else
				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (fcport->cfg.bb_cr_enabled)
			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
		else
			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else {
			if (i2hmsg.event->link_state.linkstate_rsn ==
			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_FAA_MISCONFIG);
			else
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_LINKDOWN);
		}
		fcport->qos_attr.qos_bw_op =
				i2hmsg.event->link_state.qos_attr.qos_bw_op;
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->stats_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->statsclr_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
3696 * Registered callback for port events.
3698 void
3699 bfa_fcport_event_register(struct bfa_s *bfa,
3700 void (*cbfn) (void *cbarg,
3701 enum bfa_port_linkstate event),
3702 void *cbarg)
3704 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3706 fcport->event_cbfn = cbfn;
3707 fcport->event_cbarg = cbarg;
3710 bfa_status_t
3711 bfa_fcport_enable(struct bfa_s *bfa)
3713 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3715 if (bfa_fcport_is_pbcdisabled(bfa))
3716 return BFA_STATUS_PBC;
3718 if (bfa_ioc_is_disabled(&bfa->ioc))
3719 return BFA_STATUS_IOC_DISABLED;
3721 if (fcport->diag_busy)
3722 return BFA_STATUS_DIAG_BUSY;
3724 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3725 return BFA_STATUS_OK;
3728 bfa_status_t
3729 bfa_fcport_disable(struct bfa_s *bfa)
3731 if (bfa_fcport_is_pbcdisabled(bfa))
3732 return BFA_STATUS_PBC;
3734 if (bfa_ioc_is_disabled(&bfa->ioc))
3735 return BFA_STATUS_IOC_DISABLED;
3737 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3738 return BFA_STATUS_OK;
3741 /* If PBC is disabled on port, return error */
3742 bfa_status_t
3743 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3745 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3746 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3747 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3749 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3750 bfa_trc(bfa, fcport->pwwn);
3751 return BFA_STATUS_PBC;
3753 return BFA_STATUS_OK;
3757 * Configure port speed.
3759 bfa_status_t
3760 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3762 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3764 bfa_trc(bfa, speed);
3766 if (fcport->cfg.trunked == BFA_TRUE)
3767 return BFA_STATUS_TRUNK_ENABLED;
3768 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3769 (speed == BFA_PORT_SPEED_16GBPS))
3770 return BFA_STATUS_UNSUPP_SPEED;
3771 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3772 bfa_trc(bfa, fcport->speed_sup);
3773 return BFA_STATUS_UNSUPP_SPEED;
3776 /* Port speed entered needs to be checked */
3777 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3778 /* For CT2, 1G is not supported */
3779 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3780 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3781 return BFA_STATUS_UNSUPP_SPEED;
3783 /* Already checked for Auto Speed and Max Speed supp */
3784 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3785 speed == BFA_PORT_SPEED_2GBPS ||
3786 speed == BFA_PORT_SPEED_4GBPS ||
3787 speed == BFA_PORT_SPEED_8GBPS ||
3788 speed == BFA_PORT_SPEED_16GBPS ||
3789 speed == BFA_PORT_SPEED_AUTO))
3790 return BFA_STATUS_UNSUPP_SPEED;
3791 } else {
3792 if (speed != BFA_PORT_SPEED_10GBPS)
3793 return BFA_STATUS_UNSUPP_SPEED;
3796 fcport->cfg.speed = speed;
3798 return BFA_STATUS_OK;
3802 * Get current speed.
3804 enum bfa_port_speed
3805 bfa_fcport_get_speed(struct bfa_s *bfa)
3807 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3809 return fcport->speed;
3813 * Configure port topology.
3815 bfa_status_t
3816 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3818 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3820 bfa_trc(bfa, topology);
3821 bfa_trc(bfa, fcport->cfg.topology);
3823 switch (topology) {
3824 case BFA_PORT_TOPOLOGY_P2P:
3825 break;
3827 case BFA_PORT_TOPOLOGY_LOOP:
3828 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3829 (fcport->qos_attr.state != BFA_QOS_DISABLED))
3830 return BFA_STATUS_ERROR_QOS_ENABLED;
3831 if (fcport->cfg.ratelimit != BFA_FALSE)
3832 return BFA_STATUS_ERROR_TRL_ENABLED;
3833 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3834 (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3835 return BFA_STATUS_ERROR_TRUNK_ENABLED;
3836 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3837 (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3838 return BFA_STATUS_UNSUPP_SPEED;
3839 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3840 return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3841 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3842 return BFA_STATUS_DPORT_ERR;
3843 if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3844 return BFA_STATUS_DPORT_ERR;
3845 break;
3847 case BFA_PORT_TOPOLOGY_AUTO:
3848 break;
3850 default:
3851 return BFA_STATUS_EINVAL;
3854 fcport->cfg.topology = topology;
3855 return BFA_STATUS_OK;
3859 * Get current topology.
3861 enum bfa_port_topology
3862 bfa_fcport_get_topology(struct bfa_s *bfa)
3864 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3866 return fcport->topology;
3870 * Get config topology.
3872 enum bfa_port_topology
3873 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3875 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3877 return fcport->cfg.topology;
3880 bfa_status_t
3881 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3883 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3885 bfa_trc(bfa, alpa);
3886 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3887 bfa_trc(bfa, fcport->cfg.hardalpa);
3889 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3890 fcport->cfg.hardalpa = alpa;
3892 return BFA_STATUS_OK;
3895 bfa_status_t
3896 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3898 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3900 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3901 bfa_trc(bfa, fcport->cfg.hardalpa);
3903 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3904 return BFA_STATUS_OK;
3907 bfa_boolean_t
3908 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3910 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3912 *alpa = fcport->cfg.hardalpa;
3913 return fcport->cfg.cfg_hardalpa;
3917 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3919 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3921 return fcport->myalpa;
3924 bfa_status_t
3925 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3927 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3929 bfa_trc(bfa, maxfrsize);
3930 bfa_trc(bfa, fcport->cfg.maxfrsize);
3932 /* with in range */
3933 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3934 return BFA_STATUS_INVLD_DFSZ;
3936 /* power of 2, if not the max frame size of 2112 */
3937 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3938 return BFA_STATUS_INVLD_DFSZ;
3940 fcport->cfg.maxfrsize = maxfrsize;
3941 return BFA_STATUS_OK;
3945 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3947 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3949 return fcport->cfg.maxfrsize;
3953 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3955 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3956 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3958 else
3959 return 0;
3962 void
3963 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3965 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3967 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3971 * Get port attributes.
3974 wwn_t
3975 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3977 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3978 if (node)
3979 return fcport->nwwn;
3980 else
3981 return fcport->pwwn;
3984 void
3985 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3987 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3989 memset(attr, 0, sizeof(struct bfa_port_attr_s));
3991 attr->nwwn = fcport->nwwn;
3992 attr->pwwn = fcport->pwwn;
3994 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3995 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3997 memcpy(&attr->pport_cfg, &fcport->cfg,
3998 sizeof(struct bfa_port_cfg_s));
3999 /* speed attributes */
4000 attr->pport_cfg.speed = fcport->cfg.speed;
4001 attr->speed_supported = fcport->speed_sup;
4002 attr->speed = fcport->speed;
4003 attr->cos_supported = FC_CLASS_3;
4005 /* topology attributes */
4006 attr->pport_cfg.topology = fcport->cfg.topology;
4007 attr->topology = fcport->topology;
4008 attr->pport_cfg.trunked = fcport->cfg.trunked;
4010 /* beacon attributes */
4011 attr->beacon = fcport->beacon;
4012 attr->link_e2e_beacon = fcport->link_e2e_beacon;
4014 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
4015 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
4016 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
4018 attr->fec_state = fcport->fec_state;
4020 /* PBC Disabled State */
4021 if (bfa_fcport_is_pbcdisabled(bfa))
4022 attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
4023 else {
4024 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
4025 attr->port_state = BFA_PORT_ST_IOCDIS;
4026 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
4027 attr->port_state = BFA_PORT_ST_FWMISMATCH;
4030 /* FCoE vlan */
4031 attr->fcoe_vlan = fcport->fcoe_vlan;
4034 #define BFA_FCPORT_STATS_TOV 1000
4037 * Fetch port statistics (FCQoS or FCoE).
4039 bfa_status_t
4040 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4042 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4044 if (!bfa_iocfc_is_operational(bfa) ||
4045 !fcport->stats_dma_ready)
4046 return BFA_STATUS_IOC_NON_OP;
4048 if (!list_empty(&fcport->statsclr_pending_q))
4049 return BFA_STATUS_DEVBUSY;
4051 if (list_empty(&fcport->stats_pending_q)) {
4052 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4053 bfa_fcport_send_stats_get(fcport);
4054 bfa_timer_start(bfa, &fcport->timer,
4055 bfa_fcport_stats_get_timeout,
4056 fcport, BFA_FCPORT_STATS_TOV);
4057 } else
4058 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4060 return BFA_STATUS_OK;
4064 * Reset port statistics (FCQoS or FCoE).
4066 bfa_status_t
4067 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4069 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4071 if (!bfa_iocfc_is_operational(bfa) ||
4072 !fcport->stats_dma_ready)
4073 return BFA_STATUS_IOC_NON_OP;
4075 if (!list_empty(&fcport->stats_pending_q))
4076 return BFA_STATUS_DEVBUSY;
4078 if (list_empty(&fcport->statsclr_pending_q)) {
4079 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4080 bfa_fcport_send_stats_clear(fcport);
4081 bfa_timer_start(bfa, &fcport->timer,
4082 bfa_fcport_stats_clr_timeout,
4083 fcport, BFA_FCPORT_STATS_TOV);
4084 } else
4085 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4087 return BFA_STATUS_OK;
4091 * Fetch port attributes.
4093 bfa_boolean_t
4094 bfa_fcport_is_disabled(struct bfa_s *bfa)
4096 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4098 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4099 BFA_PORT_ST_DISABLED;
4103 bfa_boolean_t
4104 bfa_fcport_is_dport(struct bfa_s *bfa)
4106 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4108 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4109 BFA_PORT_ST_DPORT);
4112 bfa_boolean_t
4113 bfa_fcport_is_ddport(struct bfa_s *bfa)
4115 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4117 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4118 BFA_PORT_ST_DDPORT);
4121 bfa_status_t
4122 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4124 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4125 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4127 bfa_trc(bfa, ioc_type);
4129 if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4130 return BFA_STATUS_QOS_BW_INVALID;
4132 if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4133 return BFA_STATUS_QOS_BW_INVALID;
4135 if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4136 (qos_bw->low > qos_bw->high))
4137 return BFA_STATUS_QOS_BW_INVALID;
4139 if ((ioc_type == BFA_IOC_TYPE_FC) &&
4140 (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4141 fcport->cfg.qos_bw = *qos_bw;
4143 return BFA_STATUS_OK;
4146 bfa_boolean_t
4147 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4149 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4151 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4156 * Enable/Disable FAA feature in port config
4158 void
4159 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4161 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4163 bfa_trc(bfa, state);
4164 fcport->cfg.faa_state = state;
4168 * Get default minimum ratelim speed
4170 enum bfa_port_speed
4171 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4173 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4175 bfa_trc(bfa, fcport->cfg.trl_def_speed);
4176 return fcport->cfg.trl_def_speed;
4180 void
4181 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4182 bfa_boolean_t link_e2e_beacon)
4184 struct bfa_s *bfa = dev;
4185 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4187 bfa_trc(bfa, beacon);
4188 bfa_trc(bfa, link_e2e_beacon);
4189 bfa_trc(bfa, fcport->beacon);
4190 bfa_trc(bfa, fcport->link_e2e_beacon);
4192 fcport->beacon = beacon;
4193 fcport->link_e2e_beacon = link_e2e_beacon;
4196 bfa_boolean_t
4197 bfa_fcport_is_linkup(struct bfa_s *bfa)
4199 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4201 return (!fcport->cfg.trunked &&
4202 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4203 (fcport->cfg.trunked &&
4204 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4207 bfa_boolean_t
4208 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4210 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4212 return fcport->cfg.qos_enabled;
4215 bfa_boolean_t
4216 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4218 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4220 return fcport->cfg.trunked;
4223 bfa_status_t
4224 bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
4226 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4228 bfa_trc(bfa, on_off);
4230 if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4231 return BFA_STATUS_BBCR_FC_ONLY;
4233 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
4234 (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
4235 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
4237 if (on_off) {
4238 if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4239 return BFA_STATUS_TOPOLOGY_LOOP;
4241 if (fcport->cfg.qos_enabled)
4242 return BFA_STATUS_ERROR_QOS_ENABLED;
4244 if (fcport->cfg.trunked)
4245 return BFA_STATUS_TRUNK_ENABLED;
4247 if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
4248 (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
4249 return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
4251 if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
4252 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
4254 if (fcport->cfg.bb_cr_enabled) {
4255 if (bb_scn != fcport->cfg.bb_scn)
4256 return BFA_STATUS_BBCR_CFG_NO_CHANGE;
4257 else
4258 return BFA_STATUS_NO_CHANGE;
4261 if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
4262 bb_scn = BFA_BB_SCN_DEF;
4264 fcport->cfg.bb_cr_enabled = on_off;
4265 fcport->cfg.bb_scn = bb_scn;
4266 } else {
4267 if (!fcport->cfg.bb_cr_enabled)
4268 return BFA_STATUS_NO_CHANGE;
4270 fcport->cfg.bb_cr_enabled = on_off;
4271 fcport->cfg.bb_scn = 0;
4274 return BFA_STATUS_OK;
4277 bfa_status_t
4278 bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4279 struct bfa_bbcr_attr_s *bbcr_attr)
4281 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4283 if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4284 return BFA_STATUS_BBCR_FC_ONLY;
4286 if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4287 return BFA_STATUS_TOPOLOGY_LOOP;
4289 *bbcr_attr = fcport->bbcr_attr;
4291 return BFA_STATUS_OK;
4294 void
4295 bfa_fcport_dportenable(struct bfa_s *bfa)
4298 * Assume caller check for port is in disable state
4300 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4301 bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4304 void
4305 bfa_fcport_dportdisable(struct bfa_s *bfa)
4308 * Assume caller check for port is in disable state
4310 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4311 bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4314 void
4315 bfa_fcport_ddportenable(struct bfa_s *bfa)
4318 * Assume caller check for port is in disable state
4320 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4323 void
4324 bfa_fcport_ddportdisable(struct bfa_s *bfa)
4327 * Assume caller check for port is in disable state
4329 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4333 * Rport State machine functions
4336 * Beginning state, only online event expected.
4338 static void
4339 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4341 bfa_trc(rp->bfa, rp->rport_tag);
4342 bfa_trc(rp->bfa, event);
4344 switch (event) {
4345 case BFA_RPORT_SM_CREATE:
4346 bfa_stats(rp, sm_un_cr);
4347 bfa_sm_set_state(rp, bfa_rport_sm_created);
4348 break;
4350 default:
4351 bfa_stats(rp, sm_un_unexp);
4352 bfa_sm_fault(rp->bfa, event);
4356 static void
4357 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4359 bfa_trc(rp->bfa, rp->rport_tag);
4360 bfa_trc(rp->bfa, event);
4362 switch (event) {
4363 case BFA_RPORT_SM_ONLINE:
4364 bfa_stats(rp, sm_cr_on);
4365 if (bfa_rport_send_fwcreate(rp))
4366 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4367 else
4368 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4369 break;
4371 case BFA_RPORT_SM_DELETE:
4372 bfa_stats(rp, sm_cr_del);
4373 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4374 bfa_rport_free(rp);
4375 break;
4377 case BFA_RPORT_SM_HWFAIL:
4378 bfa_stats(rp, sm_cr_hwf);
4379 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4380 break;
4382 default:
4383 bfa_stats(rp, sm_cr_unexp);
4384 bfa_sm_fault(rp->bfa, event);
4389 * Waiting for rport create response from firmware.
4391 static void
4392 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4394 bfa_trc(rp->bfa, rp->rport_tag);
4395 bfa_trc(rp->bfa, event);
4397 switch (event) {
4398 case BFA_RPORT_SM_FWRSP:
4399 bfa_stats(rp, sm_fwc_rsp);
4400 bfa_sm_set_state(rp, bfa_rport_sm_online);
4401 bfa_rport_online_cb(rp);
4402 break;
4404 case BFA_RPORT_SM_DELETE:
4405 bfa_stats(rp, sm_fwc_del);
4406 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4407 break;
4409 case BFA_RPORT_SM_OFFLINE:
4410 bfa_stats(rp, sm_fwc_off);
4411 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4412 break;
4414 case BFA_RPORT_SM_HWFAIL:
4415 bfa_stats(rp, sm_fwc_hwf);
4416 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4417 break;
4419 default:
4420 bfa_stats(rp, sm_fwc_unexp);
4421 bfa_sm_fault(rp->bfa, event);
4426 * Request queue is full, awaiting queue resume to send create request.
4428 static void
4429 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4431 bfa_trc(rp->bfa, rp->rport_tag);
4432 bfa_trc(rp->bfa, event);
4434 switch (event) {
4435 case BFA_RPORT_SM_QRESUME:
4436 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4437 bfa_rport_send_fwcreate(rp);
4438 break;
4440 case BFA_RPORT_SM_DELETE:
4441 bfa_stats(rp, sm_fwc_del);
4442 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4443 bfa_reqq_wcancel(&rp->reqq_wait);
4444 bfa_rport_free(rp);
4445 break;
4447 case BFA_RPORT_SM_OFFLINE:
4448 bfa_stats(rp, sm_fwc_off);
4449 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4450 bfa_reqq_wcancel(&rp->reqq_wait);
4451 bfa_rport_offline_cb(rp);
4452 break;
4454 case BFA_RPORT_SM_HWFAIL:
4455 bfa_stats(rp, sm_fwc_hwf);
4456 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4457 bfa_reqq_wcancel(&rp->reqq_wait);
4458 break;
4460 default:
4461 bfa_stats(rp, sm_fwc_unexp);
4462 bfa_sm_fault(rp->bfa, event);
4467 * Online state - normal parking state.
4469 static void
4470 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4472 struct bfi_rport_qos_scn_s *qos_scn;
4474 bfa_trc(rp->bfa, rp->rport_tag);
4475 bfa_trc(rp->bfa, event);
4477 switch (event) {
4478 case BFA_RPORT_SM_OFFLINE:
4479 bfa_stats(rp, sm_on_off);
4480 if (bfa_rport_send_fwdelete(rp))
4481 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4482 else
4483 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4484 break;
4486 case BFA_RPORT_SM_DELETE:
4487 bfa_stats(rp, sm_on_del);
4488 if (bfa_rport_send_fwdelete(rp))
4489 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4490 else
4491 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4492 break;
4494 case BFA_RPORT_SM_HWFAIL:
4495 bfa_stats(rp, sm_on_hwf);
4496 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4497 break;
4499 case BFA_RPORT_SM_SET_SPEED:
4500 bfa_rport_send_fwspeed(rp);
4501 break;
4503 case BFA_RPORT_SM_QOS_SCN:
4504 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4505 rp->qos_attr = qos_scn->new_qos_attr;
4506 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4507 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4508 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4509 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4511 qos_scn->old_qos_attr.qos_flow_id =
4512 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4513 qos_scn->new_qos_attr.qos_flow_id =
4514 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4516 if (qos_scn->old_qos_attr.qos_flow_id !=
4517 qos_scn->new_qos_attr.qos_flow_id)
4518 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4519 qos_scn->old_qos_attr,
4520 qos_scn->new_qos_attr);
4521 if (qos_scn->old_qos_attr.qos_priority !=
4522 qos_scn->new_qos_attr.qos_priority)
4523 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4524 qos_scn->old_qos_attr,
4525 qos_scn->new_qos_attr);
4526 break;
4528 default:
4529 bfa_stats(rp, sm_on_unexp);
4530 bfa_sm_fault(rp->bfa, event);
4535 * Firmware rport is being deleted - awaiting f/w response.
4537 static void
4538 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4540 bfa_trc(rp->bfa, rp->rport_tag);
4541 bfa_trc(rp->bfa, event);
4543 switch (event) {
4544 case BFA_RPORT_SM_FWRSP:
4545 bfa_stats(rp, sm_fwd_rsp);
4546 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4547 bfa_rport_offline_cb(rp);
4548 break;
4550 case BFA_RPORT_SM_DELETE:
4551 bfa_stats(rp, sm_fwd_del);
4552 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4553 break;
4555 case BFA_RPORT_SM_HWFAIL:
4556 bfa_stats(rp, sm_fwd_hwf);
4557 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4558 bfa_rport_offline_cb(rp);
4559 break;
4561 default:
4562 bfa_stats(rp, sm_fwd_unexp);
4563 bfa_sm_fault(rp->bfa, event);
4567 static void
4568 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4570 bfa_trc(rp->bfa, rp->rport_tag);
4571 bfa_trc(rp->bfa, event);
4573 switch (event) {
4574 case BFA_RPORT_SM_QRESUME:
4575 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4576 bfa_rport_send_fwdelete(rp);
4577 break;
4579 case BFA_RPORT_SM_DELETE:
4580 bfa_stats(rp, sm_fwd_del);
4581 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4582 break;
4584 case BFA_RPORT_SM_HWFAIL:
4585 bfa_stats(rp, sm_fwd_hwf);
4586 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4587 bfa_reqq_wcancel(&rp->reqq_wait);
4588 bfa_rport_offline_cb(rp);
4589 break;
4591 default:
4592 bfa_stats(rp, sm_fwd_unexp);
4593 bfa_sm_fault(rp->bfa, event);
4598 * Offline state.
4600 static void
4601 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4603 bfa_trc(rp->bfa, rp->rport_tag);
4604 bfa_trc(rp->bfa, event);
4606 switch (event) {
4607 case BFA_RPORT_SM_DELETE:
4608 bfa_stats(rp, sm_off_del);
4609 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4610 bfa_rport_free(rp);
4611 break;
4613 case BFA_RPORT_SM_ONLINE:
4614 bfa_stats(rp, sm_off_on);
4615 if (bfa_rport_send_fwcreate(rp))
4616 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4617 else
4618 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4619 break;
4621 case BFA_RPORT_SM_HWFAIL:
4622 bfa_stats(rp, sm_off_hwf);
4623 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4624 break;
4626 case BFA_RPORT_SM_OFFLINE:
4627 bfa_rport_offline_cb(rp);
4628 break;
4630 default:
4631 bfa_stats(rp, sm_off_unexp);
4632 bfa_sm_fault(rp->bfa, event);
4637 * Rport is deleted, waiting for firmware response to delete.
4639 static void
4640 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4642 bfa_trc(rp->bfa, rp->rport_tag);
4643 bfa_trc(rp->bfa, event);
4645 switch (event) {
4646 case BFA_RPORT_SM_FWRSP:
4647 bfa_stats(rp, sm_del_fwrsp);
4648 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4649 bfa_rport_free(rp);
4650 break;
4652 case BFA_RPORT_SM_HWFAIL:
4653 bfa_stats(rp, sm_del_hwf);
4654 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4655 bfa_rport_free(rp);
4656 break;
4658 default:
4659 bfa_sm_fault(rp->bfa, event);
4663 static void
4664 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4666 bfa_trc(rp->bfa, rp->rport_tag);
4667 bfa_trc(rp->bfa, event);
4669 switch (event) {
4670 case BFA_RPORT_SM_QRESUME:
4671 bfa_stats(rp, sm_del_fwrsp);
4672 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4673 bfa_rport_send_fwdelete(rp);
4674 break;
4676 case BFA_RPORT_SM_HWFAIL:
4677 bfa_stats(rp, sm_del_hwf);
4678 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4679 bfa_reqq_wcancel(&rp->reqq_wait);
4680 bfa_rport_free(rp);
4681 break;
4683 default:
4684 bfa_sm_fault(rp->bfa, event);
4689 * Waiting for rport create response from firmware. A delete is pending.
4691 static void
4692 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4693 enum bfa_rport_event event)
4695 bfa_trc(rp->bfa, rp->rport_tag);
4696 bfa_trc(rp->bfa, event);
4698 switch (event) {
4699 case BFA_RPORT_SM_FWRSP:
4700 bfa_stats(rp, sm_delp_fwrsp);
4701 if (bfa_rport_send_fwdelete(rp))
4702 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4703 else
4704 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4705 break;
4707 case BFA_RPORT_SM_HWFAIL:
4708 bfa_stats(rp, sm_delp_hwf);
4709 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4710 bfa_rport_free(rp);
4711 break;
4713 default:
4714 bfa_stats(rp, sm_delp_unexp);
4715 bfa_sm_fault(rp->bfa, event);
4720 * Waiting for rport create response from firmware. Rport offline is pending.
4722 static void
4723 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4724 enum bfa_rport_event event)
4726 bfa_trc(rp->bfa, rp->rport_tag);
4727 bfa_trc(rp->bfa, event);
4729 switch (event) {
4730 case BFA_RPORT_SM_FWRSP:
4731 bfa_stats(rp, sm_offp_fwrsp);
4732 if (bfa_rport_send_fwdelete(rp))
4733 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4734 else
4735 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4736 break;
4738 case BFA_RPORT_SM_DELETE:
4739 bfa_stats(rp, sm_offp_del);
4740 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4741 break;
4743 case BFA_RPORT_SM_HWFAIL:
4744 bfa_stats(rp, sm_offp_hwf);
4745 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4746 bfa_rport_offline_cb(rp);
4747 break;
4749 default:
4750 bfa_stats(rp, sm_offp_unexp);
4751 bfa_sm_fault(rp->bfa, event);
4756 * IOC h/w failed.
4758 static void
4759 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4761 bfa_trc(rp->bfa, rp->rport_tag);
4762 bfa_trc(rp->bfa, event);
4764 switch (event) {
4765 case BFA_RPORT_SM_OFFLINE:
4766 bfa_stats(rp, sm_iocd_off);
4767 bfa_rport_offline_cb(rp);
4768 break;
4770 case BFA_RPORT_SM_DELETE:
4771 bfa_stats(rp, sm_iocd_del);
4772 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4773 bfa_rport_free(rp);
4774 break;
4776 case BFA_RPORT_SM_ONLINE:
4777 bfa_stats(rp, sm_iocd_on);
4778 if (bfa_rport_send_fwcreate(rp))
4779 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4780 else
4781 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4782 break;
4784 case BFA_RPORT_SM_HWFAIL:
4785 break;
4787 default:
4788 bfa_stats(rp, sm_iocd_unexp);
4789 bfa_sm_fault(rp->bfa, event);
4796 * bfa_rport_private BFA rport private functions
4799 static void
4800 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4802 struct bfa_rport_s *rp = cbarg;
4804 if (complete)
4805 bfa_cb_rport_online(rp->rport_drv);
4808 static void
4809 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4811 struct bfa_rport_s *rp = cbarg;
4813 if (complete)
4814 bfa_cb_rport_offline(rp->rport_drv);
4817 static void
4818 bfa_rport_qresume(void *cbarg)
4820 struct bfa_rport_s *rp = cbarg;
4822 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4825 void
4826 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4827 struct bfa_s *bfa)
4829 struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4831 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4832 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4834 /* kva memory */
4835 bfa_mem_kva_setup(minfo, rport_kva,
4836 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4839 void
4840 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4841 struct bfa_pcidev_s *pcidev)
4843 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4844 struct bfa_rport_s *rp;
4845 u16 i;
4847 INIT_LIST_HEAD(&mod->rp_free_q);
4848 INIT_LIST_HEAD(&mod->rp_active_q);
4849 INIT_LIST_HEAD(&mod->rp_unused_q);
4851 rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4852 mod->rps_list = rp;
4853 mod->num_rports = cfg->fwcfg.num_rports;
4855 WARN_ON(!mod->num_rports ||
4856 (mod->num_rports & (mod->num_rports - 1)));
4858 for (i = 0; i < mod->num_rports; i++, rp++) {
4859 memset(rp, 0, sizeof(struct bfa_rport_s));
4860 rp->bfa = bfa;
4861 rp->rport_tag = i;
4862 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4865 * - is unused
4867 if (i)
4868 list_add_tail(&rp->qe, &mod->rp_free_q);
4870 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4874 * consume memory
4876 bfa_mem_kva_curp(mod) = (u8 *) rp;
4879 void
4880 bfa_rport_iocdisable(struct bfa_s *bfa)
4882 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4883 struct bfa_rport_s *rport;
4884 struct list_head *qe, *qen;
4886 /* Enqueue unused rport resources to free_q */
4887 list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4889 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4890 rport = (struct bfa_rport_s *) qe;
4891 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4895 static struct bfa_rport_s *
4896 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4898 struct bfa_rport_s *rport;
4900 bfa_q_deq(&mod->rp_free_q, &rport);
4901 if (rport)
4902 list_add_tail(&rport->qe, &mod->rp_active_q);
4904 return rport;
4907 static void
4908 bfa_rport_free(struct bfa_rport_s *rport)
4910 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4912 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4913 list_del(&rport->qe);
4914 list_add_tail(&rport->qe, &mod->rp_free_q);
4917 static bfa_boolean_t
4918 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4920 struct bfi_rport_create_req_s *m;
4923 * check for room in queue to send request now
4925 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4926 if (!m) {
4927 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4928 return BFA_FALSE;
4931 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4932 bfa_fn_lpu(rp->bfa));
4933 m->bfa_handle = rp->rport_tag;
4934 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4935 m->pid = rp->rport_info.pid;
4936 m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4937 m->local_pid = rp->rport_info.local_pid;
4938 m->fc_class = rp->rport_info.fc_class;
4939 m->vf_en = rp->rport_info.vf_en;
4940 m->vf_id = rp->rport_info.vf_id;
4941 m->cisc = rp->rport_info.cisc;
4944 * queue I/O message to firmware
4946 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4947 return BFA_TRUE;
4950 static bfa_boolean_t
4951 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4953 struct bfi_rport_delete_req_s *m;
4956 * check for room in queue to send request now
4958 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4959 if (!m) {
4960 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4961 return BFA_FALSE;
4964 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4965 bfa_fn_lpu(rp->bfa));
4966 m->fw_handle = rp->fw_handle;
4969 * queue I/O message to firmware
4971 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4972 return BFA_TRUE;
/*
 * Send a rport speed-set request to firmware.  Unlike create/delete, a
 * full request queue is not waited on -- the request is simply dropped
 * (only a trace is left) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 *  bfa_rport_public
 */

/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * (create/delete responses and state-change notifications) to the rport
 * state machine or the SCN callbacks.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		/* LUN masking is (re)applied once fw knows the rport */
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Trim the rport free list down to the number of rports the firmware
 * actually supports (num_rport_fw); the surplus entries are moved to the
 * unused queue so they are never handed out.
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}
/*
 *  bfa_rport_api
 */

/*
 * Allocate an rport from the free pool, bind it to the driver-level
 * rport (rport_drv) and kick its state machine with a CREATE event.
 * Returns NULL when the pool is exhausted.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
/*
 * Bring an rport online with the supplied login parameters.  A zero PDU
 * size is warned about and then repaired, so login still proceeds.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5119 void
5120 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5122 WARN_ON(speed == 0);
5123 WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5125 if (rport) {
5126 rport->rport_info.speed = speed;
5127 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
/* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	/* rport_drv holds the FCS-level rport; take both endpoint WWNs */
	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* mark masking enabled on both the logical port and the rport */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
/* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* clear the mask flag and invalidate the FCP-IM mapping */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
/*
 *  SGPG related functions
 */

/*
 * Compute and return memory needed by FCP(im) module.
 * Clamps drvcfg.num_sgpgs into [BFA_SGPG_MIN, BFA_SGPG_MAX], then sizes
 * the DMA segments (sg page arrays) and the kva area (host-side
 * bfa_sgpg_s bookkeeping structs).
 */
void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* full segments first, remainder in the last segment */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
/*
 * Claim the DMA and kva memory reserved by bfa_sgpg_meminfo() and build
 * the SGPG free list: each host-side bfa_sgpg_s is paired with one
 * hardware bfi_sgpg_s page, aligned to the page size within each DMA
 * segment.
 */
void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment start up to an sgpg boundary */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* firmware expects the address little-endian */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance kva pointer past the consumed bfa_sgpg_s array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
/*
 * Allocate nsgpgs scatter-gather pages from the free pool onto the
 * caller's sgpg_q.  All-or-nothing: returns BFA_STATUS_ENOMEM without
 * taking anything when fewer than nsgpgs pages are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		WARN_ON(!hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
/*
 * Return nsgpg pages from sgpg_q to the free pool, then hand freed pages
 * to as many waiters as can be satisfied (possibly partially); a waiter
 * whose request is fully met is dequeued and its callback invoked.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
/*
 * Queue a wait element for nsgpg pages.  Any pages currently free are
 * granted to this waiter immediately (it is expected to be the only
 * waiter in that case) and the element waits for the remainder.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* callers wait only after a failed malloc, i.e. free < nsgpg */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
/*
 * Cancel a pending SGPG wait element, returning any pages it had already
 * been granted back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
5359 void
5360 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5361 void *cbarg)
5363 INIT_LIST_HEAD(&wqe->sgpg_q);
5364 wqe->cbfn = cbfn;
5365 wqe->cbarg = cbarg;
/*
 *  UF related functions
 */
/*
 *****************************************************************************
 * Internal functions
 *****************************************************************************
 */
/*
 * Deferred-callback trampoline: deliver a received unsolicited frame to
 * the registered receive handler.  Does nothing on a cancelled callback
 * (complete == BFA_FALSE).
 */
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_uf_s *uf = cbarg;
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);

	if (complete)
		ufm->ufrecv(ufm->cbarg, uf);
}
5386 static void
5387 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5389 struct bfi_uf_buf_post_s *uf_bp_msg;
5390 u16 i;
5391 u16 buf_len;
5393 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5394 uf_bp_msg = ufm->uf_buf_posts;
5396 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5397 i++, uf_bp_msg++) {
5398 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5400 uf_bp_msg->buf_tag = i;
5401 buf_len = sizeof(struct bfa_uf_buf_s);
5402 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5403 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5404 bfa_fn_lpu(ufm->bfa));
5405 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5409 * advance pointer beyond consumed memory
5411 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
/*
 * Claim kva memory for the bfa_uf_s array and put every UF, paired with
 * its per-UF DMA buffer, on the free queue.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
/*
 * Claim all UF-module memory.  Order matters: both helpers consume the
 * shared kva cursor, UF structs first, then the post-message templates.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
/*
 * Compute DMA and kva memory needed by the UF module: one DMA buffer per
 * unsolicited frame plus, per frame, a bfa_uf_s and a pre-built
 * bfi_uf_buf_post_s in kva.
 */
void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
/*
 * UF module attach: initialize the free/posted/unused queues and claim
 * the memory reserved by bfa_uf_meminfo().
 */
void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
/*
 * Dequeue one UF from the free queue; returns NULL when the queue is
 * empty (bfa_q_deq NULLs the output pointer in that case).
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
/* Return a UF to the tail of the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
/*
 * Post one UF receive buffer to firmware by copying its pre-built
 * buffer-post template into the request queue.  On success the UF moves
 * to the posted queue; on a full request queue BFA_STATUS_FAILED is
 * returned and the UF is NOT queued anywhere (caller must handle it).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5528 static void
5529 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5531 struct bfa_uf_s *uf;
5533 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5534 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5535 break;
/*
 * Handle a firmware frame-received event: locate the UF by buffer tag,
 * byte-swap the lengths, log the frame header to the port log and
 * deliver the UF to the registered receive handler (directly when FCS
 * runs in the same context, otherwise via the callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	/* NOTE(review): fchs is assigned but the plog calls below cast
	 * buf themselves; the variable looks vestigial. */
	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
/*
 * IOC disable handling: reclaim all UFs.  Unused UFs rejoin the free
 * queue and every posted (in-flight) UF is pulled back off the posted
 * queue, since firmware will never complete them.
 */
void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
/*
 * Module start: hand all free receive buffers to firmware so it can
 * deliver unsolicited frames.
 */
void
bfa_uf_start(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_uf_post_all(ufm);
}
/*
 * Register handler for all unsolicted receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
/*
 * Free an unsolicited frame back to BFA.
 *
 * @param[in]	uf	unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	/* re-post the freed buffer (and any other free ones) to firmware */
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
/*
 *  uf_pub BFA uf module public functions
 */

/*
 * UF interrupt handler: only frame-received messages are expected from
 * firmware; anything else trips a warning.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Trim the UF free list down to the number of UF buffers the firmware
 * supports (num_uf_fw); surplus entries move to the unused queue.
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
/*
 *  Dport forward declaration
 */

/* Externally visible result states of a D-port diagnostic test */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0, /*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1, /*!< test in progress */
	BFA_DPORT_ST_COMP	= 2, /*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3, /*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4, /*!< test not start dport is enabled */
};
/*
 * BFA DPORT state machine events
 */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
	BFA_DPORT_SM_DISABLE	= 2,    /* dport disable event        */
	BFA_DPORT_SM_FWRSP	= 3,    /* fw enable/disable rsp      */
	BFA_DPORT_SM_QRESUME	= 4,    /* CQ space available         */
	BFA_DPORT_SM_HWFAIL	= 5,    /* IOC h/w failure            */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
	BFA_DPORT_SM_SCN	= 8,	/* state change notify frm fw */
};
5694 static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5695 enum bfa_dport_sm_event event);
5696 static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5697 enum bfa_dport_sm_event event);
5698 static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5699 enum bfa_dport_sm_event event);
5700 static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5701 enum bfa_dport_sm_event event);
5702 static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5703 enum bfa_dport_sm_event event);
5704 static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5705 enum bfa_dport_sm_event event);
5706 static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5707 enum bfa_dport_sm_event event);
5708 static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5709 enum bfa_dport_sm_event event);
5710 static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5711 enum bfa_dport_sm_event event);
5712 static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5713 enum bfa_dport_sm_event event);
5714 static void bfa_dport_qresume(void *cbarg);
5715 static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5716 struct bfi_diag_dport_rsp_s *msg);
5717 static void bfa_dport_scn(struct bfa_dport_s *dport,
5718 struct bfi_diag_dport_scn_s *msg);
/*
 * BFA fcdiag module
 */
/* per-queue timeout for the CPE/RME queue test */
#define BFA_DIAG_QTEST_TOV	1000    /* msec */
5726 * Set port status to busy
5728 static void
5729 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5731 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5733 if (fcdiag->lb.lock)
5734 fcport->diag_busy = BFA_TRUE;
5735 else
5736 fcport->diag_busy = BFA_FALSE;
/*
 * fcdiag module attach: wire up tracing and initialize the embedded
 * D-port sub-module (state machine, request-queue wait element, result
 * area).  Memory claim is done by the common bfa_diag_attach().
 */
void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s  *dport = &fcdiag->dport;

	fcdiag->bfa             = bfa;
	fcdiag->trcmod  = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
	dport->test_state = BFA_DPORT_ST_DISABLED;
	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
/*
 * IOC disable handling: fail any loopback test in flight with
 * IOC_FAILURE and forward the hardware-failure event to the D-port
 * state machine.
 */
void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
/*
 * Queue-test timer expiry: firmware never echoed the test message back.
 * Record ETIMER in the result, complete the user callback and release
 * the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Send one queue-test message, filled with QTEST_PAT_DEFAULT, on the
 * CQ currently under test.  Returns DEVBUSY when the request queue has
 * no free slot.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32	i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
			bfa_fn_lpu(fcdiag->bfa));

	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}
/*
 * Queue-test response handler: verify the echoed payload (firmware
 * returns the bitwise complement of the pattern), then either resend on
 * the same queue, advance to the next queue in all-queues mode, or
 * finish the test, stop the timer and complete the user callback.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations on the current queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* all-queues mode: move on to the next CQ */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Loopback response handler: byte-swap the firmware frame counters into
 * the user's result structure, complete the callback and drop the
 * loopback lock / port busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
/*
 * Build and send a loopback request to firmware on the DIAG request
 * queue.  Returns DEVBUSY when no request-queue slot is free.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
			bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
/*
 *	cpe/rme intr handler
 */
/*
 * Dispatch firmware DIAG messages to the loopback, queue-test and
 * D-port completion handlers.
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	case BFI_DIAG_I2H_DPORT:
		bfa_dport_req_comp(&fcdiag->dport,
				(struct bfi_diag_dport_rsp_s *)msg);
		break;
	case BFI_DIAG_I2H_DPORT_SCN:
		bfa_dport_scn(&fcdiag->dport,
				(struct bfi_diag_dport_scn_s *)msg);
		break;
	default:
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 *	Loopback test
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] opmode          - port operation mode
 *   @param[in] speed           - port speed
 *   @param[in] lpcnt           - loop count
 *   @param[in] pat                     - pattern to build packet
 *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] cbarg           - callback functioin arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct  bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/* loopback requires the port to be administratively disabled */
	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* FCoE (CNA) ports only run at 10G */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}
	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
/*
 *	DIAG queue test command
 *
 *   @param[in] *bfa            - bfa data struct
 *   @param[in] force           - 1: don't do ioc op checking
 *   @param[in] queue           - queue no. to test
 *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback functioin arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/* send */
	if (queue < BFI_IOC_MAX_CQS) {
		/* single-queue mode */
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		/* out-of-range queue number means "test all queues" */
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
6135 * DIAG PLB is running
6137 * @param[in] *bfa - bfa data struct
6139 * @param[out]
6141 bfa_status_t
6142 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6144 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6145 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
/*
 *	D-port
 */
/*
 * Initialize a D-port test result record at test start: timestamp,
 * in-progress status, test mode and the remote-port identity/loop count
 * captured on the dport object.
 */
#define bfa_dport_result_start(__dport, __mode) do {				\
		(__dport)->result.start_time = bfa_get_log_time();		\
		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
		(__dport)->result.mode = (__mode);				\
		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
} while (0)
6160 static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6161 enum bfi_dport_req req);
/*
 * Complete the pending D-port user callback (if any) with the given
 * status, then clear it so it fires at most once.
 */
static void
bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
{
	if (dport->cbfn != NULL) {
		dport->cbfn(dport->cbarg, bfa_status);
		dport->cbfn = NULL;
		dport->cbarg = NULL;
	}
}
/*
 * D-port state: disabled.  ENABLE kicks off a firmware enable request
 * (queue-waiting if needed); a dynamic-dport SCN from firmware moves
 * straight to enabled.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6211 static void
6212 bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6213 enum bfa_dport_sm_event event)
6215 bfa_trc(dport->bfa, event);
6217 switch (event) {
6218 case BFA_DPORT_SM_QRESUME:
6219 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6220 bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6221 break;
6223 case BFA_DPORT_SM_HWFAIL:
6224 bfa_reqq_wcancel(&dport->reqq_wait);
6225 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6226 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6227 break;
6229 default:
6230 bfa_sm_fault(dport->bfa, event);
6234 static void
6235 bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6237 bfa_trc(dport->bfa, event);
6239 switch (event) {
6240 case BFA_DPORT_SM_FWRSP:
6241 memset(&dport->result, 0,
6242 sizeof(struct bfa_diag_dport_result_s));
6243 if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
6244 dport->test_state = BFA_DPORT_ST_NO_SFP;
6245 } else {
6246 dport->test_state = BFA_DPORT_ST_INP;
6247 bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
6249 bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6250 break;
6252 case BFA_DPORT_SM_REQFAIL:
6253 dport->test_state = BFA_DPORT_ST_DISABLED;
6254 bfa_fcport_dportdisable(dport->bfa);
6255 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6256 break;
6258 case BFA_DPORT_SM_HWFAIL:
6259 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6260 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6261 break;
6263 default:
6264 bfa_sm_fault(dport->bfa, event);
6268 static void
6269 bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6271 bfa_trc(dport->bfa, event);
6273 switch (event) {
6274 case BFA_DPORT_SM_START:
6275 if (bfa_dport_send_req(dport, BFI_DPORT_START))
6276 bfa_sm_set_state(dport, bfa_dport_sm_starting);
6277 else
6278 bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
6279 break;
6281 case BFA_DPORT_SM_DISABLE:
6282 bfa_fcport_dportdisable(dport->bfa);
6283 if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
6284 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6285 else
6286 bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
6287 break;
6289 case BFA_DPORT_SM_HWFAIL:
6290 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6291 break;
6293 case BFA_DPORT_SM_SCN:
6294 switch (dport->i2hmsg.scn.state) {
6295 case BFI_DPORT_SCN_TESTCOMP:
6296 dport->test_state = BFA_DPORT_ST_COMP;
6297 break;
6299 case BFI_DPORT_SCN_TESTSTART:
6300 dport->test_state = BFA_DPORT_ST_INP;
6301 break;
6303 case BFI_DPORT_SCN_TESTSKIP:
6304 case BFI_DPORT_SCN_SUBTESTSTART:
6305 /* no state change */
6306 break;
6308 case BFI_DPORT_SCN_SFP_REMOVED:
6309 dport->test_state = BFA_DPORT_ST_NO_SFP;
6310 break;
6312 case BFI_DPORT_SCN_DDPORT_DISABLE:
6313 bfa_fcport_ddportdisable(dport->bfa);
6315 if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
6316 bfa_sm_set_state(dport,
6317 bfa_dport_sm_dynamic_disabling);
6318 else
6319 bfa_sm_set_state(dport,
6320 bfa_dport_sm_dynamic_disabling_qwait);
6321 break;
6323 case BFI_DPORT_SCN_FCPORT_DISABLE:
6324 bfa_fcport_ddportdisable(dport->bfa);
6326 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6327 dport->dynamic = BFA_FALSE;
6328 break;
6330 default:
6331 bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
6332 bfa_sm_fault(dport->bfa, event);
6334 break;
6335 default:
6336 bfa_sm_fault(dport->bfa, event);
6340 static void
6341 bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6342 enum bfa_dport_sm_event event)
6344 bfa_trc(dport->bfa, event);
6346 switch (event) {
6347 case BFA_DPORT_SM_QRESUME:
6348 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6349 bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6350 break;
6352 case BFA_DPORT_SM_HWFAIL:
6353 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6354 bfa_reqq_wcancel(&dport->reqq_wait);
6355 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6356 break;
6358 case BFA_DPORT_SM_SCN:
6359 /* ignore */
6360 break;
6362 default:
6363 bfa_sm_fault(dport->bfa, event);
6367 static void
6368 bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6370 bfa_trc(dport->bfa, event);
6372 switch (event) {
6373 case BFA_DPORT_SM_FWRSP:
6374 dport->test_state = BFA_DPORT_ST_DISABLED;
6375 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6376 break;
6378 case BFA_DPORT_SM_HWFAIL:
6379 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6380 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6381 break;
6383 case BFA_DPORT_SM_SCN:
6384 /* no state change */
6385 break;
6387 default:
6388 bfa_sm_fault(dport->bfa, event);
6392 static void
6393 bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
6394 enum bfa_dport_sm_event event)
6396 bfa_trc(dport->bfa, event);
6398 switch (event) {
6399 case BFA_DPORT_SM_QRESUME:
6400 bfa_sm_set_state(dport, bfa_dport_sm_starting);
6401 bfa_dport_send_req(dport, BFI_DPORT_START);
6402 break;
6404 case BFA_DPORT_SM_HWFAIL:
6405 bfa_reqq_wcancel(&dport->reqq_wait);
6406 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6407 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6408 break;
6410 default:
6411 bfa_sm_fault(dport->bfa, event);
6415 static void
6416 bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6418 bfa_trc(dport->bfa, event);
6420 switch (event) {
6421 case BFA_DPORT_SM_FWRSP:
6422 memset(&dport->result, 0,
6423 sizeof(struct bfa_diag_dport_result_s));
6424 if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
6425 dport->test_state = BFA_DPORT_ST_NO_SFP;
6426 } else {
6427 dport->test_state = BFA_DPORT_ST_INP;
6428 bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
6430 /* fall thru */
6432 case BFA_DPORT_SM_REQFAIL:
6433 bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6434 break;
6436 case BFA_DPORT_SM_HWFAIL:
6437 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6438 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6439 break;
6441 default:
6442 bfa_sm_fault(dport->bfa, event);
6446 static void
6447 bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
6448 enum bfa_dport_sm_event event)
6450 bfa_trc(dport->bfa, event);
6452 switch (event) {
6453 case BFA_DPORT_SM_SCN:
6454 switch (dport->i2hmsg.scn.state) {
6455 case BFI_DPORT_SCN_DDPORT_DISABLED:
6456 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6457 dport->dynamic = BFA_FALSE;
6458 bfa_fcport_enable(dport->bfa);
6459 break;
6461 default:
6462 bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
6463 bfa_sm_fault(dport->bfa, event);
6466 break;
6468 case BFA_DPORT_SM_HWFAIL:
6469 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6470 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6471 break;
6473 default:
6474 bfa_sm_fault(dport->bfa, event);
6478 static void
6479 bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
6480 enum bfa_dport_sm_event event)
6482 bfa_trc(dport->bfa, event);
6484 switch (event) {
6485 case BFA_DPORT_SM_QRESUME:
6486 bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
6487 bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
6488 break;
6490 case BFA_DPORT_SM_HWFAIL:
6491 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6492 bfa_reqq_wcancel(&dport->reqq_wait);
6493 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6494 break;
6496 case BFA_DPORT_SM_SCN:
6497 /* ignore */
6498 break;
6500 default:
6501 bfa_sm_fault(dport->bfa, event);
6505 static bfa_boolean_t
6506 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6508 struct bfi_diag_dport_req_s *m;
6511 * check for room in queue to send request now
6513 m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6514 if (!m) {
6515 bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6516 return BFA_FALSE;
6519 bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6520 bfa_fn_lpu(dport->bfa));
6521 m->req = req;
6522 if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6523 m->lpcnt = cpu_to_be32(dport->lpcnt);
6524 m->payload = cpu_to_be32(dport->payload);
6528 * queue I/O message to firmware
6530 bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6532 return BFA_TRUE;
6535 static void
6536 bfa_dport_qresume(void *cbarg)
6538 struct bfa_dport_s *dport = cbarg;
6540 bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6543 static void
6544 bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6546 msg->status = cpu_to_be32(msg->status);
6547 dport->i2hmsg.rsp.status = msg->status;
6548 dport->rp_pwwn = msg->pwwn;
6549 dport->rp_nwwn = msg->nwwn;
6551 if ((msg->status == BFA_STATUS_OK) ||
6552 (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6553 bfa_trc(dport->bfa, msg->status);
6554 bfa_trc(dport->bfa, dport->rp_pwwn);
6555 bfa_trc(dport->bfa, dport->rp_nwwn);
6556 bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6558 } else {
6559 bfa_trc(dport->bfa, msg->status);
6560 bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6562 bfa_cb_fcdiag_dport(dport, msg->status);
6565 static bfa_boolean_t
6566 bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6568 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6569 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6570 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6571 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6572 bfa_sm_cmp_state(dport, bfa_dport_sm_starting) ||
6573 bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6574 return BFA_TRUE;
6575 } else {
6576 return BFA_FALSE;
6580 static void
6581 bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6583 int i;
6584 uint8_t subtesttype;
6586 bfa_trc(dport->bfa, msg->state);
6587 dport->i2hmsg.scn.state = msg->state;
6589 switch (dport->i2hmsg.scn.state) {
6590 case BFI_DPORT_SCN_TESTCOMP:
6591 dport->result.end_time = bfa_get_log_time();
6592 bfa_trc(dport->bfa, dport->result.end_time);
6594 dport->result.status = msg->info.testcomp.status;
6595 bfa_trc(dport->bfa, dport->result.status);
6597 dport->result.roundtrip_latency =
6598 cpu_to_be32(msg->info.testcomp.latency);
6599 dport->result.est_cable_distance =
6600 cpu_to_be32(msg->info.testcomp.distance);
6601 dport->result.buffer_required =
6602 be16_to_cpu(msg->info.testcomp.numbuffer);
6604 dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
6605 dport->result.speed = msg->info.testcomp.speed;
6607 bfa_trc(dport->bfa, dport->result.roundtrip_latency);
6608 bfa_trc(dport->bfa, dport->result.est_cable_distance);
6609 bfa_trc(dport->bfa, dport->result.buffer_required);
6610 bfa_trc(dport->bfa, dport->result.frmsz);
6611 bfa_trc(dport->bfa, dport->result.speed);
6613 for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
6614 dport->result.subtest[i].status =
6615 msg->info.testcomp.subtest_status[i];
6616 bfa_trc(dport->bfa, dport->result.subtest[i].status);
6618 break;
6620 case BFI_DPORT_SCN_TESTSKIP:
6621 case BFI_DPORT_SCN_DDPORT_ENABLE:
6622 memset(&dport->result, 0,
6623 sizeof(struct bfa_diag_dport_result_s));
6624 break;
6626 case BFI_DPORT_SCN_TESTSTART:
6627 memset(&dport->result, 0,
6628 sizeof(struct bfa_diag_dport_result_s));
6629 dport->rp_pwwn = msg->info.teststart.pwwn;
6630 dport->rp_nwwn = msg->info.teststart.nwwn;
6631 dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
6632 bfa_dport_result_start(dport, msg->info.teststart.mode);
6633 break;
6635 case BFI_DPORT_SCN_SUBTESTSTART:
6636 subtesttype = msg->info.teststart.type;
6637 dport->result.subtest[subtesttype].start_time =
6638 bfa_get_log_time();
6639 dport->result.subtest[subtesttype].status =
6640 DPORT_TEST_ST_INPRG;
6642 bfa_trc(dport->bfa, subtesttype);
6643 bfa_trc(dport->bfa,
6644 dport->result.subtest[subtesttype].start_time);
6645 break;
6647 case BFI_DPORT_SCN_SFP_REMOVED:
6648 case BFI_DPORT_SCN_DDPORT_DISABLED:
6649 case BFI_DPORT_SCN_DDPORT_DISABLE:
6650 case BFI_DPORT_SCN_FCPORT_DISABLE:
6651 dport->result.status = DPORT_TEST_ST_IDLE;
6652 break;
6654 default:
6655 bfa_sm_fault(dport->bfa, msg->state);
6658 bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
6662 * Dport enable
6664 * @param[in] *bfa - bfa data struct
6666 bfa_status_t
6667 bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6668 bfa_cb_diag_t cbfn, void *cbarg)
6670 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6671 struct bfa_dport_s *dport = &fcdiag->dport;
6674 * Dport is not support in MEZZ card
6676 if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6677 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6678 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6682 * Dport is supported in CT2 or above
6684 if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6685 bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6686 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6690 * Check to see if IOC is down
6692 if (!bfa_iocfc_is_operational(bfa))
6693 return BFA_STATUS_IOC_NON_OP;
6695 /* if port is PBC disabled, return error */
6696 if (bfa_fcport_is_pbcdisabled(bfa)) {
6697 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6698 return BFA_STATUS_PBC;
6702 * Check if port mode is FC port
6704 if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6705 bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6706 return BFA_STATUS_CMD_NOTSUPP_CNA;
6710 * Check if port is in LOOP mode
6712 if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6713 (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6714 bfa_trc(dport->bfa, 0);
6715 return BFA_STATUS_TOPOLOGY_LOOP;
6719 * Check if port is TRUNK mode
6721 if (bfa_fcport_is_trunk_enabled(bfa)) {
6722 bfa_trc(dport->bfa, 0);
6723 return BFA_STATUS_ERROR_TRUNK_ENABLED;
6727 * Check if diag loopback is running
6729 if (bfa_fcdiag_lb_is_running(bfa)) {
6730 bfa_trc(dport->bfa, 0);
6731 return BFA_STATUS_DIAG_BUSY;
6735 * Check to see if port is disable or in dport state
6737 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6738 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6739 bfa_trc(dport->bfa, 0);
6740 return BFA_STATUS_PORT_NOT_DISABLED;
6744 * Check if dport is in dynamic mode
6746 if (dport->dynamic)
6747 return BFA_STATUS_DDPORT_ERR;
6750 * Check if dport is busy
6752 if (bfa_dport_is_sending_req(dport))
6753 return BFA_STATUS_DEVBUSY;
6756 * Check if dport is already enabled
6758 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6759 bfa_trc(dport->bfa, 0);
6760 return BFA_STATUS_DPORT_ENABLED;
6763 bfa_trc(dport->bfa, lpcnt);
6764 bfa_trc(dport->bfa, pat);
6765 dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6766 dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6767 dport->cbfn = cbfn;
6768 dport->cbarg = cbarg;
6770 bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6771 return BFA_STATUS_OK;
6775 * Dport disable
6777 * @param[in] *bfa - bfa data struct
6779 bfa_status_t
6780 bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6782 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6783 struct bfa_dport_s *dport = &fcdiag->dport;
6785 if (bfa_ioc_is_disabled(&bfa->ioc))
6786 return BFA_STATUS_IOC_DISABLED;
6788 /* if port is PBC disabled, return error */
6789 if (bfa_fcport_is_pbcdisabled(bfa)) {
6790 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6791 return BFA_STATUS_PBC;
6795 * Check if dport is in dynamic mode
6797 if (dport->dynamic) {
6798 return BFA_STATUS_DDPORT_ERR;
6802 * Check to see if port is disable or in dport state
6804 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6805 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6806 bfa_trc(dport->bfa, 0);
6807 return BFA_STATUS_PORT_NOT_DISABLED;
6811 * Check if dport is busy
6813 if (bfa_dport_is_sending_req(dport))
6814 return BFA_STATUS_DEVBUSY;
6817 * Check if dport is already disabled
6819 if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6820 bfa_trc(dport->bfa, 0);
6821 return BFA_STATUS_DPORT_DISABLED;
6824 dport->cbfn = cbfn;
6825 dport->cbarg = cbarg;
6827 bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6828 return BFA_STATUS_OK;
6832 * Dport start -- restart dport test
6834 * @param[in] *bfa - bfa data struct
6836 bfa_status_t
6837 bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6838 bfa_cb_diag_t cbfn, void *cbarg)
6840 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6841 struct bfa_dport_s *dport = &fcdiag->dport;
6844 * Check to see if IOC is down
6846 if (!bfa_iocfc_is_operational(bfa))
6847 return BFA_STATUS_IOC_NON_OP;
6850 * Check if dport is in dynamic mode
6852 if (dport->dynamic)
6853 return BFA_STATUS_DDPORT_ERR;
6856 * Check if dport is busy
6858 if (bfa_dport_is_sending_req(dport))
6859 return BFA_STATUS_DEVBUSY;
6862 * Check if dport is in enabled state.
6863 * Test can only be restart when previous test has completed
6865 if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6866 bfa_trc(dport->bfa, 0);
6867 return BFA_STATUS_DPORT_DISABLED;
6869 } else {
6870 if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6871 return BFA_STATUS_DPORT_INV_SFP;
6873 if (dport->test_state == BFA_DPORT_ST_INP)
6874 return BFA_STATUS_DEVBUSY;
6876 WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
6879 bfa_trc(dport->bfa, lpcnt);
6880 bfa_trc(dport->bfa, pat);
6882 dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6883 dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6885 dport->cbfn = cbfn;
6886 dport->cbarg = cbarg;
6888 bfa_sm_send_event(dport, BFA_DPORT_SM_START);
6889 return BFA_STATUS_OK;
6893 * Dport show -- return dport test result
6895 * @param[in] *bfa - bfa data struct
6897 bfa_status_t
6898 bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
6900 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6901 struct bfa_dport_s *dport = &fcdiag->dport;
6904 * Check to see if IOC is down
6906 if (!bfa_iocfc_is_operational(bfa))
6907 return BFA_STATUS_IOC_NON_OP;
6910 * Check if dport is busy
6912 if (bfa_dport_is_sending_req(dport))
6913 return BFA_STATUS_DEVBUSY;
6916 * Check if dport is in enabled state.
6918 if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6919 bfa_trc(dport->bfa, 0);
6920 return BFA_STATUS_DPORT_DISABLED;
6925 * Check if there is SFP
6927 if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6928 return BFA_STATUS_DPORT_INV_SFP;
6930 memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
6932 return BFA_STATUS_OK;