/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
21 #include "bfa_modules.h"
23 BFA_TRC_FILE(HAL
, FCXP
);
32 * LPS related definitions
34 #define BFA_LPS_MIN_LPORTS (1)
35 #define BFA_LPS_MAX_LPORTS (256)
38 * Maximum Vports supported per physical port or vf.
40 #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
45 * FC PORT related definitions
48 * The port is considered disabled if corresponding physical port or IOC are
51 #define BFA_PORT_IS_DISABLED(bfa) \
52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
56 * BFA port state machine events
58 enum bfa_fcport_sm_event
{
59 BFA_FCPORT_SM_START
= 1, /* start port state machine */
60 BFA_FCPORT_SM_STOP
= 2, /* stop port state machine */
61 BFA_FCPORT_SM_ENABLE
= 3, /* enable port */
62 BFA_FCPORT_SM_DISABLE
= 4, /* disable port state machine */
63 BFA_FCPORT_SM_FWRSP
= 5, /* firmware enable/disable rsp */
64 BFA_FCPORT_SM_LINKUP
= 6, /* firmware linkup event */
65 BFA_FCPORT_SM_LINKDOWN
= 7, /* firmware linkup down */
66 BFA_FCPORT_SM_QRESUME
= 8, /* CQ space available */
67 BFA_FCPORT_SM_HWFAIL
= 9, /* IOC h/w failure */
71 * BFA port link notification state machine events
74 enum bfa_fcport_ln_sm_event
{
75 BFA_FCPORT_LN_SM_LINKUP
= 1, /* linkup event */
76 BFA_FCPORT_LN_SM_LINKDOWN
= 2, /* linkdown event */
77 BFA_FCPORT_LN_SM_NOTIFICATION
= 3 /* done notification */
81 * RPORT related definitions
/*
 * Deliver an rport offline notification: directly through the FCS callback
 * when FCS is attached, otherwise deferred via the HCB callback queue.
 * (else branch and while(0) tail reconstructed from mangled source -- confirm.)
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)
/*
 * Deliver an rport online notification: directly through the FCS callback
 * when FCS is attached, otherwise deferred via the HCB callback queue.
 * (else branch and while(0) tail reconstructed from mangled source -- confirm.)
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
102 * forward declarations FCXP related functions
104 static void __bfa_fcxp_send_cbfn(void *cbarg
, bfa_boolean_t complete
);
105 static void hal_fcxp_rx_plog(struct bfa_s
*bfa
, struct bfa_fcxp_s
*fcxp
,
106 struct bfi_fcxp_send_rsp_s
*fcxp_rsp
);
107 static void hal_fcxp_tx_plog(struct bfa_s
*bfa
, u32 reqlen
,
108 struct bfa_fcxp_s
*fcxp
, struct fchs_s
*fchs
);
109 static void bfa_fcxp_qresume(void *cbarg
);
110 static void bfa_fcxp_queue(struct bfa_fcxp_s
*fcxp
,
111 struct bfi_fcxp_send_req_s
*send_req
);
114 * forward declarations for LPS functions
116 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
118 static void bfa_lps_attach(struct bfa_s
*bfa
, void *bfad
,
119 struct bfa_iocfc_cfg_s
*cfg
,
120 struct bfa_meminfo_s
*meminfo
,
121 struct bfa_pcidev_s
*pcidev
);
122 static void bfa_lps_detach(struct bfa_s
*bfa
);
123 static void bfa_lps_start(struct bfa_s
*bfa
);
124 static void bfa_lps_stop(struct bfa_s
*bfa
);
125 static void bfa_lps_iocdisable(struct bfa_s
*bfa
);
126 static void bfa_lps_login_rsp(struct bfa_s
*bfa
,
127 struct bfi_lps_login_rsp_s
*rsp
);
128 static void bfa_lps_logout_rsp(struct bfa_s
*bfa
,
129 struct bfi_lps_logout_rsp_s
*rsp
);
130 static void bfa_lps_reqq_resume(void *lps_arg
);
131 static void bfa_lps_free(struct bfa_lps_s
*lps
);
132 static void bfa_lps_send_login(struct bfa_lps_s
*lps
);
133 static void bfa_lps_send_logout(struct bfa_lps_s
*lps
);
134 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s
*lps
);
135 static void bfa_lps_login_comp(struct bfa_lps_s
*lps
);
136 static void bfa_lps_logout_comp(struct bfa_lps_s
*lps
);
137 static void bfa_lps_cvl_event(struct bfa_lps_s
*lps
);
140 * forward declaration for LPS state machine
142 static void bfa_lps_sm_init(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
143 static void bfa_lps_sm_login(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
144 static void bfa_lps_sm_loginwait(struct bfa_lps_s
*lps
, enum bfa_lps_event
146 static void bfa_lps_sm_online(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
147 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s
*lps
,
148 enum bfa_lps_event event
);
149 static void bfa_lps_sm_logout(struct bfa_lps_s
*lps
, enum bfa_lps_event event
);
150 static void bfa_lps_sm_logowait(struct bfa_lps_s
*lps
, enum bfa_lps_event
154 * forward declaration for FC Port functions
156 static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s
*fcport
);
157 static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s
*fcport
);
158 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s
*fcport
);
159 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s
*fcport
);
160 static void bfa_fcport_set_wwns(struct bfa_fcport_s
*fcport
);
161 static void __bfa_cb_fcport_event(void *cbarg
, bfa_boolean_t complete
);
162 static void bfa_fcport_scn(struct bfa_fcport_s
*fcport
,
163 enum bfa_port_linkstate event
, bfa_boolean_t trunk
);
164 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s
*ln
,
165 enum bfa_port_linkstate event
);
166 static void __bfa_cb_fcport_stats_clr(void *cbarg
, bfa_boolean_t complete
);
167 static void bfa_fcport_stats_get_timeout(void *cbarg
);
168 static void bfa_fcport_stats_clr_timeout(void *cbarg
);
169 static void bfa_trunk_iocdisable(struct bfa_s
*bfa
);
172 * forward declaration for FC PORT state machine
174 static void bfa_fcport_sm_uninit(struct bfa_fcport_s
*fcport
,
175 enum bfa_fcport_sm_event event
);
176 static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s
*fcport
,
177 enum bfa_fcport_sm_event event
);
178 static void bfa_fcport_sm_enabling(struct bfa_fcport_s
*fcport
,
179 enum bfa_fcport_sm_event event
);
180 static void bfa_fcport_sm_linkdown(struct bfa_fcport_s
*fcport
,
181 enum bfa_fcport_sm_event event
);
182 static void bfa_fcport_sm_linkup(struct bfa_fcport_s
*fcport
,
183 enum bfa_fcport_sm_event event
);
184 static void bfa_fcport_sm_disabling(struct bfa_fcport_s
*fcport
,
185 enum bfa_fcport_sm_event event
);
186 static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s
*fcport
,
187 enum bfa_fcport_sm_event event
);
188 static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s
*fcport
,
189 enum bfa_fcport_sm_event event
);
190 static void bfa_fcport_sm_disabled(struct bfa_fcport_s
*fcport
,
191 enum bfa_fcport_sm_event event
);
192 static void bfa_fcport_sm_stopped(struct bfa_fcport_s
*fcport
,
193 enum bfa_fcport_sm_event event
);
194 static void bfa_fcport_sm_iocdown(struct bfa_fcport_s
*fcport
,
195 enum bfa_fcport_sm_event event
);
196 static void bfa_fcport_sm_iocfail(struct bfa_fcport_s
*fcport
,
197 enum bfa_fcport_sm_event event
);
199 static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s
*ln
,
200 enum bfa_fcport_ln_sm_event event
);
201 static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s
*ln
,
202 enum bfa_fcport_ln_sm_event event
);
203 static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
204 enum bfa_fcport_ln_sm_event event
);
205 static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s
*ln
,
206 enum bfa_fcport_ln_sm_event event
);
207 static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s
*ln
,
208 enum bfa_fcport_ln_sm_event event
);
209 static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s
*ln
,
210 enum bfa_fcport_ln_sm_event event
);
211 static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
212 enum bfa_fcport_ln_sm_event event
);
214 static struct bfa_sm_table_s hal_port_sm_table
[] = {
215 {BFA_SM(bfa_fcport_sm_uninit
), BFA_PORT_ST_UNINIT
},
216 {BFA_SM(bfa_fcport_sm_enabling_qwait
), BFA_PORT_ST_ENABLING_QWAIT
},
217 {BFA_SM(bfa_fcport_sm_enabling
), BFA_PORT_ST_ENABLING
},
218 {BFA_SM(bfa_fcport_sm_linkdown
), BFA_PORT_ST_LINKDOWN
},
219 {BFA_SM(bfa_fcport_sm_linkup
), BFA_PORT_ST_LINKUP
},
220 {BFA_SM(bfa_fcport_sm_disabling_qwait
), BFA_PORT_ST_DISABLING_QWAIT
},
221 {BFA_SM(bfa_fcport_sm_toggling_qwait
), BFA_PORT_ST_TOGGLING_QWAIT
},
222 {BFA_SM(bfa_fcport_sm_disabling
), BFA_PORT_ST_DISABLING
},
223 {BFA_SM(bfa_fcport_sm_disabled
), BFA_PORT_ST_DISABLED
},
224 {BFA_SM(bfa_fcport_sm_stopped
), BFA_PORT_ST_STOPPED
},
225 {BFA_SM(bfa_fcport_sm_iocdown
), BFA_PORT_ST_IOCDOWN
},
226 {BFA_SM(bfa_fcport_sm_iocfail
), BFA_PORT_ST_IOCDOWN
},
231 * forward declaration for RPORT related functions
233 static struct bfa_rport_s
*bfa_rport_alloc(struct bfa_rport_mod_s
*rp_mod
);
234 static void bfa_rport_free(struct bfa_rport_s
*rport
);
235 static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s
*rp
);
236 static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s
*rp
);
237 static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s
*rp
);
238 static void __bfa_cb_rport_online(void *cbarg
,
239 bfa_boolean_t complete
);
240 static void __bfa_cb_rport_offline(void *cbarg
,
241 bfa_boolean_t complete
);
244 * forward declaration for RPORT state machine
246 static void bfa_rport_sm_uninit(struct bfa_rport_s
*rp
,
247 enum bfa_rport_event event
);
248 static void bfa_rport_sm_created(struct bfa_rport_s
*rp
,
249 enum bfa_rport_event event
);
250 static void bfa_rport_sm_fwcreate(struct bfa_rport_s
*rp
,
251 enum bfa_rport_event event
);
252 static void bfa_rport_sm_online(struct bfa_rport_s
*rp
,
253 enum bfa_rport_event event
);
254 static void bfa_rport_sm_fwdelete(struct bfa_rport_s
*rp
,
255 enum bfa_rport_event event
);
256 static void bfa_rport_sm_offline(struct bfa_rport_s
*rp
,
257 enum bfa_rport_event event
);
258 static void bfa_rport_sm_deleting(struct bfa_rport_s
*rp
,
259 enum bfa_rport_event event
);
260 static void bfa_rport_sm_offline_pending(struct bfa_rport_s
*rp
,
261 enum bfa_rport_event event
);
262 static void bfa_rport_sm_delete_pending(struct bfa_rport_s
*rp
,
263 enum bfa_rport_event event
);
264 static void bfa_rport_sm_iocdisable(struct bfa_rport_s
*rp
,
265 enum bfa_rport_event event
);
266 static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s
*rp
,
267 enum bfa_rport_event event
);
268 static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s
*rp
,
269 enum bfa_rport_event event
);
270 static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s
*rp
,
271 enum bfa_rport_event event
);
274 * PLOG related definitions
277 plkd_validate_logrec(struct bfa_plog_rec_s
*pl_rec
)
279 if ((pl_rec
->log_type
!= BFA_PL_LOG_TYPE_INT
) &&
280 (pl_rec
->log_type
!= BFA_PL_LOG_TYPE_STRING
))
283 if ((pl_rec
->log_type
!= BFA_PL_LOG_TYPE_INT
) &&
284 (pl_rec
->log_num_ints
> BFA_PL_INT_LOG_SZ
))
291 bfa_get_log_time(void)
295 do_gettimeofday(&tv
);
297 /* We are interested in seconds only. */
298 system_time
= tv
.tv_sec
;
303 bfa_plog_add(struct bfa_plog_s
*plog
, struct bfa_plog_rec_s
*pl_rec
)
306 struct bfa_plog_rec_s
*pl_recp
;
308 if (plog
->plog_enabled
== 0)
311 if (plkd_validate_logrec(pl_rec
)) {
318 pl_recp
= &(plog
->plog_recs
[tail
]);
320 memcpy(pl_recp
, pl_rec
, sizeof(struct bfa_plog_rec_s
));
322 pl_recp
->tv
= bfa_get_log_time();
323 BFA_PL_LOG_REC_INCR(plog
->tail
);
325 if (plog
->head
== plog
->tail
)
326 BFA_PL_LOG_REC_INCR(plog
->head
);
330 bfa_plog_init(struct bfa_plog_s
*plog
)
332 memset((char *)plog
, 0, sizeof(struct bfa_plog_s
));
334 memcpy(plog
->plog_sig
, BFA_PL_SIG_STR
, BFA_PL_SIG_LEN
);
335 plog
->head
= plog
->tail
= 0;
336 plog
->plog_enabled
= 1;
340 bfa_plog_str(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
341 enum bfa_plog_eid event
,
342 u16 misc
, char *log_str
)
344 struct bfa_plog_rec_s lp
;
346 if (plog
->plog_enabled
) {
347 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
350 lp
.log_type
= BFA_PL_LOG_TYPE_STRING
;
352 strncpy(lp
.log_entry
.string_log
, log_str
,
353 BFA_PL_STRING_LOG_SZ
- 1);
354 lp
.log_entry
.string_log
[BFA_PL_STRING_LOG_SZ
- 1] = '\0';
355 bfa_plog_add(plog
, &lp
);
360 bfa_plog_intarr(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
361 enum bfa_plog_eid event
,
362 u16 misc
, u32
*intarr
, u32 num_ints
)
364 struct bfa_plog_rec_s lp
;
367 if (num_ints
> BFA_PL_INT_LOG_SZ
)
368 num_ints
= BFA_PL_INT_LOG_SZ
;
370 if (plog
->plog_enabled
) {
371 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
374 lp
.log_type
= BFA_PL_LOG_TYPE_INT
;
377 for (i
= 0; i
< num_ints
; i
++)
378 lp
.log_entry
.int_log
[i
] = intarr
[i
];
380 lp
.log_num_ints
= (u8
) num_ints
;
382 bfa_plog_add(plog
, &lp
);
387 bfa_plog_fchdr(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
388 enum bfa_plog_eid event
,
389 u16 misc
, struct fchs_s
*fchdr
)
391 struct bfa_plog_rec_s lp
;
392 u32
*tmp_int
= (u32
*) fchdr
;
393 u32 ints
[BFA_PL_INT_LOG_SZ
];
395 if (plog
->plog_enabled
) {
396 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
398 ints
[0] = tmp_int
[0];
399 ints
[1] = tmp_int
[1];
400 ints
[2] = tmp_int
[4];
402 bfa_plog_intarr(plog
, mid
, event
, misc
, ints
, 3);
407 bfa_plog_fchdr_and_pl(struct bfa_plog_s
*plog
, enum bfa_plog_mid mid
,
408 enum bfa_plog_eid event
, u16 misc
, struct fchs_s
*fchdr
,
411 struct bfa_plog_rec_s lp
;
412 u32
*tmp_int
= (u32
*) fchdr
;
413 u32 ints
[BFA_PL_INT_LOG_SZ
];
415 if (plog
->plog_enabled
) {
416 memset(&lp
, 0, sizeof(struct bfa_plog_rec_s
));
418 ints
[0] = tmp_int
[0];
419 ints
[1] = tmp_int
[1];
420 ints
[2] = tmp_int
[4];
423 bfa_plog_intarr(plog
, mid
, event
, misc
, ints
, 4);
429 * fcxp_pvt BFA FCXP private functions
433 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s
*mod
, struct bfa_meminfo_s
*mi
)
439 dm_kva
= bfa_meminfo_dma_virt(mi
);
440 dm_pa
= bfa_meminfo_dma_phys(mi
);
442 buf_pool_sz
= mod
->req_pld_sz
* mod
->num_fcxps
;
445 * Initialize the fcxp req payload list
447 mod
->req_pld_list_kva
= dm_kva
;
448 mod
->req_pld_list_pa
= dm_pa
;
449 dm_kva
+= buf_pool_sz
;
450 dm_pa
+= buf_pool_sz
;
451 memset(mod
->req_pld_list_kva
, 0, buf_pool_sz
);
454 * Initialize the fcxp rsp payload list
456 buf_pool_sz
= mod
->rsp_pld_sz
* mod
->num_fcxps
;
457 mod
->rsp_pld_list_kva
= dm_kva
;
458 mod
->rsp_pld_list_pa
= dm_pa
;
459 dm_kva
+= buf_pool_sz
;
460 dm_pa
+= buf_pool_sz
;
461 memset(mod
->rsp_pld_list_kva
, 0, buf_pool_sz
);
463 bfa_meminfo_dma_virt(mi
) = dm_kva
;
464 bfa_meminfo_dma_phys(mi
) = dm_pa
;
468 claim_fcxps_mem(struct bfa_fcxp_mod_s
*mod
, struct bfa_meminfo_s
*mi
)
471 struct bfa_fcxp_s
*fcxp
;
473 fcxp
= (struct bfa_fcxp_s
*) bfa_meminfo_kva(mi
);
474 memset(fcxp
, 0, sizeof(struct bfa_fcxp_s
) * mod
->num_fcxps
);
476 INIT_LIST_HEAD(&mod
->fcxp_free_q
);
477 INIT_LIST_HEAD(&mod
->fcxp_active_q
);
479 mod
->fcxp_list
= fcxp
;
481 for (i
= 0; i
< mod
->num_fcxps
; i
++) {
482 fcxp
->fcxp_mod
= mod
;
485 list_add_tail(&fcxp
->qe
, &mod
->fcxp_free_q
);
486 bfa_reqq_winit(&fcxp
->reqq_wqe
, bfa_fcxp_qresume
, fcxp
);
487 fcxp
->reqq_waiting
= BFA_FALSE
;
492 bfa_meminfo_kva(mi
) = (void *)fcxp
;
496 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
499 u16 num_fcxp_reqs
= cfg
->fwcfg
.num_fcxp_reqs
;
501 if (num_fcxp_reqs
== 0)
505 * Account for req/rsp payload
507 *dm_len
+= BFA_FCXP_MAX_IBUF_SZ
* num_fcxp_reqs
;
508 if (cfg
->drvcfg
.min_cfg
)
509 *dm_len
+= BFA_FCXP_MAX_IBUF_SZ
* num_fcxp_reqs
;
511 *dm_len
+= BFA_FCXP_MAX_LBUF_SZ
* num_fcxp_reqs
;
514 * Account for fcxp structs
516 *ndm_len
+= sizeof(struct bfa_fcxp_s
) * num_fcxp_reqs
;
520 bfa_fcxp_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
521 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
523 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
525 memset(mod
, 0, sizeof(struct bfa_fcxp_mod_s
));
527 mod
->num_fcxps
= cfg
->fwcfg
.num_fcxp_reqs
;
530 * Initialize FCXP request and response payload sizes.
532 mod
->req_pld_sz
= mod
->rsp_pld_sz
= BFA_FCXP_MAX_IBUF_SZ
;
533 if (!cfg
->drvcfg
.min_cfg
)
534 mod
->rsp_pld_sz
= BFA_FCXP_MAX_LBUF_SZ
;
536 INIT_LIST_HEAD(&mod
->wait_q
);
538 claim_fcxp_req_rsp_mem(mod
, meminfo
);
539 claim_fcxps_mem(mod
, meminfo
);
/* Module hook: nothing to tear down for the FCXP module. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/* Module hook: FCXP needs no work at start time. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/* Module hook: FCXP needs no work at stop time. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
558 bfa_fcxp_iocdisable(struct bfa_s
*bfa
)
560 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
561 struct bfa_fcxp_s
*fcxp
;
562 struct list_head
*qe
, *qen
;
564 list_for_each_safe(qe
, qen
, &mod
->fcxp_active_q
) {
565 fcxp
= (struct bfa_fcxp_s
*) qe
;
566 if (fcxp
->caller
== NULL
) {
567 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
568 BFA_STATUS_IOC_FAILURE
, 0, 0, NULL
);
571 fcxp
->rsp_status
= BFA_STATUS_IOC_FAILURE
;
572 bfa_cb_queue(bfa
, &fcxp
->hcb_qe
,
573 __bfa_fcxp_send_cbfn
, fcxp
);
578 static struct bfa_fcxp_s
*
579 bfa_fcxp_get(struct bfa_fcxp_mod_s
*fm
)
581 struct bfa_fcxp_s
*fcxp
;
583 bfa_q_deq(&fm
->fcxp_free_q
, &fcxp
);
586 list_add_tail(&fcxp
->qe
, &fm
->fcxp_active_q
);
592 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s
*fcxp
,
596 bfa_fcxp_get_sgaddr_t
*r_sga_cbfn
,
597 bfa_fcxp_get_sglen_t
*r_sglen_cbfn
,
598 struct list_head
*r_sgpg_q
,
600 bfa_fcxp_get_sgaddr_t sga_cbfn
,
601 bfa_fcxp_get_sglen_t sglen_cbfn
)
604 WARN_ON(bfa
== NULL
);
606 bfa_trc(bfa
, fcxp
->fcxp_tag
);
611 WARN_ON(*sga_cbfn
== NULL
);
612 WARN_ON(*sglen_cbfn
== NULL
);
615 *r_sga_cbfn
= sga_cbfn
;
616 *r_sglen_cbfn
= sglen_cbfn
;
621 * alloc required sgpgs
623 if (n_sgles
> BFI_SGE_INLINE
)
630 bfa_fcxp_init(struct bfa_fcxp_s
*fcxp
,
631 void *caller
, struct bfa_s
*bfa
, int nreq_sgles
,
632 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
633 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
634 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
635 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
638 WARN_ON(bfa
== NULL
);
640 bfa_trc(bfa
, fcxp
->fcxp_tag
);
642 fcxp
->caller
= caller
;
644 bfa_fcxp_init_reqrsp(fcxp
, bfa
,
645 &fcxp
->use_ireqbuf
, &fcxp
->nreq_sgles
, &fcxp
->req_sga_cbfn
,
646 &fcxp
->req_sglen_cbfn
, &fcxp
->req_sgpg_q
,
647 nreq_sgles
, req_sga_cbfn
, req_sglen_cbfn
);
649 bfa_fcxp_init_reqrsp(fcxp
, bfa
,
650 &fcxp
->use_irspbuf
, &fcxp
->nrsp_sgles
, &fcxp
->rsp_sga_cbfn
,
651 &fcxp
->rsp_sglen_cbfn
, &fcxp
->rsp_sgpg_q
,
652 nrsp_sgles
, rsp_sga_cbfn
, rsp_sglen_cbfn
);
657 bfa_fcxp_put(struct bfa_fcxp_s
*fcxp
)
659 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
660 struct bfa_fcxp_wqe_s
*wqe
;
662 bfa_q_deq(&mod
->wait_q
, &wqe
);
664 bfa_trc(mod
->bfa
, fcxp
->fcxp_tag
);
666 bfa_fcxp_init(fcxp
, wqe
->caller
, wqe
->bfa
, wqe
->nreq_sgles
,
667 wqe
->nrsp_sgles
, wqe
->req_sga_cbfn
,
668 wqe
->req_sglen_cbfn
, wqe
->rsp_sga_cbfn
,
669 wqe
->rsp_sglen_cbfn
);
671 wqe
->alloc_cbfn(wqe
->alloc_cbarg
, fcxp
);
675 WARN_ON(!bfa_q_is_on_q(&mod
->fcxp_active_q
, fcxp
));
677 list_add_tail(&fcxp
->qe
, &mod
->fcxp_free_q
);
681 bfa_fcxp_null_comp(void *bfad_fcxp
, struct bfa_fcxp_s
*fcxp
, void *cbarg
,
682 bfa_status_t req_status
, u32 rsp_len
,
683 u32 resid_len
, struct fchs_s
*rsp_fchs
)
685 /* discarded fcxp completion */
689 __bfa_fcxp_send_cbfn(void *cbarg
, bfa_boolean_t complete
)
691 struct bfa_fcxp_s
*fcxp
= cbarg
;
694 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
695 fcxp
->rsp_status
, fcxp
->rsp_len
,
696 fcxp
->residue_len
, &fcxp
->rsp_fchs
);
703 hal_fcxp_send_comp(struct bfa_s
*bfa
, struct bfi_fcxp_send_rsp_s
*fcxp_rsp
)
705 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
706 struct bfa_fcxp_s
*fcxp
;
707 u16 fcxp_tag
= be16_to_cpu(fcxp_rsp
->fcxp_tag
);
709 bfa_trc(bfa
, fcxp_tag
);
711 fcxp_rsp
->rsp_len
= be32_to_cpu(fcxp_rsp
->rsp_len
);
714 * @todo f/w should not set residue to non-0 when everything
717 if (fcxp_rsp
->req_status
== BFA_STATUS_OK
)
718 fcxp_rsp
->residue_len
= 0;
720 fcxp_rsp
->residue_len
= be32_to_cpu(fcxp_rsp
->residue_len
);
722 fcxp
= BFA_FCXP_FROM_TAG(mod
, fcxp_tag
);
724 WARN_ON(fcxp
->send_cbfn
== NULL
);
726 hal_fcxp_rx_plog(mod
->bfa
, fcxp
, fcxp_rsp
);
728 if (fcxp
->send_cbfn
!= NULL
) {
729 bfa_trc(mod
->bfa
, (NULL
== fcxp
->caller
));
730 if (fcxp
->caller
== NULL
) {
731 fcxp
->send_cbfn(fcxp
->caller
, fcxp
, fcxp
->send_cbarg
,
732 fcxp_rsp
->req_status
, fcxp_rsp
->rsp_len
,
733 fcxp_rsp
->residue_len
, &fcxp_rsp
->fchs
);
735 * fcxp automatically freed on return from the callback
739 fcxp
->rsp_status
= fcxp_rsp
->req_status
;
740 fcxp
->rsp_len
= fcxp_rsp
->rsp_len
;
741 fcxp
->residue_len
= fcxp_rsp
->residue_len
;
742 fcxp
->rsp_fchs
= fcxp_rsp
->fchs
;
744 bfa_cb_queue(bfa
, &fcxp
->hcb_qe
,
745 __bfa_fcxp_send_cbfn
, fcxp
);
748 bfa_trc(bfa
, (NULL
== fcxp
->send_cbfn
));
753 hal_fcxp_set_local_sges(struct bfi_sge_s
*sge
, u32 reqlen
, u64 req_pa
)
755 union bfi_addr_u sga_zero
= { {0} };
757 sge
->sg_len
= reqlen
;
758 sge
->flags
= BFI_SGE_DATA_LAST
;
759 bfa_dma_addr_set(sge
[0].sga
, req_pa
);
764 sge
->sg_len
= reqlen
;
765 sge
->flags
= BFI_SGE_PGDLEN
;
770 hal_fcxp_tx_plog(struct bfa_s
*bfa
, u32 reqlen
, struct bfa_fcxp_s
*fcxp
,
777 if (fcxp
->use_ireqbuf
) {
779 *((u32
*) BFA_FCXP_REQ_PLD(fcxp
));
781 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
783 reqlen
+ sizeof(struct fchs_s
), fchs
,
786 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
788 reqlen
+ sizeof(struct fchs_s
),
792 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
, BFA_PL_EID_TX
,
793 reqlen
+ sizeof(struct fchs_s
), fchs
);
798 hal_fcxp_rx_plog(struct bfa_s
*bfa
, struct bfa_fcxp_s
*fcxp
,
799 struct bfi_fcxp_send_rsp_s
*fcxp_rsp
)
801 if (fcxp_rsp
->rsp_len
> 0) {
802 if (fcxp
->use_irspbuf
) {
804 *((u32
*) BFA_FCXP_RSP_PLD(fcxp
));
806 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
808 (u16
) fcxp_rsp
->rsp_len
,
809 &fcxp_rsp
->fchs
, pld_w0
);
811 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
,
813 (u16
) fcxp_rsp
->rsp_len
,
817 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_FCXP
, BFA_PL_EID_RX
,
818 (u16
) fcxp_rsp
->rsp_len
, &fcxp_rsp
->fchs
);
823 * Handler to resume sending fcxp when space in available in cpe queue.
826 bfa_fcxp_qresume(void *cbarg
)
828 struct bfa_fcxp_s
*fcxp
= cbarg
;
829 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
830 struct bfi_fcxp_send_req_s
*send_req
;
832 fcxp
->reqq_waiting
= BFA_FALSE
;
833 send_req
= bfa_reqq_next(bfa
, BFA_REQQ_FCXP
);
834 bfa_fcxp_queue(fcxp
, send_req
);
838 * Queue fcxp send request to foimrware.
841 bfa_fcxp_queue(struct bfa_fcxp_s
*fcxp
, struct bfi_fcxp_send_req_s
*send_req
)
843 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
844 struct bfa_fcxp_req_info_s
*reqi
= &fcxp
->req_info
;
845 struct bfa_fcxp_rsp_info_s
*rspi
= &fcxp
->rsp_info
;
846 struct bfa_rport_s
*rport
= reqi
->bfa_rport
;
848 bfi_h2i_set(send_req
->mh
, BFI_MC_FCXP
, BFI_FCXP_H2I_SEND_REQ
,
851 send_req
->fcxp_tag
= cpu_to_be16(fcxp
->fcxp_tag
);
853 send_req
->rport_fw_hndl
= rport
->fw_handle
;
854 send_req
->max_frmsz
= cpu_to_be16(rport
->rport_info
.max_frmsz
);
855 if (send_req
->max_frmsz
== 0)
856 send_req
->max_frmsz
= cpu_to_be16(FC_MAX_PDUSZ
);
858 send_req
->rport_fw_hndl
= 0;
859 send_req
->max_frmsz
= cpu_to_be16(FC_MAX_PDUSZ
);
862 send_req
->vf_id
= cpu_to_be16(reqi
->vf_id
);
863 send_req
->lp_tag
= reqi
->lp_tag
;
864 send_req
->class = reqi
->class;
865 send_req
->rsp_timeout
= rspi
->rsp_timeout
;
866 send_req
->cts
= reqi
->cts
;
867 send_req
->fchs
= reqi
->fchs
;
869 send_req
->req_len
= cpu_to_be32(reqi
->req_tot_len
);
870 send_req
->rsp_maxlen
= cpu_to_be32(rspi
->rsp_maxlen
);
875 if (fcxp
->use_ireqbuf
== 1) {
876 hal_fcxp_set_local_sges(send_req
->req_sge
, reqi
->req_tot_len
,
877 BFA_FCXP_REQ_PLD_PA(fcxp
));
879 if (fcxp
->nreq_sgles
> 0) {
880 WARN_ON(fcxp
->nreq_sgles
!= 1);
881 hal_fcxp_set_local_sges(send_req
->req_sge
,
883 fcxp
->req_sga_cbfn(fcxp
->caller
,
886 WARN_ON(reqi
->req_tot_len
!= 0);
887 hal_fcxp_set_local_sges(send_req
->rsp_sge
, 0, 0);
894 if (fcxp
->use_irspbuf
== 1) {
895 WARN_ON(rspi
->rsp_maxlen
> BFA_FCXP_MAX_LBUF_SZ
);
897 hal_fcxp_set_local_sges(send_req
->rsp_sge
, rspi
->rsp_maxlen
,
898 BFA_FCXP_RSP_PLD_PA(fcxp
));
901 if (fcxp
->nrsp_sgles
> 0) {
902 WARN_ON(fcxp
->nrsp_sgles
!= 1);
903 hal_fcxp_set_local_sges(send_req
->rsp_sge
,
905 fcxp
->rsp_sga_cbfn(fcxp
->caller
,
908 WARN_ON(rspi
->rsp_maxlen
!= 0);
909 hal_fcxp_set_local_sges(send_req
->rsp_sge
, 0, 0);
913 hal_fcxp_tx_plog(bfa
, reqi
->req_tot_len
, fcxp
, &reqi
->fchs
);
915 bfa_reqq_produce(bfa
, BFA_REQQ_FCXP
);
917 bfa_trc(bfa
, bfa_reqq_pi(bfa
, BFA_REQQ_FCXP
));
918 bfa_trc(bfa
, bfa_reqq_ci(bfa
, BFA_REQQ_FCXP
));
922 * Allocate an FCXP instance to send a response or to send a request
923 * that has a response. Request/response buffers are allocated by caller.
925 * @param[in] bfa BFA bfa instance
926 * @param[in] nreq_sgles Number of SG elements required for request
927 * buffer. 0, if fcxp internal buffers are used.
928 * Use bfa_fcxp_get_reqbuf() to get the
929 * internal req buffer.
930 * @param[in] req_sgles SG elements describing request buffer. Will be
931 * copied in by BFA and hence can be freed on
932 * return from this function.
933 * @param[in] get_req_sga function ptr to be called to get a request SG
934 * Address (given the sge index).
935 * @param[in] get_req_sglen function ptr to be called to get a request SG
936 * len (given the sge index).
937 * @param[in] get_rsp_sga function ptr to be called to get a response SG
938 * Address (given the sge index).
939 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
940 * len (given the sge index).
942 * @return FCXP instance. NULL on failure.
945 bfa_fcxp_alloc(void *caller
, struct bfa_s
*bfa
, int nreq_sgles
,
946 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
947 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
948 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
949 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
951 struct bfa_fcxp_s
*fcxp
= NULL
;
953 WARN_ON(bfa
== NULL
);
955 fcxp
= bfa_fcxp_get(BFA_FCXP_MOD(bfa
));
959 bfa_trc(bfa
, fcxp
->fcxp_tag
);
961 bfa_fcxp_init(fcxp
, caller
, bfa
, nreq_sgles
, nrsp_sgles
, req_sga_cbfn
,
962 req_sglen_cbfn
, rsp_sga_cbfn
, rsp_sglen_cbfn
);
968 * Get the internal request buffer pointer
970 * @param[in] fcxp BFA fcxp pointer
972 * @return pointer to the internal request buffer
975 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s
*fcxp
)
977 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
980 WARN_ON(fcxp
->use_ireqbuf
!= 1);
981 reqbuf
= ((u8
*)mod
->req_pld_list_kva
) +
982 fcxp
->fcxp_tag
* mod
->req_pld_sz
;
987 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s
*fcxp
)
989 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
991 return mod
->req_pld_sz
;
995 * Get the internal response buffer pointer
997 * @param[in] fcxp BFA fcxp pointer
999 * @return pointer to the internal request buffer
1002 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s
*fcxp
)
1004 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1007 WARN_ON(fcxp
->use_irspbuf
!= 1);
1009 rspbuf
= ((u8
*)mod
->rsp_pld_list_kva
) +
1010 fcxp
->fcxp_tag
* mod
->rsp_pld_sz
;
1017 * @param[in] fcxp BFA fcxp pointer
1022 bfa_fcxp_free(struct bfa_fcxp_s
*fcxp
)
1024 struct bfa_fcxp_mod_s
*mod
= fcxp
->fcxp_mod
;
1026 WARN_ON(fcxp
== NULL
);
1027 bfa_trc(mod
->bfa
, fcxp
->fcxp_tag
);
1032 * Send a FCXP request
1034 * @param[in] fcxp BFA fcxp pointer
1035 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1036 * @param[in] vf_id virtual Fabric ID
1037 * @param[in] lp_tag lport tag
1038 * @param[in] cts use Continuous sequence
1039 * @param[in] cos fc Class of Service
1040 * @param[in] reqlen request length, does not include FCHS length
1041 * @param[in] fchs fc Header Pointer. The header content will be copied
1044 * @param[in] cbfn call back function to be called on receiving
1046 * @param[in] cbarg arg for cbfn
1047 * @param[in] rsp_timeout
1050 * @return bfa_status_t
1053 bfa_fcxp_send(struct bfa_fcxp_s
*fcxp
, struct bfa_rport_s
*rport
,
1054 u16 vf_id
, u8 lp_tag
, bfa_boolean_t cts
, enum fc_cos cos
,
1055 u32 reqlen
, struct fchs_s
*fchs
, bfa_cb_fcxp_send_t cbfn
,
1056 void *cbarg
, u32 rsp_maxlen
, u8 rsp_timeout
)
1058 struct bfa_s
*bfa
= fcxp
->fcxp_mod
->bfa
;
1059 struct bfa_fcxp_req_info_s
*reqi
= &fcxp
->req_info
;
1060 struct bfa_fcxp_rsp_info_s
*rspi
= &fcxp
->rsp_info
;
1061 struct bfi_fcxp_send_req_s
*send_req
;
1063 bfa_trc(bfa
, fcxp
->fcxp_tag
);
1066 * setup request/response info
1068 reqi
->bfa_rport
= rport
;
1069 reqi
->vf_id
= vf_id
;
1070 reqi
->lp_tag
= lp_tag
;
1072 rspi
->rsp_timeout
= rsp_timeout
;
1075 reqi
->req_tot_len
= reqlen
;
1076 rspi
->rsp_maxlen
= rsp_maxlen
;
1077 fcxp
->send_cbfn
= cbfn
? cbfn
: bfa_fcxp_null_comp
;
1078 fcxp
->send_cbarg
= cbarg
;
1081 * If no room in CPE queue, wait for space in request queue
1083 send_req
= bfa_reqq_next(bfa
, BFA_REQQ_FCXP
);
1085 bfa_trc(bfa
, fcxp
->fcxp_tag
);
1086 fcxp
->reqq_waiting
= BFA_TRUE
;
1087 bfa_reqq_wait(bfa
, BFA_REQQ_FCXP
, &fcxp
->reqq_wqe
);
1091 bfa_fcxp_queue(fcxp
, send_req
);
1097 * @param[in] fcxp BFA fcxp pointer
1102 bfa_fcxp_abort(struct bfa_fcxp_s
*fcxp
)
1104 bfa_trc(fcxp
->fcxp_mod
->bfa
, fcxp
->fcxp_tag
);
1106 return BFA_STATUS_OK
;
1110 bfa_fcxp_alloc_wait(struct bfa_s
*bfa
, struct bfa_fcxp_wqe_s
*wqe
,
1111 bfa_fcxp_alloc_cbfn_t alloc_cbfn
, void *alloc_cbarg
,
1112 void *caller
, int nreq_sgles
,
1113 int nrsp_sgles
, bfa_fcxp_get_sgaddr_t req_sga_cbfn
,
1114 bfa_fcxp_get_sglen_t req_sglen_cbfn
,
1115 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn
,
1116 bfa_fcxp_get_sglen_t rsp_sglen_cbfn
)
1118 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1120 WARN_ON(!list_empty(&mod
->fcxp_free_q
));
1122 wqe
->alloc_cbfn
= alloc_cbfn
;
1123 wqe
->alloc_cbarg
= alloc_cbarg
;
1124 wqe
->caller
= caller
;
1126 wqe
->nreq_sgles
= nreq_sgles
;
1127 wqe
->nrsp_sgles
= nrsp_sgles
;
1128 wqe
->req_sga_cbfn
= req_sga_cbfn
;
1129 wqe
->req_sglen_cbfn
= req_sglen_cbfn
;
1130 wqe
->rsp_sga_cbfn
= rsp_sga_cbfn
;
1131 wqe
->rsp_sglen_cbfn
= rsp_sglen_cbfn
;
1133 list_add_tail(&wqe
->qe
, &mod
->wait_q
);
1137 bfa_fcxp_walloc_cancel(struct bfa_s
*bfa
, struct bfa_fcxp_wqe_s
*wqe
)
1139 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1141 WARN_ON(!bfa_q_is_on_q(&mod
->wait_q
, wqe
));
1146 bfa_fcxp_discard(struct bfa_fcxp_s
*fcxp
)
1149 * If waiting for room in request queue, cancel reqq wait
1152 if (fcxp
->reqq_waiting
) {
1153 fcxp
->reqq_waiting
= BFA_FALSE
;
1154 bfa_reqq_wcancel(&fcxp
->reqq_wqe
);
1155 bfa_fcxp_free(fcxp
);
1159 fcxp
->send_cbfn
= bfa_fcxp_null_comp
;
1163 bfa_fcxp_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
1165 switch (msg
->mhdr
.msg_id
) {
1166 case BFI_FCXP_I2H_SEND_RSP
:
1167 hal_fcxp_send_comp(bfa
, (struct bfi_fcxp_send_rsp_s
*) msg
);
1171 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
1177 bfa_fcxp_get_maxrsp(struct bfa_s
*bfa
)
1179 struct bfa_fcxp_mod_s
*mod
= BFA_FCXP_MOD(bfa
);
1181 return mod
->rsp_pld_sz
;
1186 * BFA LPS state machine functions
1190 * Init state -- no login
1193 bfa_lps_sm_init(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1195 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1196 bfa_trc(lps
->bfa
, event
);
1199 case BFA_LPS_SM_LOGIN
:
1200 if (bfa_reqq_full(lps
->bfa
, lps
->reqq
)) {
1201 bfa_sm_set_state(lps
, bfa_lps_sm_loginwait
);
1202 bfa_reqq_wait(lps
->bfa
, lps
->reqq
, &lps
->wqe
);
1204 bfa_sm_set_state(lps
, bfa_lps_sm_login
);
1205 bfa_lps_send_login(lps
);
1209 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1210 BFA_PL_EID_LOGIN
, 0, "FDISC Request");
1212 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1213 BFA_PL_EID_LOGIN
, 0, "FLOGI Request");
1216 case BFA_LPS_SM_LOGOUT
:
1217 bfa_lps_logout_comp(lps
);
1220 case BFA_LPS_SM_DELETE
:
1224 case BFA_LPS_SM_RX_CVL
:
1225 case BFA_LPS_SM_OFFLINE
:
1228 case BFA_LPS_SM_FWRSP
:
1230 * Could happen when fabric detects loopback and discards
1231 * the lps request. Fw will eventually sent out the timeout
1237 bfa_sm_fault(lps
->bfa
, event
);
1242 * login is in progress -- awaiting response from firmware
1245 bfa_lps_sm_login(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1247 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1248 bfa_trc(lps
->bfa
, event
);
1251 case BFA_LPS_SM_FWRSP
:
1252 if (lps
->status
== BFA_STATUS_OK
) {
1253 bfa_sm_set_state(lps
, bfa_lps_sm_online
);
1255 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1256 BFA_PL_EID_LOGIN
, 0, "FDISC Accept");
1258 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1259 BFA_PL_EID_LOGIN
, 0, "FLOGI Accept");
1260 /* If N2N, send the assigned PID to FW */
1261 bfa_trc(lps
->bfa
, lps
->fport
);
1262 bfa_trc(lps
->bfa
, lps
->lp_pid
);
1264 if (!lps
->fport
&& lps
->lp_pid
)
1265 bfa_sm_send_event(lps
, BFA_LPS_SM_SET_N2N_PID
);
1267 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1269 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1270 BFA_PL_EID_LOGIN
, 0,
1271 "FDISC Fail (RJT or timeout)");
1273 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1274 BFA_PL_EID_LOGIN
, 0,
1275 "FLOGI Fail (RJT or timeout)");
1277 bfa_lps_login_comp(lps
);
1280 case BFA_LPS_SM_OFFLINE
:
1281 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1284 case BFA_LPS_SM_SET_N2N_PID
:
1285 bfa_trc(lps
->bfa
, lps
->fport
);
1286 bfa_trc(lps
->bfa
, lps
->lp_pid
);
1290 bfa_sm_fault(lps
->bfa
, event
);
1295 * login pending - awaiting space in request queue
1298 bfa_lps_sm_loginwait(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1300 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1301 bfa_trc(lps
->bfa
, event
);
1304 case BFA_LPS_SM_RESUME
:
1305 bfa_sm_set_state(lps
, bfa_lps_sm_login
);
1308 case BFA_LPS_SM_OFFLINE
:
1309 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1310 bfa_reqq_wcancel(&lps
->wqe
);
1313 case BFA_LPS_SM_RX_CVL
:
1315 * Login was not even sent out; so when getting out
1316 * of this state, it will appear like a login retry
1317 * after Clear virtual link
1322 bfa_sm_fault(lps
->bfa
, event
);
1330 bfa_lps_sm_online(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1332 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1333 bfa_trc(lps
->bfa
, event
);
1336 case BFA_LPS_SM_LOGOUT
:
1337 if (bfa_reqq_full(lps
->bfa
, lps
->reqq
)) {
1338 bfa_sm_set_state(lps
, bfa_lps_sm_logowait
);
1339 bfa_reqq_wait(lps
->bfa
, lps
->reqq
, &lps
->wqe
);
1341 bfa_sm_set_state(lps
, bfa_lps_sm_logout
);
1342 bfa_lps_send_logout(lps
);
1344 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1345 BFA_PL_EID_LOGO
, 0, "Logout");
1348 case BFA_LPS_SM_RX_CVL
:
1349 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1351 /* Let the vport module know about this event */
1352 bfa_lps_cvl_event(lps
);
1353 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1354 BFA_PL_EID_FIP_FCF_CVL
, 0, "FCF Clear Virt. Link Rx");
1357 case BFA_LPS_SM_SET_N2N_PID
:
1358 if (bfa_reqq_full(lps
->bfa
, lps
->reqq
)) {
1359 bfa_sm_set_state(lps
, bfa_lps_sm_online_n2n_pid_wait
);
1360 bfa_reqq_wait(lps
->bfa
, lps
->reqq
, &lps
->wqe
);
1362 bfa_lps_send_set_n2n_pid(lps
);
1365 case BFA_LPS_SM_OFFLINE
:
1366 case BFA_LPS_SM_DELETE
:
1367 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1371 bfa_sm_fault(lps
->bfa
, event
);
1379 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1381 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1382 bfa_trc(lps
->bfa
, event
);
1385 case BFA_LPS_SM_RESUME
:
1386 bfa_sm_set_state(lps
, bfa_lps_sm_online
);
1387 bfa_lps_send_set_n2n_pid(lps
);
1390 case BFA_LPS_SM_LOGOUT
:
1391 bfa_sm_set_state(lps
, bfa_lps_sm_logowait
);
1392 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1393 BFA_PL_EID_LOGO
, 0, "Logout");
1396 case BFA_LPS_SM_RX_CVL
:
1397 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1398 bfa_reqq_wcancel(&lps
->wqe
);
1400 /* Let the vport module know about this event */
1401 bfa_lps_cvl_event(lps
);
1402 bfa_plog_str(lps
->bfa
->plog
, BFA_PL_MID_LPS
,
1403 BFA_PL_EID_FIP_FCF_CVL
, 0, "FCF Clear Virt. Link Rx");
1406 case BFA_LPS_SM_OFFLINE
:
1407 case BFA_LPS_SM_DELETE
:
1408 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1409 bfa_reqq_wcancel(&lps
->wqe
);
1413 bfa_sm_fault(lps
->bfa
, event
);
1418 * logout in progress - awaiting firmware response
1421 bfa_lps_sm_logout(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1423 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1424 bfa_trc(lps
->bfa
, event
);
1427 case BFA_LPS_SM_FWRSP
:
1428 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1429 bfa_lps_logout_comp(lps
);
1432 case BFA_LPS_SM_OFFLINE
:
1433 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1437 bfa_sm_fault(lps
->bfa
, event
);
1442 * logout pending -- awaiting space in request queue
1445 bfa_lps_sm_logowait(struct bfa_lps_s
*lps
, enum bfa_lps_event event
)
1447 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1448 bfa_trc(lps
->bfa
, event
);
1451 case BFA_LPS_SM_RESUME
:
1452 bfa_sm_set_state(lps
, bfa_lps_sm_logout
);
1453 bfa_lps_send_logout(lps
);
1456 case BFA_LPS_SM_OFFLINE
:
1457 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1458 bfa_reqq_wcancel(&lps
->wqe
);
1462 bfa_sm_fault(lps
->bfa
, event
);
1469 * lps_pvt BFA LPS private functions
1473 * return memory requirement
1476 bfa_lps_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
1479 if (cfg
->drvcfg
.min_cfg
)
1480 *ndm_len
+= sizeof(struct bfa_lps_s
) * BFA_LPS_MIN_LPORTS
;
1482 *ndm_len
+= sizeof(struct bfa_lps_s
) * BFA_LPS_MAX_LPORTS
;
1486 * bfa module attach at initialization time
1489 bfa_lps_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
1490 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
1492 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1493 struct bfa_lps_s
*lps
;
1496 memset(mod
, 0, sizeof(struct bfa_lps_mod_s
));
1497 mod
->num_lps
= BFA_LPS_MAX_LPORTS
;
1498 if (cfg
->drvcfg
.min_cfg
)
1499 mod
->num_lps
= BFA_LPS_MIN_LPORTS
;
1501 mod
->num_lps
= BFA_LPS_MAX_LPORTS
;
1502 mod
->lps_arr
= lps
= (struct bfa_lps_s
*) bfa_meminfo_kva(meminfo
);
1504 bfa_meminfo_kva(meminfo
) += mod
->num_lps
* sizeof(struct bfa_lps_s
);
1506 INIT_LIST_HEAD(&mod
->lps_free_q
);
1507 INIT_LIST_HEAD(&mod
->lps_active_q
);
1509 for (i
= 0; i
< mod
->num_lps
; i
++, lps
++) {
1511 lps
->lp_tag
= (u8
) i
;
1512 lps
->reqq
= BFA_REQQ_LPS
;
1513 bfa_reqq_winit(&lps
->wqe
, bfa_lps_reqq_resume
, lps
);
1514 list_add_tail(&lps
->qe
, &mod
->lps_free_q
);
1519 bfa_lps_detach(struct bfa_s
*bfa
)
1524 bfa_lps_start(struct bfa_s
*bfa
)
1529 bfa_lps_stop(struct bfa_s
*bfa
)
1534 * IOC in disabled state -- consider all lps offline
1537 bfa_lps_iocdisable(struct bfa_s
*bfa
)
1539 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1540 struct bfa_lps_s
*lps
;
1541 struct list_head
*qe
, *qen
;
1543 list_for_each_safe(qe
, qen
, &mod
->lps_active_q
) {
1544 lps
= (struct bfa_lps_s
*) qe
;
1545 bfa_sm_send_event(lps
, BFA_LPS_SM_OFFLINE
);
1550 * Firmware login response
1553 bfa_lps_login_rsp(struct bfa_s
*bfa
, struct bfi_lps_login_rsp_s
*rsp
)
1555 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1556 struct bfa_lps_s
*lps
;
1558 WARN_ON(rsp
->lp_tag
>= mod
->num_lps
);
1559 lps
= BFA_LPS_FROM_TAG(mod
, rsp
->lp_tag
);
1561 lps
->status
= rsp
->status
;
1562 switch (rsp
->status
) {
1564 lps
->fport
= rsp
->f_port
;
1566 lps
->lp_pid
= rsp
->lp_pid
;
1567 lps
->npiv_en
= rsp
->npiv_en
;
1568 lps
->pr_bbcred
= be16_to_cpu(rsp
->bb_credit
);
1569 lps
->pr_pwwn
= rsp
->port_name
;
1570 lps
->pr_nwwn
= rsp
->node_name
;
1571 lps
->auth_req
= rsp
->auth_req
;
1572 lps
->lp_mac
= rsp
->lp_mac
;
1573 lps
->brcd_switch
= rsp
->brcd_switch
;
1574 lps
->fcf_mac
= rsp
->fcf_mac
;
1578 case BFA_STATUS_FABRIC_RJT
:
1579 lps
->lsrjt_rsn
= rsp
->lsrjt_rsn
;
1580 lps
->lsrjt_expl
= rsp
->lsrjt_expl
;
1584 case BFA_STATUS_EPROTOCOL
:
1585 lps
->ext_status
= rsp
->ext_status
;
1590 /* Nothing to do with other status */
1594 bfa_sm_send_event(lps
, BFA_LPS_SM_FWRSP
);
1598 * Firmware logout response
1601 bfa_lps_logout_rsp(struct bfa_s
*bfa
, struct bfi_lps_logout_rsp_s
*rsp
)
1603 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1604 struct bfa_lps_s
*lps
;
1606 WARN_ON(rsp
->lp_tag
>= mod
->num_lps
);
1607 lps
= BFA_LPS_FROM_TAG(mod
, rsp
->lp_tag
);
1609 bfa_sm_send_event(lps
, BFA_LPS_SM_FWRSP
);
1613 * Firmware received a Clear virtual link request (for FCoE)
1616 bfa_lps_rx_cvl_event(struct bfa_s
*bfa
, struct bfi_lps_cvl_event_s
*cvl
)
1618 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1619 struct bfa_lps_s
*lps
;
1621 lps
= BFA_LPS_FROM_TAG(mod
, cvl
->lp_tag
);
1623 bfa_sm_send_event(lps
, BFA_LPS_SM_RX_CVL
);
1627 * Space is available in request queue, resume queueing request to firmware.
1630 bfa_lps_reqq_resume(void *lps_arg
)
1632 struct bfa_lps_s
*lps
= lps_arg
;
1634 bfa_sm_send_event(lps
, BFA_LPS_SM_RESUME
);
1638 * lps is freed -- triggered by vport delete
1641 bfa_lps_free(struct bfa_lps_s
*lps
)
1643 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(lps
->bfa
);
1647 list_add_tail(&lps
->qe
, &mod
->lps_free_q
);
1651 * send login request to firmware
1654 bfa_lps_send_login(struct bfa_lps_s
*lps
)
1656 struct bfi_lps_login_req_s
*m
;
1658 m
= bfa_reqq_next(lps
->bfa
, lps
->reqq
);
1661 bfi_h2i_set(m
->mh
, BFI_MC_LPS
, BFI_LPS_H2I_LOGIN_REQ
,
1662 bfa_lpuid(lps
->bfa
));
1664 m
->lp_tag
= lps
->lp_tag
;
1665 m
->alpa
= lps
->alpa
;
1666 m
->pdu_size
= cpu_to_be16(lps
->pdusz
);
1667 m
->pwwn
= lps
->pwwn
;
1668 m
->nwwn
= lps
->nwwn
;
1669 m
->fdisc
= lps
->fdisc
;
1670 m
->auth_en
= lps
->auth_en
;
1672 bfa_reqq_produce(lps
->bfa
, lps
->reqq
);
1676 * send logout request to firmware
1679 bfa_lps_send_logout(struct bfa_lps_s
*lps
)
1681 struct bfi_lps_logout_req_s
*m
;
1683 m
= bfa_reqq_next(lps
->bfa
, lps
->reqq
);
1686 bfi_h2i_set(m
->mh
, BFI_MC_LPS
, BFI_LPS_H2I_LOGOUT_REQ
,
1687 bfa_lpuid(lps
->bfa
));
1689 m
->lp_tag
= lps
->lp_tag
;
1690 m
->port_name
= lps
->pwwn
;
1691 bfa_reqq_produce(lps
->bfa
, lps
->reqq
);
1695 * send n2n pid set request to firmware
1698 bfa_lps_send_set_n2n_pid(struct bfa_lps_s
*lps
)
1700 struct bfi_lps_n2n_pid_req_s
*m
;
1702 m
= bfa_reqq_next(lps
->bfa
, lps
->reqq
);
1705 bfi_h2i_set(m
->mh
, BFI_MC_LPS
, BFI_LPS_H2I_N2N_PID_REQ
,
1706 bfa_lpuid(lps
->bfa
));
1708 m
->lp_tag
= lps
->lp_tag
;
1709 m
->lp_pid
= lps
->lp_pid
;
1710 bfa_reqq_produce(lps
->bfa
, lps
->reqq
);
1714 * Indirect login completion handler for non-fcs
1717 bfa_lps_login_comp_cb(void *arg
, bfa_boolean_t complete
)
1719 struct bfa_lps_s
*lps
= arg
;
1725 bfa_cb_lps_fdisc_comp(lps
->bfa
->bfad
, lps
->uarg
, lps
->status
);
1727 bfa_cb_lps_flogi_comp(lps
->bfa
->bfad
, lps
->uarg
, lps
->status
);
1731 * Login completion handler -- direct call for fcs, queue for others
1734 bfa_lps_login_comp(struct bfa_lps_s
*lps
)
1736 if (!lps
->bfa
->fcs
) {
1737 bfa_cb_queue(lps
->bfa
, &lps
->hcb_qe
, bfa_lps_login_comp_cb
,
1743 bfa_cb_lps_fdisc_comp(lps
->bfa
->bfad
, lps
->uarg
, lps
->status
);
1745 bfa_cb_lps_flogi_comp(lps
->bfa
->bfad
, lps
->uarg
, lps
->status
);
1749 * Indirect logout completion handler for non-fcs
1752 bfa_lps_logout_comp_cb(void *arg
, bfa_boolean_t complete
)
1754 struct bfa_lps_s
*lps
= arg
;
1760 bfa_cb_lps_fdisclogo_comp(lps
->bfa
->bfad
, lps
->uarg
);
1764 * Logout completion handler -- direct call for fcs, queue for others
1767 bfa_lps_logout_comp(struct bfa_lps_s
*lps
)
1769 if (!lps
->bfa
->fcs
) {
1770 bfa_cb_queue(lps
->bfa
, &lps
->hcb_qe
, bfa_lps_logout_comp_cb
,
1775 bfa_cb_lps_fdisclogo_comp(lps
->bfa
->bfad
, lps
->uarg
);
1779 * Clear virtual link completion handler for non-fcs
1782 bfa_lps_cvl_event_cb(void *arg
, bfa_boolean_t complete
)
1784 struct bfa_lps_s
*lps
= arg
;
1789 /* Clear virtual link to base port will result in link down */
1791 bfa_cb_lps_cvl_event(lps
->bfa
->bfad
, lps
->uarg
);
1795 * Received Clear virtual link event --direct call for fcs,
1799 bfa_lps_cvl_event(struct bfa_lps_s
*lps
)
1801 if (!lps
->bfa
->fcs
) {
1802 bfa_cb_queue(lps
->bfa
, &lps
->hcb_qe
, bfa_lps_cvl_event_cb
,
1807 /* Clear virtual link to base port will result in link down */
1809 bfa_cb_lps_cvl_event(lps
->bfa
->bfad
, lps
->uarg
);
1815 * lps_public BFA LPS public functions
1819 bfa_lps_get_max_vport(struct bfa_s
*bfa
)
1821 if (bfa_ioc_devid(&bfa
->ioc
) == BFA_PCI_DEVICE_ID_CT
)
1822 return BFA_LPS_MAX_VPORTS_SUPP_CT
;
1824 return BFA_LPS_MAX_VPORTS_SUPP_CB
;
1828 * Allocate a lport srvice tag.
1831 bfa_lps_alloc(struct bfa_s
*bfa
)
1833 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1834 struct bfa_lps_s
*lps
= NULL
;
1836 bfa_q_deq(&mod
->lps_free_q
, &lps
);
1841 list_add_tail(&lps
->qe
, &mod
->lps_active_q
);
1843 bfa_sm_set_state(lps
, bfa_lps_sm_init
);
1848 * Free lport service tag. This can be called anytime after an alloc.
1849 * No need to wait for any pending login/logout completions.
1852 bfa_lps_delete(struct bfa_lps_s
*lps
)
1854 bfa_sm_send_event(lps
, BFA_LPS_SM_DELETE
);
1858 * Initiate a lport login.
1861 bfa_lps_flogi(struct bfa_lps_s
*lps
, void *uarg
, u8 alpa
, u16 pdusz
,
1862 wwn_t pwwn
, wwn_t nwwn
, bfa_boolean_t auth_en
)
1869 lps
->fdisc
= BFA_FALSE
;
1870 lps
->auth_en
= auth_en
;
1871 bfa_sm_send_event(lps
, BFA_LPS_SM_LOGIN
);
1875 * Initiate a lport fdisc login.
1878 bfa_lps_fdisc(struct bfa_lps_s
*lps
, void *uarg
, u16 pdusz
, wwn_t pwwn
,
1886 lps
->fdisc
= BFA_TRUE
;
1887 lps
->auth_en
= BFA_FALSE
;
1888 bfa_sm_send_event(lps
, BFA_LPS_SM_LOGIN
);
1893 * Initiate a lport FDSIC logout.
1896 bfa_lps_fdisclogo(struct bfa_lps_s
*lps
)
1898 bfa_sm_send_event(lps
, BFA_LPS_SM_LOGOUT
);
1903 * Return lport services tag given the pid
1906 bfa_lps_get_tag_from_pid(struct bfa_s
*bfa
, u32 pid
)
1908 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1909 struct bfa_lps_s
*lps
;
1912 for (i
= 0, lps
= mod
->lps_arr
; i
< mod
->num_lps
; i
++, lps
++) {
1913 if (lps
->lp_pid
== pid
)
1917 /* Return base port tag anyway */
1923 * return port id assigned to the base lport
1926 bfa_lps_get_base_pid(struct bfa_s
*bfa
)
1928 struct bfa_lps_mod_s
*mod
= BFA_LPS_MOD(bfa
);
1930 return BFA_LPS_FROM_TAG(mod
, 0)->lp_pid
;
1934 * Set PID in case of n2n (which is assigned during PLOGI)
1937 bfa_lps_set_n2n_pid(struct bfa_lps_s
*lps
, uint32_t n2n_pid
)
1939 bfa_trc(lps
->bfa
, lps
->lp_tag
);
1940 bfa_trc(lps
->bfa
, n2n_pid
);
1942 lps
->lp_pid
= n2n_pid
;
1943 bfa_sm_send_event(lps
, BFA_LPS_SM_SET_N2N_PID
);
1947 * LPS firmware message class handler.
1950 bfa_lps_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
1952 union bfi_lps_i2h_msg_u msg
;
1954 bfa_trc(bfa
, m
->mhdr
.msg_id
);
1957 switch (m
->mhdr
.msg_id
) {
1958 case BFI_LPS_H2I_LOGIN_RSP
:
1959 bfa_lps_login_rsp(bfa
, msg
.login_rsp
);
1962 case BFI_LPS_H2I_LOGOUT_RSP
:
1963 bfa_lps_logout_rsp(bfa
, msg
.logout_rsp
);
1966 case BFI_LPS_H2I_CVL_EVENT
:
1967 bfa_lps_rx_cvl_event(bfa
, msg
.cvl_event
);
1971 bfa_trc(bfa
, m
->mhdr
.msg_id
);
1977 * FC PORT state machine functions
1980 bfa_fcport_sm_uninit(struct bfa_fcport_s
*fcport
,
1981 enum bfa_fcport_sm_event event
)
1983 bfa_trc(fcport
->bfa
, event
);
1986 case BFA_FCPORT_SM_START
:
1988 * Start event after IOC is configured and BFA is started.
1990 fcport
->use_flash_cfg
= BFA_TRUE
;
1992 if (bfa_fcport_send_enable(fcport
)) {
1993 bfa_trc(fcport
->bfa
, BFA_TRUE
);
1994 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
1996 bfa_trc(fcport
->bfa
, BFA_FALSE
);
1997 bfa_sm_set_state(fcport
,
1998 bfa_fcport_sm_enabling_qwait
);
2002 case BFA_FCPORT_SM_ENABLE
:
2004 * Port is persistently configured to be in enabled state. Do
2005 * not change state. Port enabling is done when START event is
2010 case BFA_FCPORT_SM_DISABLE
:
2012 * If a port is persistently configured to be disabled, the
2013 * first event will a port disable request.
2015 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabled
);
2018 case BFA_FCPORT_SM_HWFAIL
:
2019 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2023 bfa_sm_fault(fcport
->bfa
, event
);
2028 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s
*fcport
,
2029 enum bfa_fcport_sm_event event
)
2031 char pwwn_buf
[BFA_STRING_32
];
2032 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2033 bfa_trc(fcport
->bfa
, event
);
2036 case BFA_FCPORT_SM_QRESUME
:
2037 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2038 bfa_fcport_send_enable(fcport
);
2041 case BFA_FCPORT_SM_STOP
:
2042 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2043 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2046 case BFA_FCPORT_SM_ENABLE
:
2048 * Already enable is in progress.
2052 case BFA_FCPORT_SM_DISABLE
:
2054 * Just send disable request to firmware when room becomes
2055 * available in request queue.
2057 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabled
);
2058 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2059 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2060 BFA_PL_EID_PORT_DISABLE
, 0, "Port Disable");
2061 wwn2str(pwwn_buf
, fcport
->pwwn
);
2062 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2063 "Base port disabled: WWN = %s\n", pwwn_buf
);
2066 case BFA_FCPORT_SM_LINKUP
:
2067 case BFA_FCPORT_SM_LINKDOWN
:
2069 * Possible to get link events when doing back-to-back
2074 case BFA_FCPORT_SM_HWFAIL
:
2075 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2076 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2080 bfa_sm_fault(fcport
->bfa
, event
);
2085 bfa_fcport_sm_enabling(struct bfa_fcport_s
*fcport
,
2086 enum bfa_fcport_sm_event event
)
2088 char pwwn_buf
[BFA_STRING_32
];
2089 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2090 bfa_trc(fcport
->bfa
, event
);
2093 case BFA_FCPORT_SM_FWRSP
:
2094 case BFA_FCPORT_SM_LINKDOWN
:
2095 bfa_sm_set_state(fcport
, bfa_fcport_sm_linkdown
);
2098 case BFA_FCPORT_SM_LINKUP
:
2099 bfa_fcport_update_linkinfo(fcport
);
2100 bfa_sm_set_state(fcport
, bfa_fcport_sm_linkup
);
2102 WARN_ON(!fcport
->event_cbfn
);
2103 bfa_fcport_scn(fcport
, BFA_PORT_LINKUP
, BFA_FALSE
);
2106 case BFA_FCPORT_SM_ENABLE
:
2108 * Already being enabled.
2112 case BFA_FCPORT_SM_DISABLE
:
2113 if (bfa_fcport_send_disable(fcport
))
2114 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling
);
2116 bfa_sm_set_state(fcport
,
2117 bfa_fcport_sm_disabling_qwait
);
2119 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2120 BFA_PL_EID_PORT_DISABLE
, 0, "Port Disable");
2121 wwn2str(pwwn_buf
, fcport
->pwwn
);
2122 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2123 "Base port disabled: WWN = %s\n", pwwn_buf
);
2126 case BFA_FCPORT_SM_STOP
:
2127 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2130 case BFA_FCPORT_SM_HWFAIL
:
2131 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2135 bfa_sm_fault(fcport
->bfa
, event
);
2140 bfa_fcport_sm_linkdown(struct bfa_fcport_s
*fcport
,
2141 enum bfa_fcport_sm_event event
)
2143 struct bfi_fcport_event_s
*pevent
= fcport
->event_arg
.i2hmsg
.event
;
2144 char pwwn_buf
[BFA_STRING_32
];
2145 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2147 bfa_trc(fcport
->bfa
, event
);
2150 case BFA_FCPORT_SM_LINKUP
:
2151 bfa_fcport_update_linkinfo(fcport
);
2152 bfa_sm_set_state(fcport
, bfa_fcport_sm_linkup
);
2153 WARN_ON(!fcport
->event_cbfn
);
2154 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2155 BFA_PL_EID_PORT_ST_CHANGE
, 0, "Port Linkup");
2156 if (!bfa_ioc_get_fcmode(&fcport
->bfa
->ioc
)) {
2158 bfa_trc(fcport
->bfa
,
2159 pevent
->link_state
.vc_fcf
.fcf
.fipenabled
);
2160 bfa_trc(fcport
->bfa
,
2161 pevent
->link_state
.vc_fcf
.fcf
.fipfailed
);
2163 if (pevent
->link_state
.vc_fcf
.fcf
.fipfailed
)
2164 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2165 BFA_PL_EID_FIP_FCF_DISC
, 0,
2166 "FIP FCF Discovery Failed");
2168 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2169 BFA_PL_EID_FIP_FCF_DISC
, 0,
2170 "FIP FCF Discovered");
2173 bfa_fcport_scn(fcport
, BFA_PORT_LINKUP
, BFA_FALSE
);
2174 wwn2str(pwwn_buf
, fcport
->pwwn
);
2175 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2176 "Base port online: WWN = %s\n", pwwn_buf
);
2179 case BFA_FCPORT_SM_LINKDOWN
:
2181 * Possible to get link down event.
2185 case BFA_FCPORT_SM_ENABLE
:
2191 case BFA_FCPORT_SM_DISABLE
:
2192 if (bfa_fcport_send_disable(fcport
))
2193 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling
);
2195 bfa_sm_set_state(fcport
,
2196 bfa_fcport_sm_disabling_qwait
);
2198 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2199 BFA_PL_EID_PORT_DISABLE
, 0, "Port Disable");
2200 wwn2str(pwwn_buf
, fcport
->pwwn
);
2201 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2202 "Base port disabled: WWN = %s\n", pwwn_buf
);
2205 case BFA_FCPORT_SM_STOP
:
2206 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2209 case BFA_FCPORT_SM_HWFAIL
:
2210 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2214 bfa_sm_fault(fcport
->bfa
, event
);
2219 bfa_fcport_sm_linkup(struct bfa_fcport_s
*fcport
,
2220 enum bfa_fcport_sm_event event
)
2222 char pwwn_buf
[BFA_STRING_32
];
2223 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2225 bfa_trc(fcport
->bfa
, event
);
2228 case BFA_FCPORT_SM_ENABLE
:
2234 case BFA_FCPORT_SM_DISABLE
:
2235 if (bfa_fcport_send_disable(fcport
))
2236 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling
);
2238 bfa_sm_set_state(fcport
,
2239 bfa_fcport_sm_disabling_qwait
);
2241 bfa_fcport_reset_linkinfo(fcport
);
2242 bfa_fcport_scn(fcport
, BFA_PORT_LINKDOWN
, BFA_FALSE
);
2243 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2244 BFA_PL_EID_PORT_DISABLE
, 0, "Port Disable");
2245 wwn2str(pwwn_buf
, fcport
->pwwn
);
2246 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2247 "Base port offline: WWN = %s\n", pwwn_buf
);
2248 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2249 "Base port disabled: WWN = %s\n", pwwn_buf
);
2252 case BFA_FCPORT_SM_LINKDOWN
:
2253 bfa_sm_set_state(fcport
, bfa_fcport_sm_linkdown
);
2254 bfa_fcport_reset_linkinfo(fcport
);
2255 bfa_fcport_scn(fcport
, BFA_PORT_LINKDOWN
, BFA_FALSE
);
2256 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2257 BFA_PL_EID_PORT_ST_CHANGE
, 0, "Port Linkdown");
2258 wwn2str(pwwn_buf
, fcport
->pwwn
);
2259 if (BFA_PORT_IS_DISABLED(fcport
->bfa
))
2260 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2261 "Base port offline: WWN = %s\n", pwwn_buf
);
2263 BFA_LOG(KERN_ERR
, bfad
, bfa_log_level
,
2264 "Base port (WWN = %s) "
2265 "lost fabric connectivity\n", pwwn_buf
);
2268 case BFA_FCPORT_SM_STOP
:
2269 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2270 bfa_fcport_reset_linkinfo(fcport
);
2271 wwn2str(pwwn_buf
, fcport
->pwwn
);
2272 if (BFA_PORT_IS_DISABLED(fcport
->bfa
))
2273 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2274 "Base port offline: WWN = %s\n", pwwn_buf
);
2276 BFA_LOG(KERN_ERR
, bfad
, bfa_log_level
,
2277 "Base port (WWN = %s) "
2278 "lost fabric connectivity\n", pwwn_buf
);
2281 case BFA_FCPORT_SM_HWFAIL
:
2282 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2283 bfa_fcport_reset_linkinfo(fcport
);
2284 bfa_fcport_scn(fcport
, BFA_PORT_LINKDOWN
, BFA_FALSE
);
2285 wwn2str(pwwn_buf
, fcport
->pwwn
);
2286 if (BFA_PORT_IS_DISABLED(fcport
->bfa
))
2287 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2288 "Base port offline: WWN = %s\n", pwwn_buf
);
2290 BFA_LOG(KERN_ERR
, bfad
, bfa_log_level
,
2291 "Base port (WWN = %s) "
2292 "lost fabric connectivity\n", pwwn_buf
);
2296 bfa_sm_fault(fcport
->bfa
, event
);
2301 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s
*fcport
,
2302 enum bfa_fcport_sm_event event
)
2304 bfa_trc(fcport
->bfa
, event
);
2307 case BFA_FCPORT_SM_QRESUME
:
2308 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling
);
2309 bfa_fcport_send_disable(fcport
);
2312 case BFA_FCPORT_SM_STOP
:
2313 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2314 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2317 case BFA_FCPORT_SM_ENABLE
:
2318 bfa_sm_set_state(fcport
, bfa_fcport_sm_toggling_qwait
);
2321 case BFA_FCPORT_SM_DISABLE
:
2323 * Already being disabled.
2327 case BFA_FCPORT_SM_LINKUP
:
2328 case BFA_FCPORT_SM_LINKDOWN
:
2330 * Possible to get link events when doing back-to-back
2335 case BFA_FCPORT_SM_HWFAIL
:
2336 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocfail
);
2337 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2341 bfa_sm_fault(fcport
->bfa
, event
);
2346 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s
*fcport
,
2347 enum bfa_fcport_sm_event event
)
2349 bfa_trc(fcport
->bfa
, event
);
2352 case BFA_FCPORT_SM_QRESUME
:
2353 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling
);
2354 bfa_fcport_send_disable(fcport
);
2355 if (bfa_fcport_send_enable(fcport
))
2356 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2358 bfa_sm_set_state(fcport
,
2359 bfa_fcport_sm_enabling_qwait
);
2362 case BFA_FCPORT_SM_STOP
:
2363 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2364 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2367 case BFA_FCPORT_SM_ENABLE
:
2370 case BFA_FCPORT_SM_DISABLE
:
2371 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabling_qwait
);
2374 case BFA_FCPORT_SM_LINKUP
:
2375 case BFA_FCPORT_SM_LINKDOWN
:
2377 * Possible to get link events when doing back-to-back
2382 case BFA_FCPORT_SM_HWFAIL
:
2383 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocfail
);
2384 bfa_reqq_wcancel(&fcport
->reqq_wait
);
2388 bfa_sm_fault(fcport
->bfa
, event
);
2393 bfa_fcport_sm_disabling(struct bfa_fcport_s
*fcport
,
2394 enum bfa_fcport_sm_event event
)
2396 char pwwn_buf
[BFA_STRING_32
];
2397 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2398 bfa_trc(fcport
->bfa
, event
);
2401 case BFA_FCPORT_SM_FWRSP
:
2402 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabled
);
2405 case BFA_FCPORT_SM_DISABLE
:
2407 * Already being disabled.
2411 case BFA_FCPORT_SM_ENABLE
:
2412 if (bfa_fcport_send_enable(fcport
))
2413 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2415 bfa_sm_set_state(fcport
,
2416 bfa_fcport_sm_enabling_qwait
);
2418 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2419 BFA_PL_EID_PORT_ENABLE
, 0, "Port Enable");
2420 wwn2str(pwwn_buf
, fcport
->pwwn
);
2421 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2422 "Base port enabled: WWN = %s\n", pwwn_buf
);
2425 case BFA_FCPORT_SM_STOP
:
2426 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2429 case BFA_FCPORT_SM_LINKUP
:
2430 case BFA_FCPORT_SM_LINKDOWN
:
2432 * Possible to get link events when doing back-to-back
2437 case BFA_FCPORT_SM_HWFAIL
:
2438 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocfail
);
2442 bfa_sm_fault(fcport
->bfa
, event
);
2447 bfa_fcport_sm_disabled(struct bfa_fcport_s
*fcport
,
2448 enum bfa_fcport_sm_event event
)
2450 char pwwn_buf
[BFA_STRING_32
];
2451 struct bfad_s
*bfad
= (struct bfad_s
*)fcport
->bfa
->bfad
;
2452 bfa_trc(fcport
->bfa
, event
);
2455 case BFA_FCPORT_SM_START
:
2457 * Ignore start event for a port that is disabled.
2461 case BFA_FCPORT_SM_STOP
:
2462 bfa_sm_set_state(fcport
, bfa_fcport_sm_stopped
);
2465 case BFA_FCPORT_SM_ENABLE
:
2466 if (bfa_fcport_send_enable(fcport
))
2467 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2469 bfa_sm_set_state(fcport
,
2470 bfa_fcport_sm_enabling_qwait
);
2472 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
2473 BFA_PL_EID_PORT_ENABLE
, 0, "Port Enable");
2474 wwn2str(pwwn_buf
, fcport
->pwwn
);
2475 BFA_LOG(KERN_INFO
, bfad
, bfa_log_level
,
2476 "Base port enabled: WWN = %s\n", pwwn_buf
);
2479 case BFA_FCPORT_SM_DISABLE
:
2485 case BFA_FCPORT_SM_HWFAIL
:
2486 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocfail
);
2490 bfa_sm_fault(fcport
->bfa
, event
);
2495 bfa_fcport_sm_stopped(struct bfa_fcport_s
*fcport
,
2496 enum bfa_fcport_sm_event event
)
2498 bfa_trc(fcport
->bfa
, event
);
2501 case BFA_FCPORT_SM_START
:
2502 if (bfa_fcport_send_enable(fcport
))
2503 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2505 bfa_sm_set_state(fcport
,
2506 bfa_fcport_sm_enabling_qwait
);
2511 * Ignore all other events.
2518 * Port is enabled. IOC is down/failed.
2521 bfa_fcport_sm_iocdown(struct bfa_fcport_s
*fcport
,
2522 enum bfa_fcport_sm_event event
)
2524 bfa_trc(fcport
->bfa
, event
);
2527 case BFA_FCPORT_SM_START
:
2528 if (bfa_fcport_send_enable(fcport
))
2529 bfa_sm_set_state(fcport
, bfa_fcport_sm_enabling
);
2531 bfa_sm_set_state(fcport
,
2532 bfa_fcport_sm_enabling_qwait
);
2537 * Ignore all events.
2544 * Port is disabled. IOC is down/failed.
2547 bfa_fcport_sm_iocfail(struct bfa_fcport_s
*fcport
,
2548 enum bfa_fcport_sm_event event
)
2550 bfa_trc(fcport
->bfa
, event
);
2553 case BFA_FCPORT_SM_START
:
2554 bfa_sm_set_state(fcport
, bfa_fcport_sm_disabled
);
2557 case BFA_FCPORT_SM_ENABLE
:
2558 bfa_sm_set_state(fcport
, bfa_fcport_sm_iocdown
);
2563 * Ignore all events.
2570 * Link state is down
2573 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s
*ln
,
2574 enum bfa_fcport_ln_sm_event event
)
2576 bfa_trc(ln
->fcport
->bfa
, event
);
2579 case BFA_FCPORT_LN_SM_LINKUP
:
2580 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up_nf
);
2581 bfa_fcport_queue_cb(ln
, BFA_PORT_LINKUP
);
2585 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2590 * Link state is waiting for down notification
2593 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s
*ln
,
2594 enum bfa_fcport_ln_sm_event event
)
2596 bfa_trc(ln
->fcport
->bfa
, event
);
2599 case BFA_FCPORT_LN_SM_LINKUP
:
2600 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn_up_nf
);
2603 case BFA_FCPORT_LN_SM_NOTIFICATION
:
2604 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn
);
2608 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2613 * Link state is waiting for down notification and there is a pending up
2616 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
2617 enum bfa_fcport_ln_sm_event event
)
2619 bfa_trc(ln
->fcport
->bfa
, event
);
2622 case BFA_FCPORT_LN_SM_LINKDOWN
:
2623 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn_nf
);
2626 case BFA_FCPORT_LN_SM_NOTIFICATION
:
2627 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up_nf
);
2628 bfa_fcport_queue_cb(ln
, BFA_PORT_LINKUP
);
2632 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2640 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s
*ln
,
2641 enum bfa_fcport_ln_sm_event event
)
2643 bfa_trc(ln
->fcport
->bfa
, event
);
2646 case BFA_FCPORT_LN_SM_LINKDOWN
:
2647 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn_nf
);
2648 bfa_fcport_queue_cb(ln
, BFA_PORT_LINKDOWN
);
2652 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2657 * Link state is waiting for up notification
2660 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s
*ln
,
2661 enum bfa_fcport_ln_sm_event event
)
2663 bfa_trc(ln
->fcport
->bfa
, event
);
2666 case BFA_FCPORT_LN_SM_LINKDOWN
:
2667 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up_dn_nf
);
2670 case BFA_FCPORT_LN_SM_NOTIFICATION
:
2671 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up
);
2675 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2680 * Link state is waiting for up notification and there is a pending down
2683 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s
*ln
,
2684 enum bfa_fcport_ln_sm_event event
)
2686 bfa_trc(ln
->fcport
->bfa
, event
);
2689 case BFA_FCPORT_LN_SM_LINKUP
:
2690 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up_dn_up_nf
);
2693 case BFA_FCPORT_LN_SM_NOTIFICATION
:
2694 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn_nf
);
2695 bfa_fcport_queue_cb(ln
, BFA_PORT_LINKDOWN
);
2699 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2704 * Link state is waiting for up notification and there are pending down and up
2707 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s
*ln
,
2708 enum bfa_fcport_ln_sm_event event
)
2710 bfa_trc(ln
->fcport
->bfa
, event
);
2713 case BFA_FCPORT_LN_SM_LINKDOWN
:
2714 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_up_dn_nf
);
2717 case BFA_FCPORT_LN_SM_NOTIFICATION
:
2718 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn_up_nf
);
2719 bfa_fcport_queue_cb(ln
, BFA_PORT_LINKDOWN
);
2723 bfa_sm_fault(ln
->fcport
->bfa
, event
);
2728 __bfa_cb_fcport_event(void *cbarg
, bfa_boolean_t complete
)
2730 struct bfa_fcport_ln_s
*ln
= cbarg
;
2733 ln
->fcport
->event_cbfn(ln
->fcport
->event_cbarg
, ln
->ln_event
);
2735 bfa_sm_send_event(ln
, BFA_FCPORT_LN_SM_NOTIFICATION
);
2739 * Send SCN notification to upper layers.
2740 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2743 bfa_fcport_scn(struct bfa_fcport_s
*fcport
, enum bfa_port_linkstate event
,
2744 bfa_boolean_t trunk
)
2746 if (fcport
->cfg
.trunked
&& !trunk
)
2750 case BFA_PORT_LINKUP
:
2751 bfa_sm_send_event(&fcport
->ln
, BFA_FCPORT_LN_SM_LINKUP
);
2753 case BFA_PORT_LINKDOWN
:
2754 bfa_sm_send_event(&fcport
->ln
, BFA_FCPORT_LN_SM_LINKDOWN
);
2762 bfa_fcport_queue_cb(struct bfa_fcport_ln_s
*ln
, enum bfa_port_linkstate event
)
2764 struct bfa_fcport_s
*fcport
= ln
->fcport
;
2766 if (fcport
->bfa
->fcs
) {
2767 fcport
->event_cbfn(fcport
->event_cbarg
, event
);
2768 bfa_sm_send_event(ln
, BFA_FCPORT_LN_SM_NOTIFICATION
);
2770 ln
->ln_event
= event
;
2771 bfa_cb_queue(fcport
->bfa
, &ln
->ln_qe
,
2772 __bfa_cb_fcport_event
, ln
);
2776 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2780 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
,
2783 *dm_len
+= FCPORT_STATS_DMA_SZ
;
2787 bfa_fcport_qresume(void *cbarg
)
2789 struct bfa_fcport_s
*fcport
= cbarg
;
2791 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_QRESUME
);
2795 bfa_fcport_mem_claim(struct bfa_fcport_s
*fcport
, struct bfa_meminfo_s
*meminfo
)
2800 dm_kva
= bfa_meminfo_dma_virt(meminfo
);
2801 dm_pa
= bfa_meminfo_dma_phys(meminfo
);
2803 fcport
->stats_kva
= dm_kva
;
2804 fcport
->stats_pa
= dm_pa
;
2805 fcport
->stats
= (union bfa_fcport_stats_u
*) dm_kva
;
2807 dm_kva
+= FCPORT_STATS_DMA_SZ
;
2808 dm_pa
+= FCPORT_STATS_DMA_SZ
;
2810 bfa_meminfo_dma_virt(meminfo
) = dm_kva
;
2811 bfa_meminfo_dma_phys(meminfo
) = dm_pa
;
2815 * Memory initialization.
2818 bfa_fcport_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
2819 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
2821 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
2822 struct bfa_port_cfg_s
*port_cfg
= &fcport
->cfg
;
2823 struct bfa_fcport_ln_s
*ln
= &fcport
->ln
;
2826 memset(fcport
, 0, sizeof(struct bfa_fcport_s
));
2828 ln
->fcport
= fcport
;
2830 bfa_fcport_mem_claim(fcport
, meminfo
);
2832 bfa_sm_set_state(fcport
, bfa_fcport_sm_uninit
);
2833 bfa_sm_set_state(ln
, bfa_fcport_ln_sm_dn
);
2836 * initialize time stamp for stats reset
2838 do_gettimeofday(&tv
);
2839 fcport
->stats_reset_time
= tv
.tv_sec
;
2842 * initialize and set default configuration
2844 port_cfg
->topology
= BFA_PORT_TOPOLOGY_P2P
;
2845 port_cfg
->speed
= BFA_PORT_SPEED_AUTO
;
2846 port_cfg
->trunked
= BFA_FALSE
;
2847 port_cfg
->maxfrsize
= 0;
2849 port_cfg
->trl_def_speed
= BFA_PORT_SPEED_1GBPS
;
2851 bfa_reqq_winit(&fcport
->reqq_wait
, bfa_fcport_qresume
, fcport
);
/*
 * Module detach hook; the fcport module has nothing to tear down.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2860 * Called when IOC is ready.
2863 bfa_fcport_start(struct bfa_s
*bfa
)
2865 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_START
);
2869 * Called before IOC is stopped.
2872 bfa_fcport_stop(struct bfa_s
*bfa
)
2874 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_STOP
);
2875 bfa_trunk_iocdisable(bfa
);
2879 * Called when IOC failure is detected.
2882 bfa_fcport_iocdisable(struct bfa_s
*bfa
)
2884 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
2886 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_HWFAIL
);
2887 bfa_trunk_iocdisable(bfa
);
2891 bfa_fcport_update_linkinfo(struct bfa_fcport_s
*fcport
)
2893 struct bfi_fcport_event_s
*pevent
= fcport
->event_arg
.i2hmsg
.event
;
2894 struct bfa_fcport_trunk_s
*trunk
= &fcport
->trunk
;
2896 fcport
->speed
= pevent
->link_state
.speed
;
2897 fcport
->topology
= pevent
->link_state
.topology
;
2899 if (fcport
->topology
== BFA_PORT_TOPOLOGY_LOOP
)
2903 fcport
->qos_attr
= pevent
->link_state
.qos_attr
;
2904 fcport
->qos_vc_attr
= pevent
->link_state
.vc_fcf
.qos_vc_attr
;
2907 * update trunk state if applicable
2909 if (!fcport
->cfg
.trunked
)
2910 trunk
->attr
.state
= BFA_TRUNK_DISABLED
;
2912 /* update FCoE specific */
2913 fcport
->fcoe_vlan
= be16_to_cpu(pevent
->link_state
.vc_fcf
.fcf
.vlan
);
2915 bfa_trc(fcport
->bfa
, fcport
->speed
);
2916 bfa_trc(fcport
->bfa
, fcport
->topology
);
2920 bfa_fcport_reset_linkinfo(struct bfa_fcport_s
*fcport
)
2922 fcport
->speed
= BFA_PORT_SPEED_UNKNOWN
;
2923 fcport
->topology
= BFA_PORT_TOPOLOGY_NONE
;
2927 * Send port enable message to firmware.
2929 static bfa_boolean_t
2930 bfa_fcport_send_enable(struct bfa_fcport_s
*fcport
)
2932 struct bfi_fcport_enable_req_s
*m
;
2935 * Increment message tag before queue check, so that responses to old
2936 * requests are discarded.
2941 * check for room in queue to send request now
2943 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
2945 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
2946 &fcport
->reqq_wait
);
2950 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_ENABLE_REQ
,
2951 bfa_lpuid(fcport
->bfa
));
2952 m
->nwwn
= fcport
->nwwn
;
2953 m
->pwwn
= fcport
->pwwn
;
2954 m
->port_cfg
= fcport
->cfg
;
2955 m
->msgtag
= fcport
->msgtag
;
2956 m
->port_cfg
.maxfrsize
= cpu_to_be16(fcport
->cfg
.maxfrsize
);
2957 m
->use_flash_cfg
= fcport
->use_flash_cfg
;
2958 bfa_dma_be_addr_set(m
->stats_dma_addr
, fcport
->stats_pa
);
2959 bfa_trc(fcport
->bfa
, m
->stats_dma_addr
.a32
.addr_lo
);
2960 bfa_trc(fcport
->bfa
, m
->stats_dma_addr
.a32
.addr_hi
);
2963 * queue I/O message to firmware
2965 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
2970 * Send port disable message to firmware.
2972 static bfa_boolean_t
2973 bfa_fcport_send_disable(struct bfa_fcport_s
*fcport
)
2975 struct bfi_fcport_req_s
*m
;
2978 * Increment message tag before queue check, so that responses to old
2979 * requests are discarded.
2984 * check for room in queue to send request now
2986 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
2988 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
2989 &fcport
->reqq_wait
);
2993 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_DISABLE_REQ
,
2994 bfa_lpuid(fcport
->bfa
));
2995 m
->msgtag
= fcport
->msgtag
;
2998 * queue I/O message to firmware
3000 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3006 bfa_fcport_set_wwns(struct bfa_fcport_s
*fcport
)
3008 fcport
->pwwn
= fcport
->bfa
->ioc
.attr
->pwwn
;
3009 fcport
->nwwn
= fcport
->bfa
->ioc
.attr
->nwwn
;
3011 bfa_trc(fcport
->bfa
, fcport
->pwwn
);
3012 bfa_trc(fcport
->bfa
, fcport
->nwwn
);
3016 bfa_fcport_send_txcredit(void *port_cbarg
)
3019 struct bfa_fcport_s
*fcport
= port_cbarg
;
3020 struct bfi_fcport_set_svc_params_req_s
*m
;
3023 * check for room in queue to send request now
3025 m
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3027 bfa_trc(fcport
->bfa
, fcport
->cfg
.tx_bbcredit
);
3031 bfi_h2i_set(m
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
,
3032 bfa_lpuid(fcport
->bfa
));
3033 m
->tx_bbcredit
= cpu_to_be16((u16
)fcport
->cfg
.tx_bbcredit
);
3036 * queue I/O message to firmware
3038 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3042 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s
*d
,
3043 struct bfa_qos_stats_s
*s
)
3045 u32
*dip
= (u32
*) d
;
3046 __be32
*sip
= (__be32
*) s
;
3049 /* Now swap the 32 bit fields */
3050 for (i
= 0; i
< (sizeof(struct bfa_qos_stats_s
)/sizeof(u32
)); ++i
)
3051 dip
[i
] = be32_to_cpu(sip
[i
]);
3055 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s
*d
,
3056 struct bfa_fcoe_stats_s
*s
)
3058 u32
*dip
= (u32
*) d
;
3059 __be32
*sip
= (__be32
*) s
;
3062 for (i
= 0; i
< ((sizeof(struct bfa_fcoe_stats_s
))/sizeof(u32
));
3065 dip
[i
] = be32_to_cpu(sip
[i
]);
3066 dip
[i
+ 1] = be32_to_cpu(sip
[i
+ 1]);
3068 dip
[i
] = be32_to_cpu(sip
[i
+ 1]);
3069 dip
[i
+ 1] = be32_to_cpu(sip
[i
]);
3075 __bfa_cb_fcport_stats_get(void *cbarg
, bfa_boolean_t complete
)
3077 struct bfa_fcport_s
*fcport
= cbarg
;
3080 if (fcport
->stats_status
== BFA_STATUS_OK
) {
3083 /* Swap FC QoS or FCoE stats */
3084 if (bfa_ioc_get_fcmode(&fcport
->bfa
->ioc
)) {
3085 bfa_fcport_qos_stats_swap(
3086 &fcport
->stats_ret
->fcqos
,
3087 &fcport
->stats
->fcqos
);
3089 bfa_fcport_fcoe_stats_swap(
3090 &fcport
->stats_ret
->fcoe
,
3091 &fcport
->stats
->fcoe
);
3093 do_gettimeofday(&tv
);
3094 fcport
->stats_ret
->fcoe
.secs_reset
=
3095 tv
.tv_sec
- fcport
->stats_reset_time
;
3098 fcport
->stats_cbfn(fcport
->stats_cbarg
, fcport
->stats_status
);
3100 fcport
->stats_busy
= BFA_FALSE
;
3101 fcport
->stats_status
= BFA_STATUS_OK
;
3106 bfa_fcport_stats_get_timeout(void *cbarg
)
3108 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3110 bfa_trc(fcport
->bfa
, fcport
->stats_qfull
);
3112 if (fcport
->stats_qfull
) {
3113 bfa_reqq_wcancel(&fcport
->stats_reqq_wait
);
3114 fcport
->stats_qfull
= BFA_FALSE
;
3117 fcport
->stats_status
= BFA_STATUS_ETIMER
;
3118 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
, __bfa_cb_fcport_stats_get
,
3123 bfa_fcport_send_stats_get(void *cbarg
)
3125 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3126 struct bfi_fcport_req_s
*msg
;
3128 msg
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3131 fcport
->stats_qfull
= BFA_TRUE
;
3132 bfa_reqq_winit(&fcport
->stats_reqq_wait
,
3133 bfa_fcport_send_stats_get
, fcport
);
3134 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3135 &fcport
->stats_reqq_wait
);
3138 fcport
->stats_qfull
= BFA_FALSE
;
3140 memset(msg
, 0, sizeof(struct bfi_fcport_req_s
));
3141 bfi_h2i_set(msg
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_STATS_GET_REQ
,
3142 bfa_lpuid(fcport
->bfa
));
3143 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3147 __bfa_cb_fcport_stats_clr(void *cbarg
, bfa_boolean_t complete
)
3149 struct bfa_fcport_s
*fcport
= cbarg
;
3155 * re-initialize time stamp for stats reset
3157 do_gettimeofday(&tv
);
3158 fcport
->stats_reset_time
= tv
.tv_sec
;
3160 fcport
->stats_cbfn(fcport
->stats_cbarg
, fcport
->stats_status
);
3162 fcport
->stats_busy
= BFA_FALSE
;
3163 fcport
->stats_status
= BFA_STATUS_OK
;
3168 bfa_fcport_stats_clr_timeout(void *cbarg
)
3170 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3172 bfa_trc(fcport
->bfa
, fcport
->stats_qfull
);
3174 if (fcport
->stats_qfull
) {
3175 bfa_reqq_wcancel(&fcport
->stats_reqq_wait
);
3176 fcport
->stats_qfull
= BFA_FALSE
;
3179 fcport
->stats_status
= BFA_STATUS_ETIMER
;
3180 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3181 __bfa_cb_fcport_stats_clr
, fcport
);
3185 bfa_fcport_send_stats_clear(void *cbarg
)
3187 struct bfa_fcport_s
*fcport
= (struct bfa_fcport_s
*) cbarg
;
3188 struct bfi_fcport_req_s
*msg
;
3190 msg
= bfa_reqq_next(fcport
->bfa
, BFA_REQQ_PORT
);
3193 fcport
->stats_qfull
= BFA_TRUE
;
3194 bfa_reqq_winit(&fcport
->stats_reqq_wait
,
3195 bfa_fcport_send_stats_clear
, fcport
);
3196 bfa_reqq_wait(fcport
->bfa
, BFA_REQQ_PORT
,
3197 &fcport
->stats_reqq_wait
);
3200 fcport
->stats_qfull
= BFA_FALSE
;
3202 memset(msg
, 0, sizeof(struct bfi_fcport_req_s
));
3203 bfi_h2i_set(msg
->mh
, BFI_MC_FCPORT
, BFI_FCPORT_H2I_STATS_CLEAR_REQ
,
3204 bfa_lpuid(fcport
->bfa
));
3205 bfa_reqq_produce(fcport
->bfa
, BFA_REQQ_PORT
);
3209 * Handle trunk SCN event from firmware.
3212 bfa_trunk_scn(struct bfa_fcport_s
*fcport
, struct bfi_fcport_trunk_scn_s
*scn
)
3214 struct bfa_fcport_trunk_s
*trunk
= &fcport
->trunk
;
3215 struct bfi_fcport_trunk_link_s
*tlink
;
3216 struct bfa_trunk_link_attr_s
*lattr
;
3217 enum bfa_trunk_state state_prev
;
3221 bfa_trc(fcport
->bfa
, fcport
->cfg
.trunked
);
3222 WARN_ON(scn
->trunk_state
!= BFA_TRUNK_ONLINE
&&
3223 scn
->trunk_state
!= BFA_TRUNK_OFFLINE
);
3225 bfa_trc(fcport
->bfa
, trunk
->attr
.state
);
3226 bfa_trc(fcport
->bfa
, scn
->trunk_state
);
3227 bfa_trc(fcport
->bfa
, scn
->trunk_speed
);
3230 * Save off new state for trunk attribute query
3232 state_prev
= trunk
->attr
.state
;
3233 if (fcport
->cfg
.trunked
&& (trunk
->attr
.state
!= BFA_TRUNK_DISABLED
))
3234 trunk
->attr
.state
= scn
->trunk_state
;
3235 trunk
->attr
.speed
= scn
->trunk_speed
;
3236 for (i
= 0; i
< BFA_TRUNK_MAX_PORTS
; i
++) {
3237 lattr
= &trunk
->attr
.link_attr
[i
];
3238 tlink
= &scn
->tlink
[i
];
3240 lattr
->link_state
= tlink
->state
;
3241 lattr
->trunk_wwn
= tlink
->trunk_wwn
;
3242 lattr
->fctl
= tlink
->fctl
;
3243 lattr
->speed
= tlink
->speed
;
3244 lattr
->deskew
= be32_to_cpu(tlink
->deskew
);
3246 if (tlink
->state
== BFA_TRUNK_LINK_STATE_UP
) {
3247 fcport
->speed
= tlink
->speed
;
3248 fcport
->topology
= BFA_PORT_TOPOLOGY_P2P
;
3252 bfa_trc(fcport
->bfa
, lattr
->link_state
);
3253 bfa_trc(fcport
->bfa
, lattr
->trunk_wwn
);
3254 bfa_trc(fcport
->bfa
, lattr
->fctl
);
3255 bfa_trc(fcport
->bfa
, lattr
->speed
);
3256 bfa_trc(fcport
->bfa
, lattr
->deskew
);
3261 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3262 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(0,1)");
3265 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3266 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(-,1)");
3269 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3270 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk up(0,-)");
3273 bfa_plog_str(fcport
->bfa
->plog
, BFA_PL_MID_HAL
,
3274 BFA_PL_EID_TRUNK_SCN
, 0, "Trunk down");
3278 * Notify upper layers if trunk state changed.
3280 if ((state_prev
!= trunk
->attr
.state
) ||
3281 (scn
->trunk_state
== BFA_TRUNK_OFFLINE
)) {
3282 bfa_fcport_scn(fcport
, (scn
->trunk_state
== BFA_TRUNK_ONLINE
) ?
3283 BFA_PORT_LINKUP
: BFA_PORT_LINKDOWN
, BFA_TRUE
);
3288 bfa_trunk_iocdisable(struct bfa_s
*bfa
)
3290 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3294 * In trunked mode, notify upper layers that link is down
3296 if (fcport
->cfg
.trunked
) {
3297 if (fcport
->trunk
.attr
.state
== BFA_TRUNK_ONLINE
)
3298 bfa_fcport_scn(fcport
, BFA_PORT_LINKDOWN
, BFA_TRUE
);
3300 fcport
->trunk
.attr
.state
= BFA_TRUNK_OFFLINE
;
3301 fcport
->trunk
.attr
.speed
= BFA_PORT_SPEED_UNKNOWN
;
3302 for (i
= 0; i
< BFA_TRUNK_MAX_PORTS
; i
++) {
3303 fcport
->trunk
.attr
.link_attr
[i
].trunk_wwn
= 0;
3304 fcport
->trunk
.attr
.link_attr
[i
].fctl
=
3305 BFA_TRUNK_LINK_FCTL_NORMAL
;
3306 fcport
->trunk
.attr
.link_attr
[i
].link_state
=
3307 BFA_TRUNK_LINK_STATE_DN_LINKDN
;
3308 fcport
->trunk
.attr
.link_attr
[i
].speed
=
3309 BFA_PORT_SPEED_UNKNOWN
;
3310 fcport
->trunk
.attr
.link_attr
[i
].deskew
= 0;
3316 * Called to initialize port attributes
3319 bfa_fcport_init(struct bfa_s
*bfa
)
3321 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3324 * Initialize port attributes from IOC hardware data.
3326 bfa_fcport_set_wwns(fcport
);
3327 if (fcport
->cfg
.maxfrsize
== 0)
3328 fcport
->cfg
.maxfrsize
= bfa_ioc_maxfrsize(&bfa
->ioc
);
3329 fcport
->cfg
.rx_bbcredit
= bfa_ioc_rx_bbcredit(&bfa
->ioc
);
3330 fcport
->speed_sup
= bfa_ioc_speed_sup(&bfa
->ioc
);
3332 WARN_ON(!fcport
->cfg
.maxfrsize
);
3333 WARN_ON(!fcport
->cfg
.rx_bbcredit
);
3334 WARN_ON(!fcport
->speed_sup
);
3338 * Firmware message handler.
3341 bfa_fcport_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
3343 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3344 union bfi_fcport_i2h_msg_u i2hmsg
;
3347 fcport
->event_arg
.i2hmsg
= i2hmsg
;
3349 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
3350 bfa_trc(bfa
, bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
));
3352 switch (msg
->mhdr
.msg_id
) {
3353 case BFI_FCPORT_I2H_ENABLE_RSP
:
3354 if (fcport
->msgtag
== i2hmsg
.penable_rsp
->msgtag
) {
3356 if (fcport
->use_flash_cfg
) {
3357 fcport
->cfg
= i2hmsg
.penable_rsp
->port_cfg
;
3358 fcport
->cfg
.maxfrsize
=
3359 cpu_to_be16(fcport
->cfg
.maxfrsize
);
3360 fcport
->cfg
.path_tov
=
3361 cpu_to_be16(fcport
->cfg
.path_tov
);
3362 fcport
->cfg
.q_depth
=
3363 cpu_to_be16(fcport
->cfg
.q_depth
);
3365 if (fcport
->cfg
.trunked
)
3366 fcport
->trunk
.attr
.state
=
3369 fcport
->trunk
.attr
.state
=
3371 fcport
->use_flash_cfg
= BFA_FALSE
;
3374 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_FWRSP
);
3378 case BFI_FCPORT_I2H_DISABLE_RSP
:
3379 if (fcport
->msgtag
== i2hmsg
.penable_rsp
->msgtag
)
3380 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_FWRSP
);
3383 case BFI_FCPORT_I2H_EVENT
:
3384 if (i2hmsg
.event
->link_state
.linkstate
== BFA_PORT_LINKUP
)
3385 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_LINKUP
);
3387 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_LINKDOWN
);
3390 case BFI_FCPORT_I2H_TRUNK_SCN
:
3391 bfa_trunk_scn(fcport
, i2hmsg
.trunk_scn
);
3394 case BFI_FCPORT_I2H_STATS_GET_RSP
:
3396 * check for timer pop before processing the rsp
3398 if (fcport
->stats_busy
== BFA_FALSE
||
3399 fcport
->stats_status
== BFA_STATUS_ETIMER
)
3402 bfa_timer_stop(&fcport
->timer
);
3403 fcport
->stats_status
= i2hmsg
.pstatsget_rsp
->status
;
3404 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3405 __bfa_cb_fcport_stats_get
, fcport
);
3408 case BFI_FCPORT_I2H_STATS_CLEAR_RSP
:
3410 * check for timer pop before processing the rsp
3412 if (fcport
->stats_busy
== BFA_FALSE
||
3413 fcport
->stats_status
== BFA_STATUS_ETIMER
)
3416 bfa_timer_stop(&fcport
->timer
);
3417 fcport
->stats_status
= BFA_STATUS_OK
;
3418 bfa_cb_queue(fcport
->bfa
, &fcport
->hcb_qe
,
3419 __bfa_cb_fcport_stats_clr
, fcport
);
3422 case BFI_FCPORT_I2H_ENABLE_AEN
:
3423 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_ENABLE
);
3426 case BFI_FCPORT_I2H_DISABLE_AEN
:
3427 bfa_sm_send_event(fcport
, BFA_FCPORT_SM_DISABLE
);
3437 * Registered callback for port events.
3440 bfa_fcport_event_register(struct bfa_s
*bfa
,
3441 void (*cbfn
) (void *cbarg
,
3442 enum bfa_port_linkstate event
),
3445 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3447 fcport
->event_cbfn
= cbfn
;
3448 fcport
->event_cbarg
= cbarg
;
3452 bfa_fcport_enable(struct bfa_s
*bfa
)
3454 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3456 if (bfa_ioc_is_disabled(&bfa
->ioc
))
3457 return BFA_STATUS_IOC_DISABLED
;
3459 if (fcport
->diag_busy
)
3460 return BFA_STATUS_DIAG_BUSY
;
3462 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_ENABLE
);
3463 return BFA_STATUS_OK
;
3467 bfa_fcport_disable(struct bfa_s
*bfa
)
3470 if (bfa_ioc_is_disabled(&bfa
->ioc
))
3471 return BFA_STATUS_IOC_DISABLED
;
3473 bfa_sm_send_event(BFA_FCPORT_MOD(bfa
), BFA_FCPORT_SM_DISABLE
);
3474 return BFA_STATUS_OK
;
3478 * Configure port speed.
3481 bfa_fcport_cfg_speed(struct bfa_s
*bfa
, enum bfa_port_speed speed
)
3483 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3485 bfa_trc(bfa
, speed
);
3487 if (fcport
->cfg
.trunked
== BFA_TRUE
)
3488 return BFA_STATUS_TRUNK_ENABLED
;
3489 if ((speed
!= BFA_PORT_SPEED_AUTO
) && (speed
> fcport
->speed_sup
)) {
3490 bfa_trc(bfa
, fcport
->speed_sup
);
3491 return BFA_STATUS_UNSUPP_SPEED
;
3494 fcport
->cfg
.speed
= speed
;
3496 return BFA_STATUS_OK
;
3500 * Get current speed.
3503 bfa_fcport_get_speed(struct bfa_s
*bfa
)
3505 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3507 return fcport
->speed
;
3511 * Configure port topology.
3514 bfa_fcport_cfg_topology(struct bfa_s
*bfa
, enum bfa_port_topology topology
)
3516 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3518 bfa_trc(bfa
, topology
);
3519 bfa_trc(bfa
, fcport
->cfg
.topology
);
3522 case BFA_PORT_TOPOLOGY_P2P
:
3523 case BFA_PORT_TOPOLOGY_LOOP
:
3524 case BFA_PORT_TOPOLOGY_AUTO
:
3528 return BFA_STATUS_EINVAL
;
3531 fcport
->cfg
.topology
= topology
;
3532 return BFA_STATUS_OK
;
3536 * Get current topology.
3538 enum bfa_port_topology
3539 bfa_fcport_get_topology(struct bfa_s
*bfa
)
3541 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3543 return fcport
->topology
;
3547 bfa_fcport_cfg_hardalpa(struct bfa_s
*bfa
, u8 alpa
)
3549 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3552 bfa_trc(bfa
, fcport
->cfg
.cfg_hardalpa
);
3553 bfa_trc(bfa
, fcport
->cfg
.hardalpa
);
3555 fcport
->cfg
.cfg_hardalpa
= BFA_TRUE
;
3556 fcport
->cfg
.hardalpa
= alpa
;
3558 return BFA_STATUS_OK
;
3562 bfa_fcport_clr_hardalpa(struct bfa_s
*bfa
)
3564 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3566 bfa_trc(bfa
, fcport
->cfg
.cfg_hardalpa
);
3567 bfa_trc(bfa
, fcport
->cfg
.hardalpa
);
3569 fcport
->cfg
.cfg_hardalpa
= BFA_FALSE
;
3570 return BFA_STATUS_OK
;
3574 bfa_fcport_get_hardalpa(struct bfa_s
*bfa
, u8
*alpa
)
3576 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3578 *alpa
= fcport
->cfg
.hardalpa
;
3579 return fcport
->cfg
.cfg_hardalpa
;
3583 bfa_fcport_get_myalpa(struct bfa_s
*bfa
)
3585 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3587 return fcport
->myalpa
;
3591 bfa_fcport_cfg_maxfrsize(struct bfa_s
*bfa
, u16 maxfrsize
)
3593 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3595 bfa_trc(bfa
, maxfrsize
);
3596 bfa_trc(bfa
, fcport
->cfg
.maxfrsize
);
3599 if ((maxfrsize
> FC_MAX_PDUSZ
) || (maxfrsize
< FC_MIN_PDUSZ
))
3600 return BFA_STATUS_INVLD_DFSZ
;
3602 /* power of 2, if not the max frame size of 2112 */
3603 if ((maxfrsize
!= FC_MAX_PDUSZ
) && (maxfrsize
& (maxfrsize
- 1)))
3604 return BFA_STATUS_INVLD_DFSZ
;
3606 fcport
->cfg
.maxfrsize
= maxfrsize
;
3607 return BFA_STATUS_OK
;
3611 bfa_fcport_get_maxfrsize(struct bfa_s
*bfa
)
3613 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3615 return fcport
->cfg
.maxfrsize
;
3619 bfa_fcport_get_rx_bbcredit(struct bfa_s
*bfa
)
3621 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3623 return fcport
->cfg
.rx_bbcredit
;
3627 bfa_fcport_set_tx_bbcredit(struct bfa_s
*bfa
, u16 tx_bbcredit
)
3629 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3631 fcport
->cfg
.tx_bbcredit
= (u8
)tx_bbcredit
;
3632 bfa_fcport_send_txcredit(fcport
);
3636 * Get port attributes.
3640 bfa_fcport_get_wwn(struct bfa_s
*bfa
, bfa_boolean_t node
)
3642 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3644 return fcport
->nwwn
;
3646 return fcport
->pwwn
;
3650 bfa_fcport_get_attr(struct bfa_s
*bfa
, struct bfa_port_attr_s
*attr
)
3652 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3654 memset(attr
, 0, sizeof(struct bfa_port_attr_s
));
3656 attr
->nwwn
= fcport
->nwwn
;
3657 attr
->pwwn
= fcport
->pwwn
;
3659 attr
->factorypwwn
= bfa
->ioc
.attr
->mfg_pwwn
;
3660 attr
->factorynwwn
= bfa
->ioc
.attr
->mfg_nwwn
;
3662 memcpy(&attr
->pport_cfg
, &fcport
->cfg
,
3663 sizeof(struct bfa_port_cfg_s
));
3664 /* speed attributes */
3665 attr
->pport_cfg
.speed
= fcport
->cfg
.speed
;
3666 attr
->speed_supported
= fcport
->speed_sup
;
3667 attr
->speed
= fcport
->speed
;
3668 attr
->cos_supported
= FC_CLASS_3
;
3670 /* topology attributes */
3671 attr
->pport_cfg
.topology
= fcport
->cfg
.topology
;
3672 attr
->topology
= fcport
->topology
;
3673 attr
->pport_cfg
.trunked
= fcport
->cfg
.trunked
;
3675 /* beacon attributes */
3676 attr
->beacon
= fcport
->beacon
;
3677 attr
->link_e2e_beacon
= fcport
->link_e2e_beacon
;
3678 attr
->plog_enabled
= (bfa_boolean_t
)fcport
->bfa
->plog
->plog_enabled
;
3679 attr
->io_profile
= bfa_fcpim_get_io_profile(fcport
->bfa
);
3681 attr
->pport_cfg
.path_tov
= bfa_fcpim_path_tov_get(bfa
);
3682 attr
->pport_cfg
.q_depth
= bfa_fcpim_qdepth_get(bfa
);
3683 attr
->port_state
= bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
);
3684 if (bfa_ioc_is_disabled(&fcport
->bfa
->ioc
))
3685 attr
->port_state
= BFA_PORT_ST_IOCDIS
;
3686 else if (bfa_ioc_fw_mismatch(&fcport
->bfa
->ioc
))
3687 attr
->port_state
= BFA_PORT_ST_FWMISMATCH
;
3690 attr
->fcoe_vlan
= fcport
->fcoe_vlan
;
3693 #define BFA_FCPORT_STATS_TOV 1000
3696 * Fetch port statistics (FCQoS or FCoE).
3699 bfa_fcport_get_stats(struct bfa_s
*bfa
, union bfa_fcport_stats_u
*stats
,
3700 bfa_cb_port_t cbfn
, void *cbarg
)
3702 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3704 if (fcport
->stats_busy
) {
3705 bfa_trc(bfa
, fcport
->stats_busy
);
3706 return BFA_STATUS_DEVBUSY
;
3709 fcport
->stats_busy
= BFA_TRUE
;
3710 fcport
->stats_ret
= stats
;
3711 fcport
->stats_cbfn
= cbfn
;
3712 fcport
->stats_cbarg
= cbarg
;
3714 bfa_fcport_send_stats_get(fcport
);
3716 bfa_timer_start(bfa
, &fcport
->timer
, bfa_fcport_stats_get_timeout
,
3717 fcport
, BFA_FCPORT_STATS_TOV
);
3718 return BFA_STATUS_OK
;
3722 * Reset port statistics (FCQoS or FCoE).
3725 bfa_fcport_clear_stats(struct bfa_s
*bfa
, bfa_cb_port_t cbfn
, void *cbarg
)
3727 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3729 if (fcport
->stats_busy
) {
3730 bfa_trc(bfa
, fcport
->stats_busy
);
3731 return BFA_STATUS_DEVBUSY
;
3734 fcport
->stats_busy
= BFA_TRUE
;
3735 fcport
->stats_cbfn
= cbfn
;
3736 fcport
->stats_cbarg
= cbarg
;
3738 bfa_fcport_send_stats_clear(fcport
);
3740 bfa_timer_start(bfa
, &fcport
->timer
, bfa_fcport_stats_clr_timeout
,
3741 fcport
, BFA_FCPORT_STATS_TOV
);
3742 return BFA_STATUS_OK
;
3747 * Fetch port attributes.
3750 bfa_fcport_is_disabled(struct bfa_s
*bfa
)
3752 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3754 return bfa_sm_to_state(hal_port_sm_table
, fcport
->sm
) ==
3755 BFA_PORT_ST_DISABLED
;
3760 bfa_fcport_is_ratelim(struct bfa_s
*bfa
)
3762 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3764 return fcport
->cfg
.ratelimit
? BFA_TRUE
: BFA_FALSE
;
3769 * Get default minimum ratelim speed
3772 bfa_fcport_get_ratelim_speed(struct bfa_s
*bfa
)
3774 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3776 bfa_trc(bfa
, fcport
->cfg
.trl_def_speed
);
3777 return fcport
->cfg
.trl_def_speed
;
3782 bfa_fcport_is_linkup(struct bfa_s
*bfa
)
3784 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3786 return (!fcport
->cfg
.trunked
&&
3787 bfa_sm_cmp_state(fcport
, bfa_fcport_sm_linkup
)) ||
3788 (fcport
->cfg
.trunked
&&
3789 fcport
->trunk
.attr
.state
== BFA_TRUNK_ONLINE
);
3793 bfa_fcport_is_qos_enabled(struct bfa_s
*bfa
)
3795 struct bfa_fcport_s
*fcport
= BFA_FCPORT_MOD(bfa
);
3797 return fcport
->cfg
.qos_enabled
;
3801 * Rport State machine functions
3804 * Beginning state, only online event expected.
3807 bfa_rport_sm_uninit(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3809 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3810 bfa_trc(rp
->bfa
, event
);
3813 case BFA_RPORT_SM_CREATE
:
3814 bfa_stats(rp
, sm_un_cr
);
3815 bfa_sm_set_state(rp
, bfa_rport_sm_created
);
3819 bfa_stats(rp
, sm_un_unexp
);
3820 bfa_sm_fault(rp
->bfa
, event
);
3825 bfa_rport_sm_created(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3827 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3828 bfa_trc(rp
->bfa
, event
);
3831 case BFA_RPORT_SM_ONLINE
:
3832 bfa_stats(rp
, sm_cr_on
);
3833 if (bfa_rport_send_fwcreate(rp
))
3834 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
3836 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
3839 case BFA_RPORT_SM_DELETE
:
3840 bfa_stats(rp
, sm_cr_del
);
3841 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
3845 case BFA_RPORT_SM_HWFAIL
:
3846 bfa_stats(rp
, sm_cr_hwf
);
3847 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3851 bfa_stats(rp
, sm_cr_unexp
);
3852 bfa_sm_fault(rp
->bfa
, event
);
3857 * Waiting for rport create response from firmware.
3860 bfa_rport_sm_fwcreate(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3862 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3863 bfa_trc(rp
->bfa
, event
);
3866 case BFA_RPORT_SM_FWRSP
:
3867 bfa_stats(rp
, sm_fwc_rsp
);
3868 bfa_sm_set_state(rp
, bfa_rport_sm_online
);
3869 bfa_rport_online_cb(rp
);
3872 case BFA_RPORT_SM_DELETE
:
3873 bfa_stats(rp
, sm_fwc_del
);
3874 bfa_sm_set_state(rp
, bfa_rport_sm_delete_pending
);
3877 case BFA_RPORT_SM_OFFLINE
:
3878 bfa_stats(rp
, sm_fwc_off
);
3879 bfa_sm_set_state(rp
, bfa_rport_sm_offline_pending
);
3882 case BFA_RPORT_SM_HWFAIL
:
3883 bfa_stats(rp
, sm_fwc_hwf
);
3884 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3888 bfa_stats(rp
, sm_fwc_unexp
);
3889 bfa_sm_fault(rp
->bfa
, event
);
3894 * Request queue is full, awaiting queue resume to send create request.
3897 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3899 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3900 bfa_trc(rp
->bfa
, event
);
3903 case BFA_RPORT_SM_QRESUME
:
3904 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
3905 bfa_rport_send_fwcreate(rp
);
3908 case BFA_RPORT_SM_DELETE
:
3909 bfa_stats(rp
, sm_fwc_del
);
3910 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
3911 bfa_reqq_wcancel(&rp
->reqq_wait
);
3915 case BFA_RPORT_SM_OFFLINE
:
3916 bfa_stats(rp
, sm_fwc_off
);
3917 bfa_sm_set_state(rp
, bfa_rport_sm_offline
);
3918 bfa_reqq_wcancel(&rp
->reqq_wait
);
3919 bfa_rport_offline_cb(rp
);
3922 case BFA_RPORT_SM_HWFAIL
:
3923 bfa_stats(rp
, sm_fwc_hwf
);
3924 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3925 bfa_reqq_wcancel(&rp
->reqq_wait
);
3929 bfa_stats(rp
, sm_fwc_unexp
);
3930 bfa_sm_fault(rp
->bfa
, event
);
3935 * Online state - normal parking state.
3938 bfa_rport_sm_online(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
3940 struct bfi_rport_qos_scn_s
*qos_scn
;
3942 bfa_trc(rp
->bfa
, rp
->rport_tag
);
3943 bfa_trc(rp
->bfa
, event
);
3946 case BFA_RPORT_SM_OFFLINE
:
3947 bfa_stats(rp
, sm_on_off
);
3948 if (bfa_rport_send_fwdelete(rp
))
3949 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
3951 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete_qfull
);
3954 case BFA_RPORT_SM_DELETE
:
3955 bfa_stats(rp
, sm_on_del
);
3956 if (bfa_rport_send_fwdelete(rp
))
3957 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
3959 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
3962 case BFA_RPORT_SM_HWFAIL
:
3963 bfa_stats(rp
, sm_on_hwf
);
3964 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
3967 case BFA_RPORT_SM_SET_SPEED
:
3968 bfa_rport_send_fwspeed(rp
);
3971 case BFA_RPORT_SM_QOS_SCN
:
3972 qos_scn
= (struct bfi_rport_qos_scn_s
*) rp
->event_arg
.fw_msg
;
3973 rp
->qos_attr
= qos_scn
->new_qos_attr
;
3974 bfa_trc(rp
->bfa
, qos_scn
->old_qos_attr
.qos_flow_id
);
3975 bfa_trc(rp
->bfa
, qos_scn
->new_qos_attr
.qos_flow_id
);
3976 bfa_trc(rp
->bfa
, qos_scn
->old_qos_attr
.qos_priority
);
3977 bfa_trc(rp
->bfa
, qos_scn
->new_qos_attr
.qos_priority
);
3979 qos_scn
->old_qos_attr
.qos_flow_id
=
3980 be32_to_cpu(qos_scn
->old_qos_attr
.qos_flow_id
);
3981 qos_scn
->new_qos_attr
.qos_flow_id
=
3982 be32_to_cpu(qos_scn
->new_qos_attr
.qos_flow_id
);
3984 if (qos_scn
->old_qos_attr
.qos_flow_id
!=
3985 qos_scn
->new_qos_attr
.qos_flow_id
)
3986 bfa_cb_rport_qos_scn_flowid(rp
->rport_drv
,
3987 qos_scn
->old_qos_attr
,
3988 qos_scn
->new_qos_attr
);
3989 if (qos_scn
->old_qos_attr
.qos_priority
!=
3990 qos_scn
->new_qos_attr
.qos_priority
)
3991 bfa_cb_rport_qos_scn_prio(rp
->rport_drv
,
3992 qos_scn
->old_qos_attr
,
3993 qos_scn
->new_qos_attr
);
3997 bfa_stats(rp
, sm_on_unexp
);
3998 bfa_sm_fault(rp
->bfa
, event
);
4003 * Firmware rport is being deleted - awaiting f/w response.
4006 bfa_rport_sm_fwdelete(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4008 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4009 bfa_trc(rp
->bfa
, event
);
4012 case BFA_RPORT_SM_FWRSP
:
4013 bfa_stats(rp
, sm_fwd_rsp
);
4014 bfa_sm_set_state(rp
, bfa_rport_sm_offline
);
4015 bfa_rport_offline_cb(rp
);
4018 case BFA_RPORT_SM_DELETE
:
4019 bfa_stats(rp
, sm_fwd_del
);
4020 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4023 case BFA_RPORT_SM_HWFAIL
:
4024 bfa_stats(rp
, sm_fwd_hwf
);
4025 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4026 bfa_rport_offline_cb(rp
);
4030 bfa_stats(rp
, sm_fwd_unexp
);
4031 bfa_sm_fault(rp
->bfa
, event
);
4036 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4038 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4039 bfa_trc(rp
->bfa
, event
);
4042 case BFA_RPORT_SM_QRESUME
:
4043 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
4044 bfa_rport_send_fwdelete(rp
);
4047 case BFA_RPORT_SM_DELETE
:
4048 bfa_stats(rp
, sm_fwd_del
);
4049 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
4052 case BFA_RPORT_SM_HWFAIL
:
4053 bfa_stats(rp
, sm_fwd_hwf
);
4054 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4055 bfa_reqq_wcancel(&rp
->reqq_wait
);
4056 bfa_rport_offline_cb(rp
);
4060 bfa_stats(rp
, sm_fwd_unexp
);
4061 bfa_sm_fault(rp
->bfa
, event
);
4069 bfa_rport_sm_offline(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4071 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4072 bfa_trc(rp
->bfa
, event
);
4075 case BFA_RPORT_SM_DELETE
:
4076 bfa_stats(rp
, sm_off_del
);
4077 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4081 case BFA_RPORT_SM_ONLINE
:
4082 bfa_stats(rp
, sm_off_on
);
4083 if (bfa_rport_send_fwcreate(rp
))
4084 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
4086 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
4089 case BFA_RPORT_SM_HWFAIL
:
4090 bfa_stats(rp
, sm_off_hwf
);
4091 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4095 bfa_stats(rp
, sm_off_unexp
);
4096 bfa_sm_fault(rp
->bfa
, event
);
4101 * Rport is deleted, waiting for firmware response to delete.
4104 bfa_rport_sm_deleting(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4106 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4107 bfa_trc(rp
->bfa
, event
);
4110 case BFA_RPORT_SM_FWRSP
:
4111 bfa_stats(rp
, sm_del_fwrsp
);
4112 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4116 case BFA_RPORT_SM_HWFAIL
:
4117 bfa_stats(rp
, sm_del_hwf
);
4118 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4123 bfa_sm_fault(rp
->bfa
, event
);
4128 bfa_rport_sm_deleting_qfull(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4130 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4131 bfa_trc(rp
->bfa
, event
);
4134 case BFA_RPORT_SM_QRESUME
:
4135 bfa_stats(rp
, sm_del_fwrsp
);
4136 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4137 bfa_rport_send_fwdelete(rp
);
4140 case BFA_RPORT_SM_HWFAIL
:
4141 bfa_stats(rp
, sm_del_hwf
);
4142 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4143 bfa_reqq_wcancel(&rp
->reqq_wait
);
4148 bfa_sm_fault(rp
->bfa
, event
);
4153 * Waiting for rport create response from firmware. A delete is pending.
4156 bfa_rport_sm_delete_pending(struct bfa_rport_s
*rp
,
4157 enum bfa_rport_event event
)
4159 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4160 bfa_trc(rp
->bfa
, event
);
4163 case BFA_RPORT_SM_FWRSP
:
4164 bfa_stats(rp
, sm_delp_fwrsp
);
4165 if (bfa_rport_send_fwdelete(rp
))
4166 bfa_sm_set_state(rp
, bfa_rport_sm_deleting
);
4168 bfa_sm_set_state(rp
, bfa_rport_sm_deleting_qfull
);
4171 case BFA_RPORT_SM_HWFAIL
:
4172 bfa_stats(rp
, sm_delp_hwf
);
4173 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4178 bfa_stats(rp
, sm_delp_unexp
);
4179 bfa_sm_fault(rp
->bfa
, event
);
4184 * Waiting for rport create response from firmware. Rport offline is pending.
4187 bfa_rport_sm_offline_pending(struct bfa_rport_s
*rp
,
4188 enum bfa_rport_event event
)
4190 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4191 bfa_trc(rp
->bfa
, event
);
4194 case BFA_RPORT_SM_FWRSP
:
4195 bfa_stats(rp
, sm_offp_fwrsp
);
4196 if (bfa_rport_send_fwdelete(rp
))
4197 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete
);
4199 bfa_sm_set_state(rp
, bfa_rport_sm_fwdelete_qfull
);
4202 case BFA_RPORT_SM_DELETE
:
4203 bfa_stats(rp
, sm_offp_del
);
4204 bfa_sm_set_state(rp
, bfa_rport_sm_delete_pending
);
4207 case BFA_RPORT_SM_HWFAIL
:
4208 bfa_stats(rp
, sm_offp_hwf
);
4209 bfa_sm_set_state(rp
, bfa_rport_sm_iocdisable
);
4213 bfa_stats(rp
, sm_offp_unexp
);
4214 bfa_sm_fault(rp
->bfa
, event
);
4222 bfa_rport_sm_iocdisable(struct bfa_rport_s
*rp
, enum bfa_rport_event event
)
4224 bfa_trc(rp
->bfa
, rp
->rport_tag
);
4225 bfa_trc(rp
->bfa
, event
);
4228 case BFA_RPORT_SM_OFFLINE
:
4229 bfa_stats(rp
, sm_iocd_off
);
4230 bfa_rport_offline_cb(rp
);
4233 case BFA_RPORT_SM_DELETE
:
4234 bfa_stats(rp
, sm_iocd_del
);
4235 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4239 case BFA_RPORT_SM_ONLINE
:
4240 bfa_stats(rp
, sm_iocd_on
);
4241 if (bfa_rport_send_fwcreate(rp
))
4242 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate
);
4244 bfa_sm_set_state(rp
, bfa_rport_sm_fwcreate_qfull
);
4247 case BFA_RPORT_SM_HWFAIL
:
4251 bfa_stats(rp
, sm_iocd_unexp
);
4252 bfa_sm_fault(rp
->bfa
, event
);
4259 * bfa_rport_private BFA rport private functions
4263 __bfa_cb_rport_online(void *cbarg
, bfa_boolean_t complete
)
4265 struct bfa_rport_s
*rp
= cbarg
;
4268 bfa_cb_rport_online(rp
->rport_drv
);
4272 __bfa_cb_rport_offline(void *cbarg
, bfa_boolean_t complete
)
4274 struct bfa_rport_s
*rp
= cbarg
;
4277 bfa_cb_rport_offline(rp
->rport_drv
);
4281 bfa_rport_qresume(void *cbarg
)
4283 struct bfa_rport_s
*rp
= cbarg
;
4285 bfa_sm_send_event(rp
, BFA_RPORT_SM_QRESUME
);
4289 bfa_rport_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
4292 if (cfg
->fwcfg
.num_rports
< BFA_RPORT_MIN
)
4293 cfg
->fwcfg
.num_rports
= BFA_RPORT_MIN
;
4295 *km_len
+= cfg
->fwcfg
.num_rports
* sizeof(struct bfa_rport_s
);
4299 bfa_rport_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4300 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
4302 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(bfa
);
4303 struct bfa_rport_s
*rp
;
4306 INIT_LIST_HEAD(&mod
->rp_free_q
);
4307 INIT_LIST_HEAD(&mod
->rp_active_q
);
4309 rp
= (struct bfa_rport_s
*) bfa_meminfo_kva(meminfo
);
4311 mod
->num_rports
= cfg
->fwcfg
.num_rports
;
4313 WARN_ON(!mod
->num_rports
||
4314 (mod
->num_rports
& (mod
->num_rports
- 1)));
4316 for (i
= 0; i
< mod
->num_rports
; i
++, rp
++) {
4317 memset(rp
, 0, sizeof(struct bfa_rport_s
));
4320 bfa_sm_set_state(rp
, bfa_rport_sm_uninit
);
4326 list_add_tail(&rp
->qe
, &mod
->rp_free_q
);
4328 bfa_reqq_winit(&rp
->reqq_wait
, bfa_rport_qresume
, rp
);
4334 bfa_meminfo_kva(meminfo
) = (u8
*) rp
;
/* Module detach: nothing to release for the rport module. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
/* Module start: no per-start work for the rport module. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
/* Module stop: no per-stop work for the rport module. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4353 bfa_rport_iocdisable(struct bfa_s
*bfa
)
4355 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(bfa
);
4356 struct bfa_rport_s
*rport
;
4357 struct list_head
*qe
, *qen
;
4359 list_for_each_safe(qe
, qen
, &mod
->rp_active_q
) {
4360 rport
= (struct bfa_rport_s
*) qe
;
4361 bfa_sm_send_event(rport
, BFA_RPORT_SM_HWFAIL
);
4365 static struct bfa_rport_s
*
4366 bfa_rport_alloc(struct bfa_rport_mod_s
*mod
)
4368 struct bfa_rport_s
*rport
;
4370 bfa_q_deq(&mod
->rp_free_q
, &rport
);
4372 list_add_tail(&rport
->qe
, &mod
->rp_active_q
);
4378 bfa_rport_free(struct bfa_rport_s
*rport
)
4380 struct bfa_rport_mod_s
*mod
= BFA_RPORT_MOD(rport
->bfa
);
4382 WARN_ON(!bfa_q_is_on_q(&mod
->rp_active_q
, rport
));
4383 list_del(&rport
->qe
);
4384 list_add_tail(&rport
->qe
, &mod
->rp_free_q
);
4387 static bfa_boolean_t
4388 bfa_rport_send_fwcreate(struct bfa_rport_s
*rp
)
4390 struct bfi_rport_create_req_s
*m
;
4393 * check for room in queue to send request now
4395 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4397 bfa_reqq_wait(rp
->bfa
, BFA_REQQ_RPORT
, &rp
->reqq_wait
);
4401 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_CREATE_REQ
,
4402 bfa_lpuid(rp
->bfa
));
4403 m
->bfa_handle
= rp
->rport_tag
;
4404 m
->max_frmsz
= cpu_to_be16(rp
->rport_info
.max_frmsz
);
4405 m
->pid
= rp
->rport_info
.pid
;
4406 m
->lp_tag
= rp
->rport_info
.lp_tag
;
4407 m
->local_pid
= rp
->rport_info
.local_pid
;
4408 m
->fc_class
= rp
->rport_info
.fc_class
;
4409 m
->vf_en
= rp
->rport_info
.vf_en
;
4410 m
->vf_id
= rp
->rport_info
.vf_id
;
4411 m
->cisc
= rp
->rport_info
.cisc
;
4414 * queue I/O message to firmware
4416 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4420 static bfa_boolean_t
4421 bfa_rport_send_fwdelete(struct bfa_rport_s
*rp
)
4423 struct bfi_rport_delete_req_s
*m
;
4426 * check for room in queue to send request now
4428 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4430 bfa_reqq_wait(rp
->bfa
, BFA_REQQ_RPORT
, &rp
->reqq_wait
);
4434 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_DELETE_REQ
,
4435 bfa_lpuid(rp
->bfa
));
4436 m
->fw_handle
= rp
->fw_handle
;
4439 * queue I/O message to firmware
4441 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4445 static bfa_boolean_t
4446 bfa_rport_send_fwspeed(struct bfa_rport_s
*rp
)
4448 struct bfa_rport_speed_req_s
*m
;
4451 * check for room in queue to send request now
4453 m
= bfa_reqq_next(rp
->bfa
, BFA_REQQ_RPORT
);
4455 bfa_trc(rp
->bfa
, rp
->rport_info
.speed
);
4459 bfi_h2i_set(m
->mh
, BFI_MC_RPORT
, BFI_RPORT_H2I_SET_SPEED_REQ
,
4460 bfa_lpuid(rp
->bfa
));
4461 m
->fw_handle
= rp
->fw_handle
;
4462 m
->speed
= (u8
)rp
->rport_info
.speed
;
4465 * queue I/O message to firmware
4467 bfa_reqq_produce(rp
->bfa
, BFA_REQQ_RPORT
);
4478 * Rport interrupt processing.
4481 bfa_rport_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
4483 union bfi_rport_i2h_msg_u msg
;
4484 struct bfa_rport_s
*rp
;
4486 bfa_trc(bfa
, m
->mhdr
.msg_id
);
4490 switch (m
->mhdr
.msg_id
) {
4491 case BFI_RPORT_I2H_CREATE_RSP
:
4492 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.create_rsp
->bfa_handle
);
4493 rp
->fw_handle
= msg
.create_rsp
->fw_handle
;
4494 rp
->qos_attr
= msg
.create_rsp
->qos_attr
;
4495 WARN_ON(msg
.create_rsp
->status
!= BFA_STATUS_OK
);
4496 bfa_sm_send_event(rp
, BFA_RPORT_SM_FWRSP
);
4499 case BFI_RPORT_I2H_DELETE_RSP
:
4500 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.delete_rsp
->bfa_handle
);
4501 WARN_ON(msg
.delete_rsp
->status
!= BFA_STATUS_OK
);
4502 bfa_sm_send_event(rp
, BFA_RPORT_SM_FWRSP
);
4505 case BFI_RPORT_I2H_QOS_SCN
:
4506 rp
= BFA_RPORT_FROM_TAG(bfa
, msg
.qos_scn_evt
->bfa_handle
);
4507 rp
->event_arg
.fw_msg
= msg
.qos_scn_evt
;
4508 bfa_sm_send_event(rp
, BFA_RPORT_SM_QOS_SCN
);
4512 bfa_trc(bfa
, m
->mhdr
.msg_id
);
4523 struct bfa_rport_s
*
4524 bfa_rport_create(struct bfa_s
*bfa
, void *rport_drv
)
4526 struct bfa_rport_s
*rp
;
4528 rp
= bfa_rport_alloc(BFA_RPORT_MOD(bfa
));
4534 rp
->rport_drv
= rport_drv
;
4535 memset(&rp
->stats
, 0, sizeof(rp
->stats
));
4537 WARN_ON(!bfa_sm_cmp_state(rp
, bfa_rport_sm_uninit
));
4538 bfa_sm_send_event(rp
, BFA_RPORT_SM_CREATE
);
4544 bfa_rport_online(struct bfa_rport_s
*rport
, struct bfa_rport_info_s
*rport_info
)
4546 WARN_ON(rport_info
->max_frmsz
== 0);
4549 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4550 * responses. Default to minimum size.
4552 if (rport_info
->max_frmsz
== 0) {
4553 bfa_trc(rport
->bfa
, rport
->rport_tag
);
4554 rport_info
->max_frmsz
= FC_MIN_PDUSZ
;
4557 rport
->rport_info
= *rport_info
;
4558 bfa_sm_send_event(rport
, BFA_RPORT_SM_ONLINE
);
4562 bfa_rport_speed(struct bfa_rport_s
*rport
, enum bfa_port_speed speed
)
4564 WARN_ON(speed
== 0);
4565 WARN_ON(speed
== BFA_PORT_SPEED_AUTO
);
4567 rport
->rport_info
.speed
= speed
;
4568 bfa_sm_send_event(rport
, BFA_RPORT_SM_SET_SPEED
);
4573 * SGPG related functions
4577 * Compute and return memory needed by FCP(im) module.
4580 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*km_len
,
4583 if (cfg
->drvcfg
.num_sgpgs
< BFA_SGPG_MIN
)
4584 cfg
->drvcfg
.num_sgpgs
= BFA_SGPG_MIN
;
4586 *km_len
+= (cfg
->drvcfg
.num_sgpgs
+ 1) * sizeof(struct bfa_sgpg_s
);
4587 *dm_len
+= (cfg
->drvcfg
.num_sgpgs
+ 1) * sizeof(struct bfi_sgpg_s
);
4592 bfa_sgpg_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4593 struct bfa_meminfo_s
*minfo
, struct bfa_pcidev_s
*pcidev
)
4595 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4597 struct bfa_sgpg_s
*hsgpg
;
4598 struct bfi_sgpg_s
*sgpg
;
4603 union bfi_addr_u addr
;
4604 } sgpg_pa
, sgpg_pa_tmp
;
4606 INIT_LIST_HEAD(&mod
->sgpg_q
);
4607 INIT_LIST_HEAD(&mod
->sgpg_wait_q
);
4609 bfa_trc(bfa
, cfg
->drvcfg
.num_sgpgs
);
4611 mod
->num_sgpgs
= cfg
->drvcfg
.num_sgpgs
;
4612 mod
->sgpg_arr_pa
= bfa_meminfo_dma_phys(minfo
);
4613 align_len
= (BFA_SGPG_ROUNDUP(mod
->sgpg_arr_pa
) - mod
->sgpg_arr_pa
);
4614 mod
->sgpg_arr_pa
+= align_len
;
4615 mod
->hsgpg_arr
= (struct bfa_sgpg_s
*) (bfa_meminfo_kva(minfo
) +
4617 mod
->sgpg_arr
= (struct bfi_sgpg_s
*) (bfa_meminfo_dma_virt(minfo
) +
4620 hsgpg
= mod
->hsgpg_arr
;
4621 sgpg
= mod
->sgpg_arr
;
4622 sgpg_pa
.pa
= mod
->sgpg_arr_pa
;
4623 mod
->free_sgpgs
= mod
->num_sgpgs
;
4625 WARN_ON(sgpg_pa
.pa
& (sizeof(struct bfi_sgpg_s
) - 1));
4627 for (i
= 0; i
< mod
->num_sgpgs
; i
++) {
4628 memset(hsgpg
, 0, sizeof(*hsgpg
));
4629 memset(sgpg
, 0, sizeof(*sgpg
));
4632 sgpg_pa_tmp
.pa
= bfa_sgaddr_le(sgpg_pa
.pa
);
4633 hsgpg
->sgpg_pa
= sgpg_pa_tmp
.addr
;
4634 list_add_tail(&hsgpg
->qe
, &mod
->sgpg_q
);
4638 sgpg_pa
.pa
+= sizeof(struct bfi_sgpg_s
);
4641 bfa_meminfo_kva(minfo
) = (u8
*) hsgpg
;
4642 bfa_meminfo_dma_virt(minfo
) = (u8
*) sgpg
;
4643 bfa_meminfo_dma_phys(minfo
) = sgpg_pa
.pa
;
/* Module detach: nothing to release for the SGPG module. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
/* Module start: no per-start work for the SGPG module. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
/* Module stop: no per-stop work for the SGPG module. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
/* IOC disable: no SGPG state depends on the IOC. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4667 bfa_sgpg_malloc(struct bfa_s
*bfa
, struct list_head
*sgpg_q
, int nsgpgs
)
4669 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4670 struct bfa_sgpg_s
*hsgpg
;
4673 if (mod
->free_sgpgs
< nsgpgs
)
4674 return BFA_STATUS_ENOMEM
;
4676 for (i
= 0; i
< nsgpgs
; i
++) {
4677 bfa_q_deq(&mod
->sgpg_q
, &hsgpg
);
4679 list_add_tail(&hsgpg
->qe
, sgpg_q
);
4682 mod
->free_sgpgs
-= nsgpgs
;
4683 return BFA_STATUS_OK
;
4687 bfa_sgpg_mfree(struct bfa_s
*bfa
, struct list_head
*sgpg_q
, int nsgpg
)
4689 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4690 struct bfa_sgpg_wqe_s
*wqe
;
4692 mod
->free_sgpgs
+= nsgpg
;
4693 WARN_ON(mod
->free_sgpgs
> mod
->num_sgpgs
);
4695 list_splice_tail_init(sgpg_q
, &mod
->sgpg_q
);
4697 if (list_empty(&mod
->sgpg_wait_q
))
4701 * satisfy as many waiting requests as possible
4704 wqe
= bfa_q_first(&mod
->sgpg_wait_q
);
4705 if (mod
->free_sgpgs
< wqe
->nsgpg
)
4706 nsgpg
= mod
->free_sgpgs
;
4709 bfa_sgpg_malloc(bfa
, &wqe
->sgpg_q
, nsgpg
);
4710 wqe
->nsgpg
-= nsgpg
;
4711 if (wqe
->nsgpg
== 0) {
4713 wqe
->cbfn(wqe
->cbarg
);
4715 } while (mod
->free_sgpgs
&& !list_empty(&mod
->sgpg_wait_q
));
4719 bfa_sgpg_wait(struct bfa_s
*bfa
, struct bfa_sgpg_wqe_s
*wqe
, int nsgpg
)
4721 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4723 WARN_ON(nsgpg
<= 0);
4724 WARN_ON(nsgpg
<= mod
->free_sgpgs
);
4726 wqe
->nsgpg_total
= wqe
->nsgpg
= nsgpg
;
4729 * allocate any left to this one first
4731 if (mod
->free_sgpgs
) {
4733 * no one else is waiting for SGPG
4735 WARN_ON(!list_empty(&mod
->sgpg_wait_q
));
4736 list_splice_tail_init(&mod
->sgpg_q
, &wqe
->sgpg_q
);
4737 wqe
->nsgpg
-= mod
->free_sgpgs
;
4738 mod
->free_sgpgs
= 0;
4741 list_add_tail(&wqe
->qe
, &mod
->sgpg_wait_q
);
4745 bfa_sgpg_wcancel(struct bfa_s
*bfa
, struct bfa_sgpg_wqe_s
*wqe
)
4747 struct bfa_sgpg_mod_s
*mod
= BFA_SGPG_MOD(bfa
);
4749 WARN_ON(!bfa_q_is_on_q(&mod
->sgpg_wait_q
, wqe
));
4752 if (wqe
->nsgpg_total
!= wqe
->nsgpg
)
4753 bfa_sgpg_mfree(bfa
, &wqe
->sgpg_q
,
4754 wqe
->nsgpg_total
- wqe
->nsgpg
);
4758 bfa_sgpg_winit(struct bfa_sgpg_wqe_s
*wqe
, void (*cbfn
) (void *cbarg
),
4761 INIT_LIST_HEAD(&wqe
->sgpg_q
);
4767 * UF related functions
4770 *****************************************************************************
4771 * Internal functions
4772 *****************************************************************************
4775 __bfa_cb_uf_recv(void *cbarg
, bfa_boolean_t complete
)
4777 struct bfa_uf_s
*uf
= cbarg
;
4778 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(uf
->bfa
);
4781 ufm
->ufrecv(ufm
->cbarg
, uf
);
4785 claim_uf_pbs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4789 ufm
->uf_pbs_kva
= (struct bfa_uf_buf_s
*) bfa_meminfo_dma_virt(mi
);
4790 ufm
->uf_pbs_pa
= bfa_meminfo_dma_phys(mi
);
4791 uf_pb_tot_sz
= BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s
) * ufm
->num_ufs
),
4794 bfa_meminfo_dma_virt(mi
) += uf_pb_tot_sz
;
4795 bfa_meminfo_dma_phys(mi
) += uf_pb_tot_sz
;
4797 memset((void *)ufm
->uf_pbs_kva
, 0, uf_pb_tot_sz
);
4801 claim_uf_post_msgs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4803 struct bfi_uf_buf_post_s
*uf_bp_msg
;
4804 struct bfi_sge_s
*sge
;
4805 union bfi_addr_u sga_zero
= { {0} };
4809 ufm
->uf_buf_posts
= (struct bfi_uf_buf_post_s
*) bfa_meminfo_kva(mi
);
4810 uf_bp_msg
= ufm
->uf_buf_posts
;
4812 for (i
= 0, uf_bp_msg
= ufm
->uf_buf_posts
; i
< ufm
->num_ufs
;
4814 memset(uf_bp_msg
, 0, sizeof(struct bfi_uf_buf_post_s
));
4816 uf_bp_msg
->buf_tag
= i
;
4817 buf_len
= sizeof(struct bfa_uf_buf_s
);
4818 uf_bp_msg
->buf_len
= cpu_to_be16(buf_len
);
4819 bfi_h2i_set(uf_bp_msg
->mh
, BFI_MC_UF
, BFI_UF_H2I_BUF_POST
,
4820 bfa_lpuid(ufm
->bfa
));
4822 sge
= uf_bp_msg
->sge
;
4823 sge
[0].sg_len
= buf_len
;
4824 sge
[0].flags
= BFI_SGE_DATA_LAST
;
4825 bfa_dma_addr_set(sge
[0].sga
, ufm_pbs_pa(ufm
, i
));
4828 sge
[1].sg_len
= buf_len
;
4829 sge
[1].flags
= BFI_SGE_PGDLEN
;
4830 sge
[1].sga
= sga_zero
;
4831 bfa_sge_to_be(&sge
[1]);
4835 * advance pointer beyond consumed memory
4837 bfa_meminfo_kva(mi
) = (u8
*) uf_bp_msg
;
4841 claim_ufs(struct bfa_uf_mod_s
*ufm
, struct bfa_meminfo_s
*mi
)
4844 struct bfa_uf_s
*uf
;
4847 * Claim block of memory for UF list
4849 ufm
->uf_list
= (struct bfa_uf_s
*) bfa_meminfo_kva(mi
);
4852 * Initialize UFs and queue it in UF free queue
4854 for (i
= 0, uf
= ufm
->uf_list
; i
< ufm
->num_ufs
; i
++, uf
++) {
4855 memset(uf
, 0, sizeof(struct bfa_uf_s
));
4858 uf
->pb_len
= sizeof(struct bfa_uf_buf_s
);
4859 uf
->buf_kva
= (void *)&ufm
->uf_pbs_kva
[i
];
4860 uf
->buf_pa
= ufm_pbs_pa(ufm
, i
);
4861 list_add_tail(&uf
->qe
, &ufm
->uf_free_q
);
4865 * advance memory pointer
4867 bfa_meminfo_kva(mi
) = (u8
*) uf
;
/*
 * Claim all UF module memory: posted buffers first (they anchor the
 * descriptors), then descriptors, then pre-built post messages.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4879 bfa_uf_meminfo(struct bfa_iocfc_cfg_s
*cfg
, u32
*ndm_len
, u32
*dm_len
)
4881 u32 num_ufs
= cfg
->fwcfg
.num_uf_bufs
;
4884 * dma-able memory for UF posted bufs
4886 *dm_len
+= BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s
) * num_ufs
),
4890 * kernel Virtual memory for UFs and UF buf post msg copies
4892 *ndm_len
+= sizeof(struct bfa_uf_s
) * num_ufs
;
4893 *ndm_len
+= sizeof(struct bfi_uf_buf_post_s
) * num_ufs
;
4897 bfa_uf_attach(struct bfa_s
*bfa
, void *bfad
, struct bfa_iocfc_cfg_s
*cfg
,
4898 struct bfa_meminfo_s
*meminfo
, struct bfa_pcidev_s
*pcidev
)
4900 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
4902 memset(ufm
, 0, sizeof(struct bfa_uf_mod_s
));
4904 ufm
->num_ufs
= cfg
->fwcfg
.num_uf_bufs
;
4905 INIT_LIST_HEAD(&ufm
->uf_free_q
);
4906 INIT_LIST_HEAD(&ufm
->uf_posted_q
);
4908 uf_mem_claim(ufm
, meminfo
);
/* Module detach: nothing to release for the UF module. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4916 static struct bfa_uf_s
*
4917 bfa_uf_get(struct bfa_uf_mod_s
*uf_mod
)
4919 struct bfa_uf_s
*uf
;
4921 bfa_q_deq(&uf_mod
->uf_free_q
, &uf
);
4926 bfa_uf_put(struct bfa_uf_mod_s
*uf_mod
, struct bfa_uf_s
*uf
)
4928 list_add_tail(&uf
->qe
, &uf_mod
->uf_free_q
);
4932 bfa_uf_post(struct bfa_uf_mod_s
*ufm
, struct bfa_uf_s
*uf
)
4934 struct bfi_uf_buf_post_s
*uf_post_msg
;
4936 uf_post_msg
= bfa_reqq_next(ufm
->bfa
, BFA_REQQ_FCXP
);
4938 return BFA_STATUS_FAILED
;
4940 memcpy(uf_post_msg
, &ufm
->uf_buf_posts
[uf
->uf_tag
],
4941 sizeof(struct bfi_uf_buf_post_s
));
4942 bfa_reqq_produce(ufm
->bfa
, BFA_REQQ_FCXP
);
4944 bfa_trc(ufm
->bfa
, uf
->uf_tag
);
4946 list_add_tail(&uf
->qe
, &ufm
->uf_posted_q
);
4947 return BFA_STATUS_OK
;
4951 bfa_uf_post_all(struct bfa_uf_mod_s
*uf_mod
)
4953 struct bfa_uf_s
*uf
;
4955 while ((uf
= bfa_uf_get(uf_mod
)) != NULL
) {
4956 if (bfa_uf_post(uf_mod
, uf
) != BFA_STATUS_OK
)
4962 uf_recv(struct bfa_s
*bfa
, struct bfi_uf_frm_rcvd_s
*m
)
4964 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
4965 u16 uf_tag
= m
->buf_tag
;
4966 struct bfa_uf_buf_s
*uf_buf
= &ufm
->uf_pbs_kva
[uf_tag
];
4967 struct bfa_uf_s
*uf
= &ufm
->uf_list
[uf_tag
];
4968 u8
*buf
= &uf_buf
->d
[0];
4969 struct fchs_s
*fchs
;
4971 m
->frm_len
= be16_to_cpu(m
->frm_len
);
4972 m
->xfr_len
= be16_to_cpu(m
->xfr_len
);
4974 fchs
= (struct fchs_s
*)uf_buf
;
4976 list_del(&uf
->qe
); /* dequeue from posted queue */
4979 uf
->data_len
= m
->xfr_len
;
4981 WARN_ON(uf
->data_len
< sizeof(struct fchs_s
));
4983 if (uf
->data_len
== sizeof(struct fchs_s
)) {
4984 bfa_plog_fchdr(bfa
->plog
, BFA_PL_MID_HAL_UF
, BFA_PL_EID_RX
,
4985 uf
->data_len
, (struct fchs_s
*)buf
);
4987 u32 pld_w0
= *((u32
*) (buf
+ sizeof(struct fchs_s
)));
4988 bfa_plog_fchdr_and_pl(bfa
->plog
, BFA_PL_MID_HAL_UF
,
4989 BFA_PL_EID_RX
, uf
->data_len
,
4990 (struct fchs_s
*)buf
, pld_w0
);
4994 __bfa_cb_uf_recv(uf
, BFA_TRUE
);
4996 bfa_cb_queue(bfa
, &uf
->hcb_qe
, __bfa_cb_uf_recv
, uf
);
/* Module stop: no per-stop work for the UF module. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5005 bfa_uf_iocdisable(struct bfa_s
*bfa
)
5007 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
5008 struct bfa_uf_s
*uf
;
5009 struct list_head
*qe
, *qen
;
5011 list_for_each_safe(qe
, qen
, &ufm
->uf_posted_q
) {
5012 uf
= (struct bfa_uf_s
*) qe
;
5014 bfa_uf_put(ufm
, uf
);
/* Module start: post all free UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5025 * Register handler for all unsolicted receive frames.
5027 * @param[in] bfa BFA instance
5028 * @param[in] ufrecv receive handler function
5029 * @param[in] cbarg receive handler arg
5032 bfa_uf_recv_register(struct bfa_s
*bfa
, bfa_cb_uf_recv_t ufrecv
, void *cbarg
)
5034 struct bfa_uf_mod_s
*ufm
= BFA_UF_MOD(bfa
);
5036 ufm
->ufrecv
= ufrecv
;
5041 * Free an unsolicited frame back to BFA.
5043 * @param[in] uf unsolicited frame to be freed
5048 bfa_uf_free(struct bfa_uf_s
*uf
)
5050 bfa_uf_put(BFA_UF_MOD(uf
->bfa
), uf
);
5051 bfa_uf_post_all(BFA_UF_MOD(uf
->bfa
));
5057 * uf_pub BFA uf module public functions
5060 bfa_uf_isr(struct bfa_s
*bfa
, struct bfi_msg_s
*msg
)
5062 bfa_trc(bfa
, msg
->mhdr
.msg_id
);
5064 switch (msg
->mhdr
.msg_id
) {
5065 case BFI_UF_I2H_FRM_RCVD
:
5066 uf_recv(bfa
, (struct bfi_uf_frm_rcvd_s
*) msg
);
5070 bfa_trc(bfa
, msg
->mhdr
.msg_id
);