/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);
/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
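/*
 * Note: the (num_itnims - 1) mask above only behaves as a cheap modulo
 * when num_itnims is a power of two, which the firmware configuration is
 * assumed to guarantee.
 */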
#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim) do {                                  \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);                        \
        list_del(&(__itnim)->qe);                                       \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)
#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);                   \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));                      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);                  \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));                     \
        }                                                               \
} while (0)
#define bfa_ioim_rp_wwn(__ioim)                                         \
        (((struct bfa_fcs_rport_s *)                                    \
         (__ioim)->itnim->rport->rport_drv)->pwwn)

#define bfa_ioim_lp_wwn(__ioim)                                         \
        ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),                  \
        (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)
#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);                     \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));                        \
        }                                                               \
} while (0)
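/*
 * bfa_itnim_online_cb/offline_cb/sler_cb above dispatch synchronously
 * when an FCS instance owns the itnim, and otherwise defer through
 * bfa_cb_queue() so the driver callback runs from the BFA completion
 * handling path.
 */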
enum bfa_ioim_lm_status {
        BFA_IOIM_LM_PRESENT = 1,
        BFA_IOIM_LM_LUN_NOT_SUP = 2,
        BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
        BFA_IOIM_LM_LUN_NOT_RDY = 4,
};

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};
/*
 * itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /* itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /* itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /* itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /* firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /* deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /* IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /* second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /* IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /* queue space available */
};
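/*
 * Each bfa_*_sm_* function in this file is one state of a function-pointer
 * state machine: bfa_sm_set_state() records the current state and
 * bfa_sm_send_event() invokes it with one of the events above.
 */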
/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)
/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START = 1,          /* io start request from host */
        BFA_IOIM_SM_COMP_GOOD = 2,      /* io good comp, resource free */
        BFA_IOIM_SM_COMP = 3,           /* io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG = 4,      /* io comp, resource is free */
        BFA_IOIM_SM_DONE = 5,           /* io comp, resource not free */
        BFA_IOIM_SM_FREE = 6,           /* io resource is freed */
        BFA_IOIM_SM_ABORT = 7,          /* abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP = 8,     /* abort from f/w */
        BFA_IOIM_SM_ABORT_DONE = 9,     /* abort completion from f/w */
        BFA_IOIM_SM_QRESUME = 10,       /* CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED = 11,     /* SG page allocation successful */
        BFA_IOIM_SM_SQRETRY = 12,       /* sequence recovery retry */
        BFA_IOIM_SM_HCB = 13,           /* bfa callback complete */
        BFA_IOIM_SM_CLEANUP = 14,       /* IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART = 15,       /* IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE = 16,        /* IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL = 17,        /* IOC h/w failure event */
        BFA_IOIM_SM_IOTOV = 18,         /* ITN offline TOV */
        BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
        BFA_IOIM_SM_LM_RPL_DC = 20,     /* lunmask report-lun data changed */
        BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);                                 \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);                    \
} while (0)
enum bfa_tskim_event {
        BFA_TSKIM_SM_START = 1,         /* TM command start */
        BFA_TSKIM_SM_DONE = 2,          /* TM completion */
        BFA_TSKIM_SM_QRESUME = 3,       /* resume after qfull */
        BFA_TSKIM_SM_HWFAIL = 5,        /* IOC h/w failure event */
        BFA_TSKIM_SM_HCB = 6,           /* BFA callback completion */
        BFA_TSKIM_SM_IOS_DONE = 7,      /* IO and sub TM completions */
        BFA_TSKIM_SM_CLEANUP = 8,       /* TM cleanup on ITN offline */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /* TM abort completion */
};
/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event);
/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp = fcp;
        fcpim->bfa = bfa;
        fcpim->num_itnims = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov = cfg->drvcfg.path_tov;
        fcpim->delay_comp = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}
static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
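/*
 * path_tov is kept internally in milliseconds: the setter above converts
 * from seconds and clamps to BFA_FCPIM_PATHTOV_MAX, and the getter below
 * converts back.
 */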
u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}
#define bfa_fcpim_add_iostats(__l, __r, __stats)                        \
        (__l->__stats += __r->__stats)
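/*
 * The __stats argument is a field name, not a value: each invocation in
 * bfa_fcpim_add_stats() below expands to lstats->field += rstats->field
 * for one counter.
 */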
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
        bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
        bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
        bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
        bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}
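/*
 * avg[idx] accumulates a running sum of latencies in jiffies per size
 * bucket; it is presumably divided by count[idx] when the profile is
 * reported to the user.
 */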
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}
bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats on all the itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}
bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}
/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}
/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pick up this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
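/*
 * The waiting counter above ties cleanup completion to the last
 * outstanding request: bfa_wc_up() is called once per IO/TM queued for
 * cleanup, bfa_itnim_iodone()/bfa_itnim_tskdone() count them back down,
 * and bfa_itnim_cleanp_comp() fires when the counter drains.
 */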
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
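/*
 * bfa_reqq_next() returns NULL when the request queue is full; the sender
 * then parks on reqq_wait and the corresponding *_qfull state retries the
 * send once BFA_ITNIM_SM_QRESUME is delivered by bfa_itnim_qresume().
 */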
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}
/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}
/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}
/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}
/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}
/*
 * Stop IO TOV timer and fail back pending IO requests if it was running.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}
/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
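/*
 * IOs held by the check above sit on the itnim pending_q: they are
 * restarted by bfa_itnim_iotov_online() if the rport comes back, or
 * failed back by bfa_itnim_iotov() when the path timeout fires.
 */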
#define bfa_io_lat_clock_res_div	HZ
#define bfa_io_lat_clock_res_mul	1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        itnim->ioprofile.io_profile_start_time =
                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}
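/*
 * Latency samples are recorded in jiffies; clock_res_mul/clock_res_div
 * (1000/HZ) let the consumer of the profile convert them to milliseconds.
 */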
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;
        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}
/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_abort, ioim);
                break;

        case BFA_IOIM_SM_LM_LUN_NOT_SUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_lm_lun_not_sup, ioim);
                break;

        case BFA_IOIM_SM_LM_RPL_DC:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_lm_rpl_dc, ioim);
                break;

        case BFA_IOIM_SM_LM_LUN_NOT_RDY:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_lm_lun_not_rdy, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                              __bfa_cb_ioim_good_comp, ioim);
                break;

        case BFA_IOIM_SM_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
                              ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                ioim->iosp->abort_explicit = BFA_TRUE;
                ioim->io_cbfn = __bfa_cb_ioim_abort;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_CLEANUP:
                ioim->iosp->abort_explicit = BFA_FALSE;
                ioim->io_cbfn = __bfa_cb_ioim_failed;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        case BFA_IOIM_SM_SQRETRY:
                if (bfa_ioim_maxretry_reached(ioim)) {
                        /* max retry reached, free IO */
                        bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                        bfa_ioim_move_to_comp_q(ioim);
                        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                        __bfa_cb_ioim_failed, ioim);
                        break;
                }
                /* waiting for IO tag resource free */
                bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_FREE:
                /* abts and rrq done. Now retry the IO with new tag */
                bfa_ioim_update_iotag(ioim);
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                ioim->iosp->abort_explicit = BFA_FALSE;
                ioim->io_cbfn = __bfa_cb_ioim_failed;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                         __bfa_cb_ioim_failed, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * In this state IO abort is done.
                 * Waiting for IO tag resource free.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
        case BFA_IOIM_SM_COMP:
        case BFA_IOIM_SM_DONE:
        case BFA_IOIM_SM_FREE:
                break;

        case BFA_IOIM_SM_ABORT_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_ABORT_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_COMP_UTAG:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_CLEANUP:
                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
                ioim->iosp->abort_explicit = BFA_FALSE;

                if (bfa_ioim_send_abort(ioim))
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                else {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                        bfa_stats(ioim->itnim, qwait);
                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
                                        &ioim->iosp->reqq_wait);
                }
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
        case BFA_IOIM_SM_COMP:
        case BFA_IOIM_SM_DONE:
        case BFA_IOIM_SM_FREE:
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO is already being aborted implicitly
                 */
                ioim->io_cbfn = __bfa_cb_ioim_abort;
                break;

        case BFA_IOIM_SM_ABORT_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_COMP_UTAG:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        case BFA_IOIM_SM_CLEANUP:
                /*
                 * IO can be in cleanup state already due to TM command.
                 * 2nd cleanup request comes from ITN offline event.
                 */
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_QRESUME:
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                bfa_ioim_send_ioreq(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_QRESUME:
                bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
                bfa_ioim_send_abort(ioim);
                break;

        case BFA_IOIM_SM_CLEANUP:
                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
                ioim->iosp->abort_explicit = BFA_FALSE;
                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                break;

        case BFA_IOIM_SM_COMP_GOOD:
        case BFA_IOIM_SM_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_QRESUME:
                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
                bfa_ioim_send_abort(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO is already being cleaned up implicitly
                 */
                ioim->io_cbfn = __bfa_cb_ioim_abort;
                break;

        case BFA_IOIM_SM_COMP_GOOD:
        case BFA_IOIM_SM_COMP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_DONE:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_HCB:
                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
                bfa_ioim_free(ioim);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}
2082 * IO bfa callback is pending. IO resource cannot be freed.
2084 static void
2085 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2087 bfa_trc(ioim->bfa, ioim->iotag);
2088 bfa_trc(ioim->bfa, event);
2090 switch (event) {
2091 case BFA_IOIM_SM_HCB:
2092 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2093 list_del(&ioim->qe);
2094 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2095 break;
2097 case BFA_IOIM_SM_FREE:
2098 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2099 break;
2101 case BFA_IOIM_SM_CLEANUP:
2102 bfa_ioim_notify_cleanup(ioim);
2103 break;
2105 case BFA_IOIM_SM_HWFAIL:
2106 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2107 break;
2109 default:
2110 bfa_sm_fault(ioim->bfa, event);
2115 * IO is completed, waiting resource free from firmware.
2117 static void
2118 bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2120 bfa_trc(ioim->bfa, ioim->iotag);
2121 bfa_trc(ioim->bfa, event);
2123 switch (event) {
2124 case BFA_IOIM_SM_FREE:
2125 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2126 bfa_ioim_free(ioim);
2127 break;
2129 case BFA_IOIM_SM_CLEANUP:
2130 bfa_ioim_notify_cleanup(ioim);
2131 break;
2133 case BFA_IOIM_SM_HWFAIL:
2134 break;
2136 default:
2137 bfa_sm_fault(ioim->bfa, event);
2142  * This is called from bfa_fcpim_start after the driver has completed
2143  * bfa_init() with the flash read. Now invalidate the stale content of the
2144  * lun mask (unit attention, rp tag and lp tag).
2146 static void
2147 bfa_ioim_lm_init(struct bfa_s *bfa)
2149 struct bfa_lun_mask_s *lunm_list;
2150 int i;
2152 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2153 return;
2155 lunm_list = bfa_get_lun_mask_list(bfa);
2156 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2157 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2158 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2159 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
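/*
 * Editor's note (sketch of the resulting state, not in the original
 * source): after this reset every entry keeps only its persisted
 * WWN/LUN identity, i.e.
 *
 *	lunm_list[i].ua     == BFA_IOIM_LM_UA_RESET
 *	lunm_list[i].lp_tag == BFA_LP_TAG_INVALID
 *	lunm_list[i].rp_tag == BFA_RPORT_TAG_INVALID
 *
 * and the tags are re-resolved later through
 * bfa_fcpim_lunmask_rp_update() when the rport logs in.
 */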
2164 * Validate LUN for LUN masking
2166 static enum bfa_ioim_lm_status
2167 bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) &&
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2206 return BFA_IOIM_LM_PRESENT;
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
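/*
 * Summary of the bfa_ioim_lm_check() outcomes above (editor's note,
 * derived from the code):
 *
 *	REPORT LUNS to LUN 0        -> LM_PRESENT, reply post-processed
 *	LUN found in mask, UA set   -> LM_RPL_DATA_CHANGED (once per UA)
 *	LUN found in mask, no UA    -> LM_PRESENT
 *	LUN not in mask:
 *		INQUIRY to LUN 0    -> LM_PRESENT, inquiry data masked
 *		TEST UNIT READY     -> LM_LUN_NOT_RDY
 *		anything else       -> LM_LUN_NOT_SUP
 */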
2222 static bfa_boolean_t
2223 bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2225 return BFA_TRUE;
2228 static void
2229 bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
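/*
 * Editor's note: BFA_IOIM_LUN_MASK_FETCHED is a transient marker set
 * here; bfa_ioim_lm_update_lun_sg() below copies each FETCHED entry
 * into the REPORT LUNS reply and flips it back to
 * BFA_IOIM_LUN_MASK_ACTIVE.
 */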
2254 static int
2255 bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2279 /* the previous buf may end with a partial lun entry */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2287 /* take the trailing bytes of the split lun from the next buf */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun = temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2293 /* take the leading bytes of the split lun from the prev buf */
2294 temp_last_lun = *((u64 *)((u8 *)prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
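/*
 * Worked example of the split-LUN stitching above (editor's
 * illustration with assumed sizes): sizeof(struct scsi_lun) is 8, so
 * with prev_sg_len == 1029 bytes,
 *
 *	lun_across_sg_bytes = 1029 % 8 = 5  (first 5 bytes in the prev sg)
 *	bytes_from_next_buf = 8 - 5    = 3  (last 3 bytes in the next sg)
 *
 * and last_lun is assembled from both buffers before being matched
 * against the mask DB via bfa_ioim_lm_fetch_lun().
 */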
2338 static bfa_boolean_t
2339 bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return BFA_FALSE;
2352 static bfa_boolean_t
2353 bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = be32_to_cpu(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
2385 sizeof(struct scsi_lun));
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = cpu_to_be32(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
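/*
 * Editor's illustration of the residue fix-up above: if the target
 * reported lun_count == 4 but only lun_fetched_cnt == 1 LUN survives
 * the mask, the wire residue grows by (4 - 1) * 8 == 24 bytes, and
 * (when the reply fits in the buffer) lun_list_length is rewritten to
 * 1 * 8 == 8, so the midlayer sees only the masked-in LUN.
 */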
2400 static void
2401 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2403 struct bfa_ioim_s *ioim = cbarg;
2405 if (!complete) {
2406 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2407 return;
2410 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2413 static void
2414 __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2416 struct bfa_ioim_s *ioim = cbarg;
2417 struct bfi_ioim_rsp_s *m;
2418 u8 *snsinfo = NULL;
2419 u8 sns_len = 0;
2420 s32 residue = 0;
2422 if (!complete) {
2423 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2424 return;
2427 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2428 if (m->io_status == BFI_IOIM_STS_OK) {
2430 * setup sense information, if present
2432 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2433 m->sns_len) {
2434 sns_len = m->sns_len;
2435 snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2436 ioim->iotag);
2440 * setup residue value correctly for normal completions
2442 if (m->resid_flags == FCP_RESID_UNDER) {
2443 residue = be32_to_cpu(m->residue);
2444 bfa_stats(ioim->itnim, iocomp_underrun);
2446 if (m->resid_flags == FCP_RESID_OVER) {
2447 residue = be32_to_cpu(m->residue);
2448 residue = -residue;
2449 bfa_stats(ioim->itnim, iocomp_overrun);
2453 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2454 m->scsi_status, sns_len, snsinfo, residue);
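/*
 * Residue sign convention used above: FCP_RESID_UNDER yields a positive
 * residue (the target moved less than fcp_dl bytes), FCP_RESID_OVER a
 * negative one; all other completions leave residue at 0.
 */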
2457 static void
2458 __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2482 static void
2483 __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2508 static void
2509 __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
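/*
 * The three helpers above fabricate fixed-format sense data. Editor's
 * summary, assuming the SCSI_ASC/ASCQ macros carry their standard SPC
 * values:
 *
 *	lun_not_sup -> ILLEGAL_REQUEST, ASC 0x25 (LUN NOT SUPPORTED)
 *	rpl_dc      -> unit attention, ASC/ASCQ 0x3f/0x0e (REPORT LUNS
 *	               DATA HAS CHANGED)
 *	lun_not_rdy -> NOT_READY, ASC/ASCQ 0x04/0x03 (MANUAL
 *	               INTERVENTION REQUIRED)
 */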
2534 void
2535 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag)
2538 struct bfa_lun_mask_s *lun_list;
2539 u8 i;
2541 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2542 return;
2544 lun_list = bfa_get_lun_mask_list(bfa);
2545 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2546 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2547 if ((lun_list[i].lp_wwn == lp_wwn) &&
2548 (lun_list[i].rp_wwn == rp_wwn)) {
2549 lun_list[i].rp_tag = rp_tag;
2550 lun_list[i].lp_tag = lp_tag;
2557 * set UA for all active luns in LM DB
2559 static void
2560 bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2562 struct bfa_lun_mask_s *lunm_list;
2563 int i;
2565 lunm_list = bfa_get_lun_mask_list(bfa);
2566 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2567 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2568 continue;
2569 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2573 bfa_status_t
2574 bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2576 struct bfa_lunmask_cfg_s *lun_mask;
2578 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2579 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2580 return BFA_STATUS_FAILED;
2582 if (bfa_get_lun_mask_status(bfa) == update)
2583 return BFA_STATUS_NO_CHANGE;
2585 lun_mask = bfa_get_lun_mask(bfa);
2586 lun_mask->status = update;
2588 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2589 bfa_ioim_lm_set_ua(bfa);
2591 return bfa_dconf_update(bfa);
2594 bfa_status_t
2595 bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2597 int i;
2598 struct bfa_lun_mask_s *lunm_list;
2600 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2601 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2602 return BFA_STATUS_FAILED;
2604 lunm_list = bfa_get_lun_mask_list(bfa);
2605 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2606 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2607 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2608 bfa_rport_unset_lunmask(bfa,
2609 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2613 memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2614 return bfa_dconf_update(bfa);
2617 bfa_status_t
2618 bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2620 struct bfa_lunmask_cfg_s *lun_mask;
2622 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2623 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2624 return BFA_STATUS_FAILED;
2626 lun_mask = bfa_get_lun_mask(bfa);
2627 memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2628 return BFA_STATUS_OK;
2631 bfa_status_t
2632 bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2633 wwn_t rpwwn, struct scsi_lun lun)
2635 struct bfa_lun_mask_s *lunm_list;
2636 struct bfa_rport_s *rp = NULL;
2637 int i, free_index = MAX_LUN_MASK_CFG + 1;
2638 struct bfa_fcs_lport_s *port = NULL;
2639 struct bfa_fcs_rport_s *rp_fcs;
2641 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2642 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2643 return BFA_STATUS_FAILED;
2645 port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2646 vf_id, *pwwn);
2647 if (port) {
2648 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs ? rp_fcs->bfa_rport : NULL;
2653 lunm_list = bfa_get_lun_mask_list(bfa);
2654 /* remember a free slot and check if the entry already exists */
2655 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2656 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2657 free_index = i;
2658 if ((lunm_list[i].lp_wwn == *pwwn) &&
2659 (lunm_list[i].rp_wwn == rpwwn) &&
2660 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2661 scsilun_to_int((struct scsi_lun *)&lun)))
2662 return BFA_STATUS_ENTRY_EXISTS;
2665 if (free_index > MAX_LUN_MASK_CFG)
2666 return BFA_STATUS_MAX_ENTRY_REACHED;
2668 if (rp) {
2669 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2670 rp->rport_info.local_pid);
2671 lunm_list[free_index].rp_tag = rp->rport_tag;
2672 } else {
2673 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2674 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2677 lunm_list[free_index].lp_wwn = *pwwn;
2678 lunm_list[free_index].rp_wwn = rpwwn;
2679 lunm_list[free_index].lun = lun;
2680 lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2682 /* set UA for all luns in this rp */
2683 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2684 if ((lunm_list[i].lp_wwn == *pwwn) &&
2685 (lunm_list[i].rp_wwn == rpwwn))
2686 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2689 return bfa_dconf_update(bfa);
2692 bfa_status_t
2693 bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2694 wwn_t rpwwn, struct scsi_lun lun)
2696 struct bfa_lun_mask_s *lunm_list;
2697 struct bfa_rport_s *rp = NULL;
2698 struct bfa_fcs_lport_s *port = NULL;
2699 struct bfa_fcs_rport_s *rp_fcs;
2700 int i;
2702 /* in min cfg lunm_list could be NULL, but no commands should be running */
2703 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2704 return BFA_STATUS_FAILED;
2706 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2707 bfa_trc(bfa, *pwwn);
2708 bfa_trc(bfa, rpwwn);
2709 bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2711 if (*pwwn == 0) {
2712 port = bfa_fcs_lookup_port(
2713 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2714 vf_id, *pwwn);
2715 if (port) {
2716 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs ? rp_fcs->bfa_rport : NULL;
2722 lunm_list = bfa_get_lun_mask_list(bfa);
2723 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2724 if ((lunm_list[i].lp_wwn == *pwwn) &&
2725 (lunm_list[i].rp_wwn == rpwwn) &&
2726 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2727 scsilun_to_int((struct scsi_lun *)&lun))) {
2728 lunm_list[i].lp_wwn = 0;
2729 lunm_list[i].rp_wwn = 0;
2730 int_to_scsilun(0, &lunm_list[i].lun);
2731 lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2732 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2733 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2734 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2736 return bfa_dconf_update(bfa);
2740 /* set UA for all luns in this rp */
2741 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2742 if ((lunm_list[i].lp_wwn == *pwwn) &&
2743 (lunm_list[i].rp_wwn == rpwwn))
2744 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2747 return BFA_STATUS_ENTRY_NOT_EXISTS;
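/*
 * Minimal usage sketch of the lunmask API above (editor's illustration;
 * the variables are hypothetical - in the driver these entry points are
 * reached from the bfad ioctl path):
 *
 *	struct scsi_lun lun;
 *
 *	int_to_scsilun(5, &lun);
 *	status = bfa_fcpim_lunmask_add(bfa, vf_id, &pwwn, rpwwn, lun);
 *	...
 *	status = bfa_fcpim_lunmask_delete(bfa, vf_id, &pwwn, rpwwn, lun);
 *
 * Both calls persist the updated mask through bfa_dconf_update() on
 * success.
 */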
2750 static void
2751 __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2753 struct bfa_ioim_s *ioim = cbarg;
2755 if (!complete) {
2756 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2757 return;
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2762 0, 0, NULL, 0);
2765 static void
2766 __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2768 struct bfa_ioim_s *ioim = cbarg;
2770 bfa_stats(ioim->itnim, path_tov_expired);
2771 if (!complete) {
2772 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2773 return;
2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2778 0, 0, NULL, 0);
2781 static void
2782 __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2784 struct bfa_ioim_s *ioim = cbarg;
2786 if (!complete) {
2787 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2788 return;
2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2795 static void
2796 bfa_ioim_sgpg_alloced(void *cbarg)
2798 struct bfa_ioim_s *ioim = cbarg;
2800 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2801 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2802 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2803 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2807 * Send I/O request to firmware.
2809 static bfa_boolean_t
2810 bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2812 struct bfa_itnim_s *itnim = ioim->itnim;
2813 struct bfi_ioim_req_s *m;
2814 static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2815 struct bfi_sge_s *sge, *sgpge;
2816 u32 pgdlen = 0;
2817 u32 fcp_dl;
2818 u64 addr;
2819 struct scatterlist *sg;
2820 struct bfa_sgpg_s *sgpg;
2821 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2822 u32 i, sge_id, pgcumsz;
2823 enum dma_data_direction dmadir;
2826 * check for room in queue to send request now
2828 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2829 if (!m) {
2830 bfa_stats(ioim->itnim, qwait);
2831 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2832 &ioim->iosp->reqq_wait);
2833 return BFA_FALSE;
2837 * build i/o request message next
2839 m->io_tag = cpu_to_be16(ioim->iotag);
2840 m->rport_hdl = ioim->itnim->rport->fw_handle;
2841 m->io_timeout = 0;
2843 sge = &m->sges[0];
2844 sgpg = ioim->sgpg;
2845 sge_id = 0;
2846 sgpge = NULL;
2847 pgcumsz = 0;
2848 scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2849 if (i == 0) {
2850 /* build inline IO SG element */
2851 addr = bfa_sgaddr_le(sg_dma_address(sg));
2852 sge->sga = *(union bfi_addr_u *) &addr;
2853 pgdlen = sg_dma_len(sg);
2854 sge->sg_len = pgdlen;
2855 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2856 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2857 bfa_sge_to_be(sge);
2858 sge++;
2859 } else {
2860 if (sge_id == 0)
2861 sgpge = sgpg->sgpg->sges;
2863 addr = bfa_sgaddr_le(sg_dma_address(sg));
2864 sgpge->sga = *(union bfi_addr_u *) &addr;
2865 sgpge->sg_len = sg_dma_len(sg);
2866 pgcumsz += sgpge->sg_len;
2868 /* set flags */
2869 if (i < (ioim->nsges - 1) &&
2870 sge_id < (BFI_SGPG_DATA_SGES - 1))
2871 sgpge->flags = BFI_SGE_DATA;
2872 else if (i < (ioim->nsges - 1))
2873 sgpge->flags = BFI_SGE_DATA_CPL;
2874 else
2875 sgpge->flags = BFI_SGE_DATA_LAST;
2877 bfa_sge_to_le(sgpge);
2879 sgpge++;
2880 if (i == (ioim->nsges - 1)) {
2881 sgpge->flags = BFI_SGE_PGDLEN;
2882 sgpge->sga.a32.addr_lo = 0;
2883 sgpge->sga.a32.addr_hi = 0;
2884 sgpge->sg_len = pgcumsz;
2885 bfa_sge_to_le(sgpge);
2886 } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2887 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2888 sgpge->flags = BFI_SGE_LINK;
2889 sgpge->sga = sgpg->sgpg_pa;
2890 sgpge->sg_len = pgcumsz;
2891 bfa_sge_to_le(sgpge);
2892 sge_id = 0;
2893 pgcumsz = 0;
2898 if (ioim->nsges > BFI_SGE_INLINE) {
2899 sge->sga = ioim->sgpg->sgpg_pa;
2900 } else {
2901 sge->sga.a32.addr_lo = 0;
2902 sge->sga.a32.addr_hi = 0;
2904 sge->sg_len = pgdlen;
2905 sge->flags = BFI_SGE_PGDLEN;
2906 bfa_sge_to_be(sge);
2909 * set up I/O command parameters
2911 m->cmnd = cmnd_z0;
2912 int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2913 dmadir = cmnd->sc_data_direction;
2914 if (dmadir == DMA_TO_DEVICE)
2915 m->cmnd.iodir = FCP_IODIR_WRITE;
2916 else if (dmadir == DMA_FROM_DEVICE)
2917 m->cmnd.iodir = FCP_IODIR_READ;
2918 else
2919 m->cmnd.iodir = FCP_IODIR_NONE;
2921 m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2922 fcp_dl = scsi_bufflen(cmnd);
2923 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2926 * set up I/O message header
2928 switch (m->cmnd.iodir) {
2929 case FCP_IODIR_READ:
2930 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2931 bfa_stats(itnim, input_reqs);
2932 ioim->itnim->stats.rd_throughput += fcp_dl;
2933 break;
2934 case FCP_IODIR_WRITE:
2935 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2936 bfa_stats(itnim, output_reqs);
2937 ioim->itnim->stats.wr_throughput += fcp_dl;
2938 break;
2939 case FCP_IODIR_RW:
2940 bfa_stats(itnim, input_reqs);
2941 bfa_stats(itnim, output_reqs);
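/* fall through - RW also uses the generic BFI_MC_IOIM_IO opcode */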
2942 default:
2943 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2945 if (itnim->seq_rec ||
2946 (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2947 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2950 * queue I/O message to firmware
2952 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2953 return BFA_TRUE;
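/*
 * SG layout produced above (editor's summary): the first SG element is
 * carried inline in the request; when nsges > BFI_SGE_INLINE the
 * remaining elements spill into pre-allocated SG pages, the second
 * inline element points at the first page, and a BFI_SGE_LINK element
 * chains each full page to the next. The final element written carries
 * BFI_SGE_PGDLEN with the cumulative byte count.
 */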
2957 * Set up any additional SG pages needed. The inline SG element is
2958 * set up at queuing time.
2960 static bfa_boolean_t
2961 bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2963 u16 nsgpgs;
2965 WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2968 * allocate SG pages needed
2970 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2971 if (!nsgpgs)
2972 return BFA_TRUE;
2974 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2975 != BFA_STATUS_OK) {
2976 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2977 return BFA_FALSE;
2980 ioim->nsgpgs = nsgpgs;
2981 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2983 return BFA_TRUE;
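/*
 * Editor's note: BFA_SGPG_NPAGE() presumably rounds nsges up to whole
 * SG pages (BFI_SGPG_DATA_SGES data elements per page). When the pool
 * is empty the request parks on sgpg_wqe and resumes through
 * bfa_ioim_sgpg_alloced() once pages are freed.
 */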
2987 * Send I/O abort request to firmware.
2989 static bfa_boolean_t
2990 bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2992 struct bfi_ioim_abort_req_s *m;
2993 enum bfi_ioim_h2i msgop;
2996 * check for room in queue to send request now
2998 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2999 if (!m)
3000 return BFA_FALSE;
3003 * build i/o request message next
3005 if (ioim->iosp->abort_explicit)
3006 msgop = BFI_IOIM_H2I_IOABORT_REQ;
3007 else
3008 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
3010 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
3011 m->io_tag = cpu_to_be16(ioim->iotag);
3012 m->abort_tag = ++ioim->abort_tag;
3015 * queue I/O message to firmware
3017 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
3018 return BFA_TRUE;
3022 * Call to resume any I/O requests waiting for room in request queue.
3024 static void
3025 bfa_ioim_qresume(void *cbarg)
3027 struct bfa_ioim_s *ioim = cbarg;
3029 bfa_stats(ioim->itnim, qresumes);
3030 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
3034 static void
3035 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
3038 * Move IO from itnim queue to fcpim global queue since itnim will be
3039 * freed.
3041 list_del(&ioim->qe);
3042 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3044 if (!ioim->iosp->tskim) {
3045 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
3046 bfa_cb_dequeue(&ioim->hcb_qe);
3047 list_del(&ioim->qe);
3048 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
3050 bfa_itnim_iodone(ioim->itnim);
3051 } else
3052 bfa_wc_down(&ioim->iosp->tskim->wc);
3055 static bfa_boolean_t
3056 bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
3058 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
3059 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
3060 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
3061 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
3062 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
3063 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
3064 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
3065 return BFA_FALSE;
3067 return BFA_TRUE;
3070 void
3071 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
3074 * If the path tov timer expired, fail back with PATHTOV status - these
3075 * IO requests are not normally retried by the IO stack.
3077 * Otherwise the device came back online; fail with the normal failed
3078 * status so that the IO stack retries these failed IO requests.
3080 if (iotov)
3081 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
3082 else {
3083 ioim->io_cbfn = __bfa_cb_ioim_failed;
3084 bfa_stats(ioim->itnim, iocom_nexus_abort);
3086 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
3089 * Move IO to fcpim global queue since itnim will be
3090 * freed.
3092 list_del(&ioim->qe);
3093 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3098 * Memory allocation and initialization.
3100 void
3101 bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
3103 struct bfa_ioim_s *ioim;
3104 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
3105 struct bfa_ioim_sp_s *iosp;
3106 u16 i;
3109 * claim memory first
3111 ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
3112 fcpim->ioim_arr = ioim;
3113 bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
3115 iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
3116 fcpim->ioim_sp_arr = iosp;
3117 bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
3120 * Initialize ioim free queues
3122 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
3123 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
3125 for (i = 0; i < fcpim->fcp->num_ioim_reqs;
3126 i++, ioim++, iosp++) {
3128 * initialize IOIM
3130 memset(ioim, 0, sizeof(struct bfa_ioim_s));
3131 ioim->iotag = i;
3132 ioim->bfa = fcpim->bfa;
3133 ioim->fcpim = fcpim;
3134 ioim->iosp = iosp;
3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3136 INIT_LIST_HEAD(&ioim->sgpg_q);
3137 bfa_reqq_winit(&ioim->iosp->reqq_wait,
3138 bfa_ioim_qresume, ioim);
3139 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
3140 bfa_ioim_sgpg_alloced, ioim);
3141 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
3145 void
3146 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3148 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3149 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
3150 struct bfa_ioim_s *ioim;
3151 u16 iotag;
3152 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
3154 iotag = be16_to_cpu(rsp->io_tag);
3156 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
3157 WARN_ON(ioim->iotag != iotag);
3159 bfa_trc(ioim->bfa, ioim->iotag);
3160 bfa_trc(ioim->bfa, rsp->io_status);
3161 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
3163 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
3164 ioim->iosp->comp_rspmsg = *m;
3166 switch (rsp->io_status) {
3167 case BFI_IOIM_STS_OK:
3168 bfa_stats(ioim->itnim, iocomp_ok);
3169 if (rsp->reuse_io_tag == 0)
3170 evt = BFA_IOIM_SM_DONE;
3171 else
3172 evt = BFA_IOIM_SM_COMP;
3173 ioim->proc_rsp_data(ioim);
3174 break;
3176 case BFI_IOIM_STS_TIMEDOUT:
3177 bfa_stats(ioim->itnim, iocomp_timedout);
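/* fall through - timed out IO requests are completed as aborted */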
3178 case BFI_IOIM_STS_ABORTED:
3179 rsp->io_status = BFI_IOIM_STS_ABORTED;
3180 bfa_stats(ioim->itnim, iocomp_aborted);
3181 if (rsp->reuse_io_tag == 0)
3182 evt = BFA_IOIM_SM_DONE;
3183 else
3184 evt = BFA_IOIM_SM_COMP;
3185 break;
3187 case BFI_IOIM_STS_PROTO_ERR:
3188 bfa_stats(ioim->itnim, iocom_proto_err);
3189 WARN_ON(!rsp->reuse_io_tag);
3190 evt = BFA_IOIM_SM_COMP;
3191 break;
3193 case BFI_IOIM_STS_SQER_NEEDED:
3194 bfa_stats(ioim->itnim, iocom_sqer_needed);
3195 WARN_ON(rsp->reuse_io_tag != 0);
3196 evt = BFA_IOIM_SM_SQRETRY;
3197 break;
3199 case BFI_IOIM_STS_RES_FREE:
3200 bfa_stats(ioim->itnim, iocom_res_free);
3201 evt = BFA_IOIM_SM_FREE;
3202 break;
3204 case BFI_IOIM_STS_HOST_ABORTED:
3205 bfa_stats(ioim->itnim, iocom_hostabrts);
3206 if (rsp->abort_tag != ioim->abort_tag) {
3207 bfa_trc(ioim->bfa, rsp->abort_tag);
3208 bfa_trc(ioim->bfa, ioim->abort_tag);
3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3210 return;
3213 if (rsp->reuse_io_tag)
3214 evt = BFA_IOIM_SM_ABORT_COMP;
3215 else
3216 evt = BFA_IOIM_SM_ABORT_DONE;
3217 break;
3219 case BFI_IOIM_STS_UTAG:
3220 bfa_stats(ioim->itnim, iocom_utags);
3221 evt = BFA_IOIM_SM_COMP_UTAG;
3222 break;
3224 default:
3225 WARN_ON(1);
3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3229 bfa_sm_send_event(ioim, evt);
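/*
 * Editor's summary of the io_status -> event mapping above: a non-zero
 * reuse_io_tag means firmware keeps the tag, so OK/ABORTED map to
 * BFA_IOIM_SM_COMP instead of BFA_IOIM_SM_DONE, and a HOST_ABORTED
 * response carrying a stale abort_tag is dropped without any event.
 */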
3232 void
3233 bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3235 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3236 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
3237 struct bfa_ioim_s *ioim;
3238 u16 iotag;
3240 iotag = be16_to_cpu(rsp->io_tag);
3242 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
3243 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
3245 bfa_ioim_cb_profile_comp(fcpim, ioim);
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
3259 * Called by itnim to clean up IO while going offline.
3261 void
3262 bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
3264 bfa_trc(ioim->bfa, ioim->iotag);
3265 bfa_stats(ioim->itnim, io_cleanups);
3267 ioim->iosp->tskim = NULL;
3268 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
3271 void
3272 bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
3274 bfa_trc(ioim->bfa, ioim->iotag);
3275 bfa_stats(ioim->itnim, io_tmaborts);
3277 ioim->iosp->tskim = tskim;
3278 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
3282 * IOC failure handling.
3284 void
3285 bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
3287 bfa_trc(ioim->bfa, ioim->iotag);
3288 bfa_stats(ioim->itnim, io_iocdowns);
3289 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
3293 * IO offline TOV popped. Fail the pending IO.
3295 void
3296 bfa_ioim_tov(struct bfa_ioim_s *ioim)
3298 bfa_trc(ioim->bfa, ioim->iotag);
3299 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
3304 * Allocate IOIM resource for initiator mode I/O request.
3306 struct bfa_ioim_s *
3307 bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
3308 struct bfa_itnim_s *itnim, u16 nsges)
3310 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3311 struct bfa_ioim_s *ioim;
3312 struct bfa_iotag_s *iotag = NULL;
3315 * allocate an IOIM resource
3317 bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
3318 if (!iotag) {
3319 bfa_stats(itnim, no_iotags);
3320 return NULL;
3323 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
3325 ioim->dio = dio;
3326 ioim->itnim = itnim;
3327 ioim->nsges = nsges;
3328 ioim->nsgpgs = 0;
3330 bfa_stats(itnim, total_ios);
3331 fcpim->ios_active++;
3333 list_add_tail(&ioim->qe, &itnim->io_q);
3335 return ioim;
3338 void
3339 bfa_ioim_free(struct bfa_ioim_s *ioim)
3341 struct bfa_fcpim_s *fcpim = ioim->fcpim;
3342 struct bfa_iotag_s *iotag;
3344 if (ioim->nsgpgs > 0)
3345 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
3347 bfa_stats(ioim->itnim, io_comps);
3348 fcpim->ios_active--;
3350 ioim->iotag &= BFA_IOIM_IOTAG_MASK;
3352 WARN_ON(!(ioim->iotag <
3353 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
3354 iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
3356 if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
3357 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
3358 else
3359 list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
3361 list_del(&ioim->qe);
3364 void
3365 bfa_ioim_start(struct bfa_ioim_s *ioim)
3367 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
3368 struct bfa_lps_s *lps;
3369 enum bfa_ioim_lm_status status;
3370 struct scsi_lun scsilun;
3372 if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
3373 lps = BFA_IOIM_TO_LPS(ioim);
3374 int_to_scsilun(cmnd->device->lun, &scsilun);
3375 status = bfa_ioim_lm_check(ioim, lps,
3376 ioim->itnim->rport, scsilun);
3377 if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
3378 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
3379 bfa_stats(ioim->itnim, lm_lun_not_rdy);
3380 return;
3383 if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
3384 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
3385 bfa_stats(ioim->itnim, lm_lun_not_sup);
3386 return;
3389 if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
3390 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
3391 bfa_stats(ioim->itnim, lm_rpl_data_changed);
3392 return;
3396 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3399 * Obtain the queue over which this request has to be issued
3401 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3402 BFA_FALSE : bfa_itnim_get_reqq(ioim);
3404 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3408 * Driver I/O abort request.
3410 bfa_status_t
3411 bfa_ioim_abort(struct bfa_ioim_s *ioim)
3414 bfa_trc(ioim->bfa, ioim->iotag);
3416 if (!bfa_ioim_is_abortable(ioim))
3417 return BFA_STATUS_FAILED;
3419 bfa_stats(ioim->itnim, io_aborts);
3420 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3422 return BFA_STATUS_OK;
3426 * BFA TSKIM state machine functions
3430 * Task management command beginning state.
3432 static void
3433 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3435 bfa_trc(tskim->bfa, event);
3437 switch (event) {
3438 case BFA_TSKIM_SM_START:
3439 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3440 bfa_tskim_gather_ios(tskim);
3443 * If device is offline, do not send TM on wire. Just cleanup
3444 * any pending IO requests and complete TM request.
3446 if (!bfa_itnim_is_online(tskim->itnim)) {
3447 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3448 tskim->tsk_status = BFI_TSKIM_STS_OK;
3449 bfa_tskim_cleanup_ios(tskim);
3450 return;
3453 if (!bfa_tskim_send(tskim)) {
3454 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3455 bfa_stats(tskim->itnim, tm_qwait);
3456 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3457 &tskim->reqq_wait);
3459 break;
3461 default:
3462 bfa_sm_fault(tskim->bfa, event);
3467 * TM command is active, awaiting completion from firmware to
3468 * clean up IO requests in TM scope.
3470 static void
3471 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3473 bfa_trc(tskim->bfa, event);
3475 switch (event) {
3476 case BFA_TSKIM_SM_DONE:
3477 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3478 bfa_tskim_cleanup_ios(tskim);
3479 break;
3481 case BFA_TSKIM_SM_CLEANUP:
3482 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3483 if (!bfa_tskim_send_abort(tskim)) {
3484 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3485 bfa_stats(tskim->itnim, tm_qwait);
3486 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3487 &tskim->reqq_wait);
3489 break;
3491 case BFA_TSKIM_SM_HWFAIL:
3492 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3493 bfa_tskim_iocdisable_ios(tskim);
3494 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3495 break;
3497 default:
3498 bfa_sm_fault(tskim->bfa, event);
3503 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3504 * completion event from firmware.
3506 static void
3507 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3509 bfa_trc(tskim->bfa, event);
3511 switch (event) {
3512 case BFA_TSKIM_SM_DONE:
3514 * Ignore and wait for ABORT completion from firmware.
3516 break;
3518 case BFA_TSKIM_SM_CLEANUP_DONE:
3519 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3520 bfa_tskim_cleanup_ios(tskim);
3521 break;
3523 case BFA_TSKIM_SM_HWFAIL:
3524 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3525 bfa_tskim_iocdisable_ios(tskim);
3526 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3527 break;
3529 default:
3530 bfa_sm_fault(tskim->bfa, event);
3534 static void
3535 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3537 bfa_trc(tskim->bfa, event);
3539 switch (event) {
3540 case BFA_TSKIM_SM_IOS_DONE:
3541 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3542 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3543 break;
3545 case BFA_TSKIM_SM_CLEANUP:
3547 * Ignore, TM command completed on wire.
3548 * Notify TM completion on IO cleanup completion.
3550 break;
3552 case BFA_TSKIM_SM_HWFAIL:
3553 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3554 bfa_tskim_iocdisable_ios(tskim);
3555 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3556 break;
3558 default:
3559 bfa_sm_fault(tskim->bfa, event);
3564 * Task management command is waiting for room in request CQ
3566 static void
3567 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3569 bfa_trc(tskim->bfa, event);
3571 switch (event) {
3572 case BFA_TSKIM_SM_QRESUME:
3573 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3574 bfa_tskim_send(tskim);
3575 break;
3577 case BFA_TSKIM_SM_CLEANUP:
3579 * No need to send TM on wire since ITN is offline.
3581 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3582 bfa_reqq_wcancel(&tskim->reqq_wait);
3583 bfa_tskim_cleanup_ios(tskim);
3584 break;
3586 case BFA_TSKIM_SM_HWFAIL:
3587 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3588 bfa_reqq_wcancel(&tskim->reqq_wait);
3589 bfa_tskim_iocdisable_ios(tskim);
3590 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3591 break;
3593 default:
3594 bfa_sm_fault(tskim->bfa, event);
3599 * Task management command is active, awaiting room in the request CQ
3600 * to send the cleanup request.
3602 static void
3603 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3604 enum bfa_tskim_event event)
3606 bfa_trc(tskim->bfa, event);
3608 switch (event) {
3609 case BFA_TSKIM_SM_DONE:
3610 bfa_reqq_wcancel(&tskim->reqq_wait);
3612 * Fall through !!!
3614 case BFA_TSKIM_SM_QRESUME:
3615 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3616 bfa_tskim_send_abort(tskim);
3617 break;
3619 case BFA_TSKIM_SM_HWFAIL:
3620 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3621 bfa_reqq_wcancel(&tskim->reqq_wait);
3622 bfa_tskim_iocdisable_ios(tskim);
3623 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3624 break;
3626 default:
3627 bfa_sm_fault(tskim->bfa, event);
3632 * BFA callback is pending
3634 static void
3635 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3637 bfa_trc(tskim->bfa, event);
3639 switch (event) {
3640 case BFA_TSKIM_SM_HCB:
3641 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3642 bfa_tskim_free(tskim);
3643 break;
3645 case BFA_TSKIM_SM_CLEANUP:
3646 bfa_tskim_notify_comp(tskim);
3647 break;
3649 case BFA_TSKIM_SM_HWFAIL:
3650 break;
3652 default:
3653 bfa_sm_fault(tskim->bfa, event);
3657 static void
3658 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3660 struct bfa_tskim_s *tskim = cbarg;
3662 if (!complete) {
3663 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3664 return;
3667 bfa_stats(tskim->itnim, tm_success);
3668 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3671 static void
3672 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3674 struct bfa_tskim_s *tskim = cbarg;
3676 if (!complete) {
3677 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3678 return;
3681 bfa_stats(tskim->itnim, tm_failures);
3682 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3683 BFI_TSKIM_STS_FAILED);
3686 static bfa_boolean_t
3687 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3689 switch (tskim->tm_cmnd) {
3690 case FCP_TM_TARGET_RESET:
3691 return BFA_TRUE;
3693 case FCP_TM_ABORT_TASK_SET:
3694 case FCP_TM_CLEAR_TASK_SET:
3695 case FCP_TM_LUN_RESET:
3696 case FCP_TM_CLEAR_ACA:
3697 return !memcmp(&tskim->lun, &lun, sizeof(lun));
3699 default:
3700 WARN_ON(1);
3703 return BFA_FALSE;
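/*
 * Scope rule encoded above: a target reset covers every IO on the
 * nexus, while ABORT/CLEAR TASK SET, LUN RESET and CLEAR ACA only cover
 * IO requests addressed to the same LUN as the TM command.
 */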
3707 * Gather affected IO requests and task management commands.
3709 static void
3710 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3712 struct bfa_itnim_s *itnim = tskim->itnim;
3713 struct bfa_ioim_s *ioim;
3714 struct list_head *qe, *qen;
3715 struct scsi_cmnd *cmnd;
3716 struct scsi_lun scsilun;
3718 INIT_LIST_HEAD(&tskim->io_q);
3721 * Gather any active IO requests first.
3723 list_for_each_safe(qe, qen, &itnim->io_q) {
3724 ioim = (struct bfa_ioim_s *) qe;
3725 cmnd = (struct scsi_cmnd *) ioim->dio;
3726 int_to_scsilun(cmnd->device->lun, &scsilun);
3727 if (bfa_tskim_match_scope(tskim, scsilun)) {
3728 list_del(&ioim->qe);
3729 list_add_tail(&ioim->qe, &tskim->io_q);
3734 * Fail back any pending IO requests immediately.
3736 list_for_each_safe(qe, qen, &itnim->pending_q) {
3737 ioim = (struct bfa_ioim_s *) qe;
3738 cmnd = (struct scsi_cmnd *) ioim->dio;
3739 int_to_scsilun(cmnd->device->lun, &scsilun);
3740 if (bfa_tskim_match_scope(tskim, scsilun)) {
3741 list_del(&ioim->qe);
3742 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3743 bfa_ioim_tov(ioim);
3749 * IO cleanup completion
3751 static void
3752 bfa_tskim_cleanp_comp(void *tskim_cbarg)
3754 struct bfa_tskim_s *tskim = tskim_cbarg;
3756 bfa_stats(tskim->itnim, tm_io_comps);
3757 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3761 * Clean up all IO requests gathered under this task management command.
3763 static void
3764 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3766 struct bfa_ioim_s *ioim;
3767 struct list_head *qe, *qen;
3769 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3771 list_for_each_safe(qe, qen, &tskim->io_q) {
3772 ioim = (struct bfa_ioim_s *) qe;
3773 bfa_wc_up(&tskim->wc);
3774 bfa_ioim_cleanup_tm(ioim, tskim);
3777 bfa_wc_wait(&tskim->wc);
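/*
 * Editor's note on the bfa_wc_ usage above (standard BFA waiting
 * counter semantics assumed): bfa_wc_init() primes the counter with one
 * reference and registers bfa_tskim_cleanp_comp() as the resume
 * callback; every gathered IO takes a reference (bfa_wc_up) that is
 * dropped again when that IO finishes cleanup; bfa_wc_wait() drops the
 * initial reference, so the callback fires after the last IO completes.
 */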
3781 * Send task management request to firmware.
3783 static bfa_boolean_t
3784 bfa_tskim_send(struct bfa_tskim_s *tskim)
3786 struct bfa_itnim_s *itnim = tskim->itnim;
3787 struct bfi_tskim_req_s *m;
3790 * check for room in queue to send request now
3792 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3793 if (!m)
3794 return BFA_FALSE;
3797 * build i/o request message next
3799 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3800 bfa_fn_lpu(tskim->bfa));
3802 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3803 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3804 m->t_secs = tskim->tsecs;
3805 m->lun = tskim->lun;
3806 m->tm_flags = tskim->tm_cmnd;
3809 * queue I/O message to firmware
3811 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3812 return BFA_TRUE;
3816 * Send abort request to cleanup an active TM to firmware.
3818 static bfa_boolean_t
3819 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3821 struct bfa_itnim_s *itnim = tskim->itnim;
3822 struct bfi_tskim_abortreq_s *m;
3825 * check for room in queue to send request now
3827 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3828 if (!m)
3829 return BFA_FALSE;
3832 * build i/o request message next
3834 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3835 bfa_fn_lpu(tskim->bfa));
3837 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3840 * queue I/O message to firmware
3842 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3843 return BFA_TRUE;
3847 * Call to resume a task management command waiting for room in the request queue.
3849 static void
3850 bfa_tskim_qresume(void *cbarg)
3852 struct bfa_tskim_s *tskim = cbarg;
3854 bfa_stats(tskim->itnim, tm_qresumes);
3855 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3859 * Cleanup IOs associated with a task management command on IOC failures.
3861 static void
3862 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3864 struct bfa_ioim_s *ioim;
3865 struct list_head *qe, *qen;
3867 list_for_each_safe(qe, qen, &tskim->io_q) {
3868 ioim = (struct bfa_ioim_s *) qe;
3869 bfa_ioim_iocdisable(ioim);
3874 * Notification on completions from related ioim.
3876 void
3877 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3879 bfa_wc_down(&tskim->wc);
3883 * Handle IOC h/w failure notification from itnim.
3885 void
3886 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3888 tskim->notify = BFA_FALSE;
3889 bfa_stats(tskim->itnim, tm_iocdowns);
3890 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3894 * Cleanup TM command and associated IOs as part of ITNIM offline.
3896 void
3897 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3899 tskim->notify = BFA_TRUE;
3900 bfa_stats(tskim->itnim, tm_cleanups);
3901 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3905 * Memory allocation and initialization.
3907 void
3908 bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3910 struct bfa_tskim_s *tskim;
3911 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
3912 u16 i;
3914 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3915 INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3917 tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3918 fcpim->tskim_arr = tskim;
3920 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3922 * initialize TSKIM
3924 memset(tskim, 0, sizeof(struct bfa_tskim_s));
3925 tskim->tsk_tag = i;
3926 tskim->bfa = fcpim->bfa;
3927 tskim->fcpim = fcpim;
3928 tskim->notify = BFA_FALSE;
3929 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3930 tskim);
3931 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3933 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3936 bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3939 void
3940 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3942 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3943 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3944 struct bfa_tskim_s *tskim;
3945 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
3947 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3948 WARN_ON(tskim->tsk_tag != tsk_tag);
3950 tskim->tsk_status = rsp->tsk_status;
3953 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3954 * requests. All other statuses are for normal completions.
3956 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3957 bfa_stats(tskim->itnim, tm_cleanup_comps);
3958 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3959 } else {
3960 bfa_stats(tskim->itnim, tm_fw_rsps);
3961 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3966 struct bfa_tskim_s *
3967 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3969 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3970 struct bfa_tskim_s *tskim;
3972 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3974 if (tskim)
3975 tskim->dtsk = dtsk;
3977 return tskim;
3980 void
3981 bfa_tskim_free(struct bfa_tskim_s *tskim)
3983 WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3984 list_del(&tskim->qe);
3985 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3989 * Start a task management command.
3991 * @param[in] tskim BFA task management command instance
3992 * @param[in] itnim i-t nexus for the task management command
3993 * @param[in] lun lun, if applicable
3994 * @param[in] tm_cmnd Task management command code.
3995 * @param[in] t_secs Timeout in seconds
3997 * @return None.
3999 void
4000 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
4001 struct scsi_lun lun,
4002 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
4004 tskim->itnim = itnim;
4005 tskim->lun = lun;
4006 tskim->tm_cmnd = tm_cmnd;
4007 tskim->tsecs = tsecs;
4008 tskim->notify = BFA_FALSE;
4009 bfa_stats(itnim, tm_cmnds);
4011 list_add_tail(&tskim->qe, &itnim->tsk_q);
4012 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
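/*
 * Minimal usage sketch (editor's illustration; the locals and the
 * timeout macro are hypothetical - the real caller is the bfad
 * task-management path):
 *
 *	struct bfa_tskim_s *tskim = bfa_tskim_alloc(bfa, dtsk);
 *	struct scsi_lun lun;
 *
 *	if (tskim) {
 *		int_to_scsilun(cmnd->device->lun, &lun);
 *		bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET,
 *				LUN_RESET_TMO_SECS);
 *	}
 */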
4015 void
4016 bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
4018 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
4019 struct list_head *qe;
4020 int i;
4022 for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
4023 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
4024 list_add_tail(qe, &fcpim->tskim_unused_q);
4028 /* BFA FCP module - parent module for fcpim */
4030 BFA_MODULE(fcp);
4032 static void
4033 bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4034 struct bfa_s *bfa)
4036 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4037 struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
4038 struct bfa_mem_dma_s *seg_ptr;
4039 u16 nsegs, idx, per_seg_ios, num_io_req;
4040 u32 km_len = 0;
4043 * A value of ZERO for num_ioim_reqs and num_fwtio_reqs is an allowed
4044 * config value. If the values are non-zero, clamp them to the supported range.
4046 if (cfg->fwcfg.num_ioim_reqs &&
4047 cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
4048 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
4049 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
4050 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
4052 if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
4053 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
4055 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
4056 if (num_io_req > BFA_IO_MAX) {
4057 if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
4058 cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
4059 cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
4060 } else if (cfg->fwcfg.num_fwtio_reqs)
4061 cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
4062 else
4063 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
4066 bfa_fcpim_meminfo(cfg, &km_len);
4068 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
4069 km_len += num_io_req * sizeof(struct bfa_iotag_s);
4070 km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
4072 /* dma memory */
4073 nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
4074 per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
4076 bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
4077 if (num_io_req >= per_seg_ios) {
4078 num_io_req -= per_seg_ios;
4079 bfa_mem_dma_setup(minfo, seg_ptr,
4080 per_seg_ios * BFI_IOIM_SNSLEN);
4081 } else
4082 bfa_mem_dma_setup(minfo, seg_ptr,
4083 num_io_req * BFI_IOIM_SNSLEN);
4086 /* kva memory */
4087 bfa_mem_kva_setup(minfo, fcp_kva, km_len);
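/*
 * Editor's sizing illustration (assumed numbers): with num_io_req ==
 * 2000, the DMA need is 2000 * BFI_IOIM_SNSLEN bytes of sense buffers,
 * split into nsegs segments of at most per_seg_ios requests each, while
 * the KVA need adds one bfa_iotag_s per IO request and one bfa_itn_s
 * per rport on top of bfa_fcpim_meminfo()'s km_len.
 */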
4090 static void
4091 bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4092 struct bfa_pcidev_s *pcidev)
4094 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4095 struct bfa_mem_dma_s *seg_ptr;
4096 u16 idx, nsegs, num_io_req;
4098 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
4099 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
4100 fcp->num_itns = cfg->fwcfg.num_rports;
4101 fcp->bfa = bfa;
4104 * Set up the pool of snsbase addresses that is passed to the fw as
4105 * part of bfi_iocfc_cfg_s.
4107 num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
4108 nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
4110 bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
4112 if (!bfa_mem_dma_virt(seg_ptr))
4113 break;
4115 fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
4116 fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
4117 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
4120 bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
4122 bfa_iotag_attach(fcp);
4124 fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
4125 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
4126 (fcp->num_itns * sizeof(struct bfa_itn_s));
4127 memset(fcp->itn_arr, 0,
4128 (fcp->num_itns * sizeof(struct bfa_itn_s)));
4131 static void
4132 bfa_fcp_detach(struct bfa_s *bfa)
4136 static void
4137 bfa_fcp_start(struct bfa_s *bfa)
4139 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4142 * bfa_init() with flash read is complete. now invalidate the stale
4143 * content of lun mask like unit attention, rp tag and lp tag.
4145 bfa_ioim_lm_init(fcp->bfa);
4148 static void
4149 bfa_fcp_stop(struct bfa_s *bfa)
4153 static void
4154 bfa_fcp_iocdisable(struct bfa_s *bfa)
4156 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4158 /* Enqueue unused ioim resources to free_q */
4159 list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
4161 bfa_fcpim_iocdisable(fcp);
4164 void
4165 bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
4167 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
4168 struct list_head *qe;
4169 int i;
4171 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
4172 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
4173 list_add_tail(qe, &mod->iotag_unused_q);
4177 void
4178 bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
4179 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
4181 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4182 struct bfa_itn_s *itn;
4184 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
4185 itn->isr = isr;
4189 * Itn interrupt processing.
4191 void
4192 bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4194 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4195 union bfi_itn_i2h_msg_u msg;
4196 struct bfa_itn_s *itn;
4198 msg.msg = m;
4199 itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
4201 if (itn->isr)
4202 itn->isr(bfa, m);
4203 else
4204 WARN_ON(1);
4207 void
4208 bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
4210 struct bfa_iotag_s *iotag;
4211 u16 num_io_req, i;
4213 iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
4214 fcp->iotag_arr = iotag;
4216 INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
4217 INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
4218 INIT_LIST_HEAD(&fcp->iotag_unused_q);
4220 num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
4221 for (i = 0; i < num_io_req; i++, iotag++) {
4222 memset(iotag, 0, sizeof(struct bfa_iotag_s));
4223 iotag->tag = i;
4224 if (i < fcp->num_ioim_reqs)
4225 list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
4226 else
4227 list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
4230 bfa_mem_kva_curp(fcp) = (u8 *) iotag;
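/*
 * Resulting tag partition (editor's summary): tags [0, num_ioim_reqs)
 * go to iotag_ioim_free_q for initiator-mode IO and the remaining tags
 * up to num_io_req go to iotag_tio_free_q for target IO;
 * bfa_fcp_res_recfg() later parks tags the firmware cannot support on
 * iotag_unused_q.
 */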