/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <cs/bfa_debug.h>
#include <bfa_cb_ioim_macros.h>

BFA_TRC_FILE(HAL, IOIM);
/**
 * forward declarations.
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void		__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
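/**
 * Note on the IO state machine (derived from the transitions below): a
 * normal read/write travels uninit -> [sgalloc] -> active -> hcb -> uninit,
 * with sgalloc entered only when the request needs more SG elements than
 * fit inline, and the *_qfull states entered whenever the request CQ has
 * no room. States ending in _free (hcb_free, resfree) handle completions
 * whose IO tag is still owned by firmware; the resource cannot be freed
 * until a BFA_IOIM_SM_FREE event arrives.
 */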
/**
 * forward declaration of IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					  enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
				     enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					     __bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				break;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
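/**
 * Note: an abort from the SCSI stack (BFA_IOIM_SM_ABORT) is "explicit":
 * abort_explicit is set and the host completion runs __bfa_cb_ioim_abort.
 * A cleanup from itnim/tskim (BFA_IOIM_SM_CLEANUP) aborts the same IO
 * "implicitly": abort_explicit is cleared and the IO completes through
 * __bfa_cb_ioim_failed instead. bfa_ioim_send_abort() uses this flag to
 * pick between BFI_IOIM_H2I_IOABORT_REQ and BFI_IOIM_H2I_IOCLEANUP_REQ.
 */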
/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
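/**
 * hcb callback functions. Each one is scheduled to driver context with
 * bfa_cb_queue() and runs once: with complete == BFA_TRUE to deliver the
 * host completion, or with complete == BFA_FALSE when the queued
 * completion is being discarded, in which case the IO only feeds a
 * BFA_IOIM_SM_HCB event back into its state machine.
 */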
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
		    && m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER)
			residue = bfa_os_ntohl(m->residue);
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			 m->scsi_status, sns_len, snsinfo, residue);
}
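/**
 * Residue convention used above: an FCP_RESID_UNDER completion reports
 * the untransferred byte count as a positive residue, while an
 * FCP_RESID_OVER completion reports the overrun as a negative value.
 */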
static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			 0, 0, NULL, 0);
}
static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			 0, 0, NULL, 0);
}
static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0);
		pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
				BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
		      *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
				FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
			      bfa_cb_ioim_get_cdb(ioim->dio) + 1,
			      m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
			bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, itnim->reqq);
	return BFA_TRUE;
}
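/**
 * If bfa_reqq_next() finds no CQ space, the IO parks on iosp->reqq_wait
 * and the caller moves to a *_qfull state; the queue wait entry was
 * initialized in bfa_ioim_attach() to fire bfa_ioim_qresume(), which
 * re-drives this send once space frees up.
 */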
/**
 * Setup any additional SG pages needed. Inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++) {
			sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
			sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
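/**
 * Resulting SG layout: up to BFI_SGE_INLINE data elements are carried in
 * the request message itself; elements beyond that go into SG pages, each
 * holding up to BFI_SGPG_DATA_SGES data elements plus one trailer. The
 * trailer of an intermediate page is a BFI_SGE_LINK element pointing at
 * the next page's physical address; the trailer of the last page is a
 * BFI_SGE_PGDLEN element carrying that page's cumulative data length.
 */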
/**
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, itnim->reqq);
	return BFA_TRUE;
}
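/**
 * abort_tag is bumped on every abort request sent, so a given IO can have
 * several aborts in flight; bfa_ioim_isr() compares the tag echoed in a
 * BFI_IOIM_STS_HOST_ABORTED response against ioim->abort_tag and silently
 * drops completions of stale abort requests.
 */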
/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_fcpim_stats(ioim->fcpim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}
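/**
 * Cleanup notification fans out two ways: IOs aborted by a task
 * management command report to their tskim, everything else reports to
 * the owning itnim. When delayed completion is enabled and the ITN
 * offline timer is running, the host completion is pulled back off the
 * callback queue and parked on delay_comp_q until
 * bfa_ioim_delayed_comp() decides its final status.
 */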
/**
 * Called when the IO offline TOV pops or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device came back online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else
		ioim->io_cbfn = __bfa_cb_ioim_failed;

	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
/**
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/**
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
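/**
 * Memory layout claimed above: one bfa_ioim_s and one bfa_ioim_sp_s per
 * supported IO tag from the KVA region, plus num_ioim_reqs *
 * BFI_IOIM_SNSLEN bytes of DMA-able memory for sense data, carved into
 * per-IO slices and registered with the IOC via bfa_iocfc_set_snsbase().
 */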
/**
 * Driver detach time call.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
/**
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
/**
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
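/**
 * Typical call sequence from the Linux driver shim (bfad):
 * bfa_ioim_alloc() pulls a pre-initialized IO from ioim_free_q and binds
 * it to the target (itnim) and the driver IO (dio); bfa_ioim_start()
 * then kicks the state machine with BFA_IOIM_SM_START, and
 * bfa_ioim_free() returns the IO to the free list once the state machine
 * is back in uninit.
 */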
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		      || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
/**
 * Driver I/O abort request.
 */
void
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
}