x86/amd-iommu: Add per IOMMU reference counting
[linux/fpc-iii.git] / drivers / scsi / bfa / bfa_tskim.c
blob010d40d1e5d38ea79c5e4b03a8fa8ac8d0063626
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
18 #include <bfa.h>
19 #include <bfa_cb_ioim_macros.h>
BFA_TRC_FILE(HAL, TSKIM);

/*
 * Task management completion handling: queue the BFA callback and, if
 * the TM owner asked for it, notify the ITNIM that the TM is done.
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
	bfa_tskim_notify_comp(__tskim);					\
} while (0)

/* Notify the owning ITNIM only when the notify flag has been set. */
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)
37 * forward declarations
39 static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
40 static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
41 static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
42 lun_t lun);
43 static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
44 static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
45 static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
46 static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
47 static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
48 static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
/*
 * bfa_tskim_sm -- task management state machine events and state handlers.
 */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START	= 1,	/* TM command start */
	BFA_TSKIM_SM_DONE	= 2,	/* TM completion */
	BFA_TSKIM_SM_QRESUME	= 3,	/* resume after qfull */
	/* NOTE(review): value 4 is skipped here — presumably a retired
	 * event; kept as-is so trace values stay stable. */
	BFA_TSKIM_SM_HWFAIL	= 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB	= 6,	/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE	= 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP	= 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};

static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
				    enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
				    enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
				     enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
				       enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
				   enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
					   enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
				 enum bfa_tskim_event event);
80 /**
81 * Task management command beginning state.
83 static void
84 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
86 bfa_trc(tskim->bfa, event);
88 switch (event) {
89 case BFA_TSKIM_SM_START:
90 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
91 bfa_tskim_gather_ios(tskim);
93 /**
94 * If device is offline, do not send TM on wire. Just cleanup
95 * any pending IO requests and complete TM request.
97 if (!bfa_itnim_is_online(tskim->itnim)) {
98 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
99 tskim->tsk_status = BFI_TSKIM_STS_OK;
100 bfa_tskim_cleanup_ios(tskim);
101 return;
104 if (!bfa_tskim_send(tskim)) {
105 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
106 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
107 &tskim->reqq_wait);
109 break;
111 default:
112 bfa_assert(0);
117 * brief
118 * TM command is active, awaiting completion from firmware to
119 * cleanup IO requests in TM scope.
121 static void
122 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
124 bfa_trc(tskim->bfa, event);
126 switch (event) {
127 case BFA_TSKIM_SM_DONE:
128 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
129 bfa_tskim_cleanup_ios(tskim);
130 break;
132 case BFA_TSKIM_SM_CLEANUP:
133 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
134 if (!bfa_tskim_send_abort(tskim)) {
135 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
136 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
137 &tskim->reqq_wait);
139 break;
141 case BFA_TSKIM_SM_HWFAIL:
142 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
143 bfa_tskim_iocdisable_ios(tskim);
144 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
145 break;
147 default:
148 bfa_assert(0);
153 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
154 * completion event from firmware.
156 static void
157 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
159 bfa_trc(tskim->bfa, event);
161 switch (event) {
162 case BFA_TSKIM_SM_DONE:
164 * Ignore and wait for ABORT completion from firmware.
166 break;
168 case BFA_TSKIM_SM_CLEANUP_DONE:
169 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
170 bfa_tskim_cleanup_ios(tskim);
171 break;
173 case BFA_TSKIM_SM_HWFAIL:
174 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
175 bfa_tskim_iocdisable_ios(tskim);
176 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
177 break;
179 default:
180 bfa_assert(0);
184 static void
185 bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
187 bfa_trc(tskim->bfa, event);
189 switch (event) {
190 case BFA_TSKIM_SM_IOS_DONE:
191 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
192 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
193 break;
195 case BFA_TSKIM_SM_CLEANUP:
197 * Ignore, TM command completed on wire.
198 * Notify TM conmpletion on IO cleanup completion.
200 break;
202 case BFA_TSKIM_SM_HWFAIL:
203 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
204 bfa_tskim_iocdisable_ios(tskim);
205 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
206 break;
208 default:
209 bfa_assert(0);
214 * Task management command is waiting for room in request CQ
216 static void
217 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
219 bfa_trc(tskim->bfa, event);
221 switch (event) {
222 case BFA_TSKIM_SM_QRESUME:
223 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
224 bfa_tskim_send(tskim);
225 break;
227 case BFA_TSKIM_SM_CLEANUP:
229 * No need to send TM on wire since ITN is offline.
231 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
232 bfa_reqq_wcancel(&tskim->reqq_wait);
233 bfa_tskim_cleanup_ios(tskim);
234 break;
236 case BFA_TSKIM_SM_HWFAIL:
237 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
238 bfa_reqq_wcancel(&tskim->reqq_wait);
239 bfa_tskim_iocdisable_ios(tskim);
240 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
241 break;
243 default:
244 bfa_assert(0);
249 * Task management command is active, awaiting for room in request CQ
250 * to send clean up request.
252 static void
253 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
254 enum bfa_tskim_event event)
256 bfa_trc(tskim->bfa, event);
258 switch (event) {
259 case BFA_TSKIM_SM_DONE:
260 bfa_reqq_wcancel(&tskim->reqq_wait);
263 * Fall through !!!
266 case BFA_TSKIM_SM_QRESUME:
267 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
268 bfa_tskim_send_abort(tskim);
269 break;
271 case BFA_TSKIM_SM_HWFAIL:
272 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
273 bfa_reqq_wcancel(&tskim->reqq_wait);
274 bfa_tskim_iocdisable_ios(tskim);
275 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
276 break;
278 default:
279 bfa_assert(0);
284 * BFA callback is pending
286 static void
287 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
289 bfa_trc(tskim->bfa, event);
291 switch (event) {
292 case BFA_TSKIM_SM_HCB:
293 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
294 bfa_tskim_free(tskim);
295 break;
297 case BFA_TSKIM_SM_CLEANUP:
298 bfa_tskim_notify_comp(tskim);
299 break;
301 case BFA_TSKIM_SM_HWFAIL:
302 break;
304 default:
305 bfa_assert(0);
312 * bfa_tskim_private
315 static void
316 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
318 struct bfa_tskim_s *tskim = cbarg;
320 if (!complete) {
321 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
322 return;
325 bfa_stats(tskim->itnim, tm_success);
326 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
329 static void
330 __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
332 struct bfa_tskim_s *tskim = cbarg;
334 if (!complete) {
335 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
336 return;
339 bfa_stats(tskim->itnim, tm_failures);
340 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
341 BFI_TSKIM_STS_FAILED);
344 static bfa_boolean_t
345 bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
347 switch (tskim->tm_cmnd) {
348 case FCP_TM_TARGET_RESET:
349 return BFA_TRUE;
351 case FCP_TM_ABORT_TASK_SET:
352 case FCP_TM_CLEAR_TASK_SET:
353 case FCP_TM_LUN_RESET:
354 case FCP_TM_CLEAR_ACA:
355 return (tskim->lun == lun);
357 default:
358 bfa_assert(0);
361 return BFA_FALSE;
365 * Gather affected IO requests and task management commands.
367 static void
368 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
370 struct bfa_itnim_s *itnim = tskim->itnim;
371 struct bfa_ioim_s *ioim;
372 struct list_head *qe, *qen;
374 INIT_LIST_HEAD(&tskim->io_q);
377 * Gather any active IO requests first.
379 list_for_each_safe(qe, qen, &itnim->io_q) {
380 ioim = (struct bfa_ioim_s *) qe;
381 if (bfa_tskim_match_scope
382 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
383 list_del(&ioim->qe);
384 list_add_tail(&ioim->qe, &tskim->io_q);
389 * Failback any pending IO requests immediately.
391 list_for_each_safe(qe, qen, &itnim->pending_q) {
392 ioim = (struct bfa_ioim_s *) qe;
393 if (bfa_tskim_match_scope
394 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
395 list_del(&ioim->qe);
396 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
397 bfa_ioim_tov(ioim);
403 * IO cleanup completion
405 static void
406 bfa_tskim_cleanp_comp(void *tskim_cbarg)
408 struct bfa_tskim_s *tskim = tskim_cbarg;
410 bfa_stats(tskim->itnim, tm_io_comps);
411 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
415 * Gather affected IO requests and task management commands.
417 static void
418 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
420 struct bfa_ioim_s *ioim;
421 struct list_head *qe, *qen;
423 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
425 list_for_each_safe(qe, qen, &tskim->io_q) {
426 ioim = (struct bfa_ioim_s *) qe;
427 bfa_wc_up(&tskim->wc);
428 bfa_ioim_cleanup_tm(ioim, tskim);
431 bfa_wc_wait(&tskim->wc);
435 * Send task management request to firmware.
437 static bfa_boolean_t
438 bfa_tskim_send(struct bfa_tskim_s *tskim)
440 struct bfa_itnim_s *itnim = tskim->itnim;
441 struct bfi_tskim_req_s *m;
444 * check for room in queue to send request now
446 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
447 if (!m)
448 return BFA_FALSE;
451 * build i/o request message next
453 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
454 bfa_lpuid(tskim->bfa));
456 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
457 m->itn_fhdl = tskim->itnim->rport->fw_handle;
458 m->t_secs = tskim->tsecs;
459 m->lun = tskim->lun;
460 m->tm_flags = tskim->tm_cmnd;
463 * queue I/O message to firmware
465 bfa_reqq_produce(tskim->bfa, itnim->reqq);
466 return BFA_TRUE;
470 * Send abort request to cleanup an active TM to firmware.
472 static bfa_boolean_t
473 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
475 struct bfa_itnim_s *itnim = tskim->itnim;
476 struct bfi_tskim_abortreq_s *m;
479 * check for room in queue to send request now
481 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
482 if (!m)
483 return BFA_FALSE;
486 * build i/o request message next
488 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
489 bfa_lpuid(tskim->bfa));
491 m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
494 * queue I/O message to firmware
496 bfa_reqq_produce(tskim->bfa, itnim->reqq);
497 return BFA_TRUE;
501 * Call to resume task management cmnd waiting for room in request queue.
503 static void
504 bfa_tskim_qresume(void *cbarg)
506 struct bfa_tskim_s *tskim = cbarg;
508 bfa_fcpim_stats(tskim->fcpim, qresumes);
509 bfa_stats(tskim->itnim, tm_qresumes);
510 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
514 * Cleanup IOs associated with a task mangement command on IOC failures.
516 static void
517 bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
519 struct bfa_ioim_s *ioim;
520 struct list_head *qe, *qen;
522 list_for_each_safe(qe, qen, &tskim->io_q) {
523 ioim = (struct bfa_ioim_s *) qe;
524 bfa_ioim_iocdisable(ioim);
531 * bfa_tskim_friend
535 * Notification on completions from related ioim.
537 void
538 bfa_tskim_iodone(struct bfa_tskim_s *tskim)
540 bfa_wc_down(&tskim->wc);
544 * Handle IOC h/w failure notification from itnim.
546 void
547 bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
549 tskim->notify = BFA_FALSE;
550 bfa_stats(tskim->itnim, tm_iocdowns);
551 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
555 * Cleanup TM command and associated IOs as part of ITNIM offline.
557 void
558 bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
560 tskim->notify = BFA_TRUE;
561 bfa_stats(tskim->itnim, tm_cleanups);
562 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
566 * Memory allocation and initialization.
568 void
569 bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
571 struct bfa_tskim_s *tskim;
572 u16 i;
574 INIT_LIST_HEAD(&fcpim->tskim_free_q);
576 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
577 fcpim->tskim_arr = tskim;
579 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
581 * initialize TSKIM
583 bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
584 tskim->tsk_tag = i;
585 tskim->bfa = fcpim->bfa;
586 tskim->fcpim = fcpim;
587 tskim->notify = BFA_FALSE;
588 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
589 tskim);
590 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
592 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
595 bfa_meminfo_kva(minfo) = (u8 *) tskim;
void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/*
	 * @todo
	 */
}
606 void
607 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
609 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
610 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
611 struct bfa_tskim_s *tskim;
612 u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
614 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
615 bfa_assert(tskim->tsk_tag == tsk_tag);
617 tskim->tsk_status = rsp->tsk_status;
620 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
621 * requests. All other statuses are for normal completions.
623 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
624 bfa_stats(tskim->itnim, tm_cleanup_comps);
625 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
626 } else {
627 bfa_stats(tskim->itnim, tm_fw_rsps);
628 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
635 * bfa_tskim_api
639 struct bfa_tskim_s *
640 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
642 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
643 struct bfa_tskim_s *tskim;
645 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
647 if (!tskim)
648 bfa_fcpim_stats(fcpim, no_tskims);
649 else
650 tskim->dtsk = dtsk;
652 return tskim;
655 void
656 bfa_tskim_free(struct bfa_tskim_s *tskim)
658 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
659 list_del(&tskim->qe);
660 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
664 * Start a task management command.
666 * @param[in] tskim BFA task management command instance
667 * @param[in] itnim i-t nexus for the task management command
668 * @param[in] lun lun, if applicable
669 * @param[in] tm_cmnd Task management command code.
670 * @param[in] t_secs Timeout in seconds
672 * @return None.
674 void
675 bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
676 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
678 tskim->itnim = itnim;
679 tskim->lun = lun;
680 tskim->tm_cmnd = tm_cmnd;
681 tskim->tsecs = tsecs;
682 tskim->notify = BFA_FALSE;
683 bfa_stats(itnim, tm_cmnds);
685 list_add_tail(&tskim->qe, &itnim->tsk_q);
686 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);