drivers/scsi/bnx2fc/bnx2fc_io.c
1 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing.
4 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
10 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) */
13 #include "bnx2fc.h"
15 #define RESERVE_FREE_LIST_INDEX num_possible_cpus()
17 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18 int bd_index);
19 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
22 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
23 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
24 struct fcoe_fcp_rsp_payload *fcp_rsp,
25 u8 num_rq);
27 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
28 unsigned int timer_msec)
30 struct bnx2fc_interface *interface = io_req->port->priv;
32 if (queue_delayed_work(interface->timer_work_queue,
33 &io_req->timeout_work,
34 msecs_to_jiffies(timer_msec)))
35 kref_get(&io_req->refcount);
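/*
 * Editorial note (sketch, not driver code): bnx2fc_cmd_timer_set() above
 * takes a reference only when queue_delayed_work() actually queues the
 * work, so every path that stops the timer must drop exactly one hold:
 *
 *	if (cancel_delayed_work(&io_req->timeout_work))
 *		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 *
 * bnx2fc_cmd_timeout() below drops the same hold at its "done" label.
 */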
38 static void bnx2fc_cmd_timeout(struct work_struct *work)
40 struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
41 timeout_work.work);
42 struct fc_lport *lport;
43 struct fc_rport_priv *rdata;
44 u8 cmd_type = io_req->cmd_type;
45 struct bnx2fc_rport *tgt = io_req->tgt;
46 int logo_issued;
47 int rc;
49 BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
50 "req_flags = %lx\n", cmd_type, io_req->req_flags);
52 spin_lock_bh(&tgt->tgt_lock);
53 if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
54 clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
56 /* ideally we should hold the io_req until RRQ completes,
57 * and release io_req from timeout hold. */
59 spin_unlock_bh(&tgt->tgt_lock);
60 bnx2fc_send_rrq(io_req);
61 return;
63 if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
64 BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
65 goto done;
68 switch (cmd_type) {
69 case BNX2FC_SCSI_CMD:
70 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
71 &io_req->req_flags)) {
72 /* Handle eh_abort timeout */
73 BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
74 complete(&io_req->tm_done);
75 } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
76 &io_req->req_flags)) {
77 /* Handle internally generated ABTS timeout */
78 BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
79 io_req->refcount.refcount.counter);
80 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
81 &io_req->req_flags))) {
83 lport = io_req->port->lport;
84 rdata = io_req->tgt->rdata;
85 logo_issued = test_and_set_bit(
86 BNX2FC_FLAG_EXPL_LOGO,
87 &tgt->flags);
88 kref_put(&io_req->refcount, bnx2fc_cmd_release);
89 spin_unlock_bh(&tgt->tgt_lock);
91 /* Explicitly logo the target */
92 if (!logo_issued) {
93 BNX2FC_IO_DBG(io_req, "Explicit "
94 "logo - tgt flags = 0x%lx\n",
95 tgt->flags);
97 mutex_lock(&lport->disc.disc_mutex);
98 lport->tt.rport_logoff(rdata);
99 mutex_unlock(&lport->disc.disc_mutex);
101 return;
103 } else {
104 /* Handle IO timeout */
105 BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
106 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
107 &io_req->req_flags)) {
108 BNX2FC_IO_DBG(io_req, "IO completed before "
109 "timer expiry\n");
110 goto done;
113 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
114 &io_req->req_flags)) {
115 rc = bnx2fc_initiate_abts(io_req);
116 if (rc == SUCCESS)
117 goto done;
119 /* Explicitly logo the target if
120 * abts initiation fails */
122 lport = io_req->port->lport;
123 rdata = io_req->tgt->rdata;
124 logo_issued = test_and_set_bit(
125 BNX2FC_FLAG_EXPL_LOGO,
126 &tgt->flags);
127 kref_put(&io_req->refcount, bnx2fc_cmd_release);
128 spin_unlock_bh(&tgt->tgt_lock);
130 if (!logo_issued) {
131 BNX2FC_IO_DBG(io_req, "Explicit "
132 "logo - tgt flags = 0x%lx\n",
133 tgt->flags);
136 mutex_lock(&lport->disc.disc_mutex);
137 lport->tt.rport_logoff(rdata);
138 mutex_unlock(&lport->disc.disc_mutex);
140 return;
141 } else {
142 BNX2FC_IO_DBG(io_req, "IO already in "
143 "ABTS processing\n");
146 break;
147 case BNX2FC_ELS:
149 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
150 BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
152 if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
153 &io_req->req_flags)) {
154 lport = io_req->port->lport;
155 rdata = io_req->tgt->rdata;
156 logo_issued = test_and_set_bit(
157 BNX2FC_FLAG_EXPL_LOGO,
158 &tgt->flags);
159 kref_put(&io_req->refcount, bnx2fc_cmd_release);
160 spin_unlock_bh(&tgt->tgt_lock);
162 /* Explicitly logo the target */
163 if (!logo_issued) {
164 BNX2FC_IO_DBG(io_req, "Explicitly logo "
165 "(els)\n");
166 mutex_lock(&lport->disc.disc_mutex);
167 lport->tt.rport_logoff(rdata);
168 mutex_unlock(&lport->disc.disc_mutex);
170 return;
172 } else {
174 /* Handle ELS timeout.
175 * tgt_lock is used to sync compl path and timeout
176 * path. If els compl path is processing this IO, we
177 * have nothing to do here, just release the timer hold. */
179 BNX2FC_IO_DBG(io_req, "ELS timed out\n");
180 if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
181 &io_req->req_flags))
182 goto done;
184 /* Indicate the cb_func that this ELS is timed out */
185 set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
187 if ((io_req->cb_func) && (io_req->cb_arg)) {
188 io_req->cb_func(io_req->cb_arg);
189 io_req->cb_arg = NULL;
192 break;
193 default:
194 printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
195 cmd_type);
196 break;
199 done:
200 /* release the cmd that was held when timer was set */
201 kref_put(&io_req->refcount, bnx2fc_cmd_release);
202 spin_unlock_bh(&tgt->tgt_lock);
205 static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
207 /* Called with host lock held */
208 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
211 /* active_cmd_queue may have other command types as well,
212 * and during flush operation, we want to error back only
213 * SCSI commands. */
215 if (io_req->cmd_type != BNX2FC_SCSI_CMD)
216 return;
218 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
219 if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
220 /* Do not call scsi done for this IO */
221 return;
224 bnx2fc_unmap_sg_list(io_req);
225 io_req->sc_cmd = NULL;
226 if (!sc_cmd) {
227 printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
228 "IO(0x%x) already cleaned up\n",
229 io_req->xid);
230 return;
232 sc_cmd->result = err_code << 16;
234 BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
235 sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
236 sc_cmd->allowed);
237 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
238 sc_cmd->SCp.ptr = NULL;
239 sc_cmd->scsi_done(sc_cmd);
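/*
 * Editorial note: "err_code << 16" places err_code in the host-byte
 * field of scsi_cmnd->result, which is what host_byte() in the debug
 * print above extracts; e.g. DID_ERROR (0x07) yields result 0x00070000.
 */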
242 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
243 u16 min_xid, u16 max_xid)
245 struct bnx2fc_cmd_mgr *cmgr;
246 struct io_bdt *bdt_info;
247 struct bnx2fc_cmd *io_req;
248 size_t len;
249 u32 mem_size;
250 u16 xid;
251 int i;
252 int num_ios, num_pri_ios;
253 size_t bd_tbl_sz;
254 int arr_sz = num_possible_cpus() + 1;
256 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
257 printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
258 "and max_xid 0x%x\n", min_xid, max_xid);
259 return NULL;
261 BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
263 num_ios = max_xid - min_xid + 1;
264 len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
265 len += sizeof(struct bnx2fc_cmd_mgr);
267 cmgr = kzalloc(len, GFP_KERNEL);
268 if (!cmgr) {
269 printk(KERN_ERR PFX "failed to alloc cmgr\n");
270 return NULL;
273 cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
274 arr_sz, GFP_KERNEL);
275 if (!cmgr->free_list) {
276 printk(KERN_ERR PFX "failed to alloc free_list\n");
277 goto mem_err;
280 cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
281 arr_sz, GFP_KERNEL);
282 if (!cmgr->free_list_lock) {
283 printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
284 goto mem_err;
287 cmgr->hba = hba;
288 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
290 for (i = 0; i < arr_sz; i++) {
291 INIT_LIST_HEAD(&cmgr->free_list[i]);
292 spin_lock_init(&cmgr->free_list_lock[i]);
296 /* Pre-allocated pool of bnx2fc_cmds.
297 * The last entry in the free list array is the free list
298 * of slow path requests. */
300 xid = BNX2FC_MIN_XID;
301 num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
302 for (i = 0; i < num_ios; i++) {
303 io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
305 if (!io_req) {
306 printk(KERN_ERR PFX "failed to alloc io_req\n");
307 goto mem_err;
310 INIT_LIST_HEAD(&io_req->link);
311 INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
313 io_req->xid = xid++;
314 if (i < num_pri_ios)
315 list_add_tail(&io_req->link,
316 &cmgr->free_list[io_req->xid %
317 num_possible_cpus()]);
318 else
319 list_add_tail(&io_req->link,
320 &cmgr->free_list[num_possible_cpus()]);
321 io_req++;
324 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
325 mem_size = num_ios * sizeof(struct io_bdt *);
326 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
327 if (!cmgr->io_bdt_pool) {
328 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
329 goto mem_err;
332 mem_size = sizeof(struct io_bdt);
333 for (i = 0; i < num_ios; i++) {
334 cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
335 if (!cmgr->io_bdt_pool[i]) {
336 printk(KERN_ERR PFX "failed to alloc "
337 "io_bdt_pool[%d]\n", i);
338 goto mem_err;
342 /* Allocate and map fcoe_bdt_ctx structures */
343 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
344 for (i = 0; i < num_ios; i++) {
345 bdt_info = cmgr->io_bdt_pool[i];
346 bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
347 bd_tbl_sz,
348 &bdt_info->bd_tbl_dma,
349 GFP_KERNEL);
350 if (!bdt_info->bd_tbl) {
351 printk(KERN_ERR PFX "failed to alloc "
352 "bdt_tbl[%d]\n", i);
353 goto mem_err;
357 return cmgr;
359 mem_err:
360 bnx2fc_cmd_mgr_free(cmgr);
361 return NULL;
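/*
 * Editorial sketch of the free-list layout built above, assuming a
 * hypothetical 4-CPU system (num_possible_cpus() == 4):
 *
 *	free_list[0..3]: fast-path SCSI commands, list = xid % 4
 *	free_list[4]:    reserve list holding the last BNX2FC_ELSTM_XIDS
 *	                 commands, used for slow-path ELS/TM requests
 */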
364 void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
366 struct io_bdt *bdt_info;
367 struct bnx2fc_hba *hba = cmgr->hba;
368 size_t bd_tbl_sz;
369 u16 min_xid = BNX2FC_MIN_XID;
370 u16 max_xid = BNX2FC_MAX_XID;
371 int num_ios;
372 int i;
374 num_ios = max_xid - min_xid + 1;
376 /* Free fcoe_bdt_ctx structures */
377 if (!cmgr->io_bdt_pool)
378 goto free_cmd_pool;
380 bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
381 for (i = 0; i < num_ios; i++) {
382 bdt_info = cmgr->io_bdt_pool[i];
383 if (bdt_info->bd_tbl) {
384 dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
385 bdt_info->bd_tbl,
386 bdt_info->bd_tbl_dma);
387 bdt_info->bd_tbl = NULL;
391 /* Destroy io_bdt pool */
392 for (i = 0; i < num_ios; i++) {
393 kfree(cmgr->io_bdt_pool[i]);
394 cmgr->io_bdt_pool[i] = NULL;
397 kfree(cmgr->io_bdt_pool);
398 cmgr->io_bdt_pool = NULL;
400 free_cmd_pool:
401 kfree(cmgr->free_list_lock);
403 /* Destroy cmd pool */
404 if (!cmgr->free_list)
405 goto free_cmgr;
407 for (i = 0; i < num_possible_cpus() + 1; i++) {
408 struct list_head *list;
409 struct list_head *tmp;
411 list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
412 struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
413 list_del(&io_req->link);
414 kfree(io_req);
417 kfree(cmgr->free_list);
418 free_cmgr:
419 /* Free command manager itself */
420 kfree(cmgr);
423 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
425 struct fcoe_port *port = tgt->port;
426 struct bnx2fc_interface *interface = port->priv;
427 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
428 struct bnx2fc_cmd *io_req;
429 struct list_head *listp;
430 struct io_bdt *bd_tbl;
431 int index = RESERVE_FREE_LIST_INDEX;
432 u32 free_sqes;
433 u32 max_sqes;
434 u16 xid;
436 max_sqes = tgt->max_sqes;
437 switch (type) {
438 case BNX2FC_TASK_MGMT_CMD:
439 max_sqes = BNX2FC_TM_MAX_SQES;
440 break;
441 case BNX2FC_ELS:
442 max_sqes = BNX2FC_ELS_MAX_SQES;
443 break;
444 default:
445 break;
449 /* NOTE: Free list insertions and deletions are protected with
450 * cmgr lock */
452 spin_lock_bh(&cmd_mgr->free_list_lock[index]);
453 free_sqes = atomic_read(&tgt->free_sqes);
454 if ((list_empty(&(cmd_mgr->free_list[index]))) ||
455 (tgt->num_active_ios.counter >= max_sqes) ||
456 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
457 BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
458 "ios(%d):sqes(%d)\n",
459 tgt->num_active_ios.counter, tgt->max_sqes);
460 if (list_empty(&(cmd_mgr->free_list[index])))
461 printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
462 spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
463 return NULL;
466 listp = (struct list_head *)
467 cmd_mgr->free_list[index].next;
468 list_del_init(listp);
469 io_req = (struct bnx2fc_cmd *) listp;
470 xid = io_req->xid;
471 cmd_mgr->cmds[xid] = io_req;
472 atomic_inc(&tgt->num_active_ios);
473 atomic_dec(&tgt->free_sqes);
474 spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
476 INIT_LIST_HEAD(&io_req->link);
478 io_req->port = port;
479 io_req->cmd_mgr = cmd_mgr;
480 io_req->req_flags = 0;
481 io_req->cmd_type = type;
483 /* Bind io_bdt for this io_req */
484 /* Have a static link between io_req and io_bdt_pool */
485 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
486 bd_tbl->io_req = io_req;
488 /* Hold the io_req against deletion */
489 kref_init(&io_req->refcount);
490 return io_req;
493 struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
495 struct fcoe_port *port = tgt->port;
496 struct bnx2fc_interface *interface = port->priv;
497 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
498 struct bnx2fc_cmd *io_req;
499 struct list_head *listp;
500 struct io_bdt *bd_tbl;
501 u32 free_sqes;
502 u32 max_sqes;
503 u16 xid;
504 int index = get_cpu();
506 max_sqes = BNX2FC_SCSI_MAX_SQES;
508 /* NOTE: Free list insertions and deletions are protected with
509 * cmgr lock */
511 spin_lock_bh(&cmd_mgr->free_list_lock[index]);
512 free_sqes = atomic_read(&tgt->free_sqes);
513 if ((list_empty(&cmd_mgr->free_list[index])) ||
514 (tgt->num_active_ios.counter >= max_sqes) ||
515 (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
516 spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
517 put_cpu();
518 return NULL;
521 listp = (struct list_head *)
522 cmd_mgr->free_list[index].next;
523 list_del_init(listp);
524 io_req = (struct bnx2fc_cmd *) listp;
525 xid = io_req->xid;
526 cmd_mgr->cmds[xid] = io_req;
527 atomic_inc(&tgt->num_active_ios);
528 atomic_dec(&tgt->free_sqes);
529 spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
530 put_cpu();
532 INIT_LIST_HEAD(&io_req->link);
534 io_req->port = port;
535 io_req->cmd_mgr = cmd_mgr;
536 io_req->req_flags = 0;
538 /* Bind io_bdt for this io_req */
539 /* Have a static link between io_req and io_bdt_pool */
540 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
541 bd_tbl->io_req = io_req;
543 /* Hold the io_req against deletion */
544 kref_init(&io_req->refcount);
545 return io_req;
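/*
 * Editorial note: bnx2fc_cmd_alloc() above takes from the free list of
 * the executing CPU (get_cpu()), while bnx2fc_cmd_release() below
 * recomputes the list index as xid % num_possible_cpus(). The two agree
 * because bnx2fc_cmd_mgr_alloc() seeded each XID into exactly that list.
 */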
548 void bnx2fc_cmd_release(struct kref *ref)
550 struct bnx2fc_cmd *io_req = container_of(ref,
551 struct bnx2fc_cmd, refcount);
552 struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
553 int index;
555 if (io_req->cmd_type == BNX2FC_SCSI_CMD)
556 index = io_req->xid % num_possible_cpus();
557 else
558 index = RESERVE_FREE_LIST_INDEX;
561 spin_lock_bh(&cmd_mgr->free_list_lock[index]);
562 if (io_req->cmd_type != BNX2FC_SCSI_CMD)
563 bnx2fc_free_mp_resc(io_req);
564 cmd_mgr->cmds[io_req->xid] = NULL;
565 /* Delete IO from retire queue */
566 list_del_init(&io_req->link);
567 /* Add it to the free list */
568 list_add(&io_req->link,
569 &cmd_mgr->free_list[index]);
570 atomic_dec(&io_req->tgt->num_active_ios);
571 spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
575 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
577 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
578 struct bnx2fc_interface *interface = io_req->port->priv;
579 struct bnx2fc_hba *hba = interface->hba;
580 size_t sz = sizeof(struct fcoe_bd_ctx);
582 /* clear tm flags */
583 mp_req->tm_flags = 0;
584 if (mp_req->mp_req_bd) {
585 dma_free_coherent(&hba->pcidev->dev, sz,
586 mp_req->mp_req_bd,
587 mp_req->mp_req_bd_dma);
588 mp_req->mp_req_bd = NULL;
590 if (mp_req->mp_resp_bd) {
591 dma_free_coherent(&hba->pcidev->dev, sz,
592 mp_req->mp_resp_bd,
593 mp_req->mp_resp_bd_dma);
594 mp_req->mp_resp_bd = NULL;
596 if (mp_req->req_buf) {
597 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
598 mp_req->req_buf,
599 mp_req->req_buf_dma);
600 mp_req->req_buf = NULL;
602 if (mp_req->resp_buf) {
603 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
604 mp_req->resp_buf,
605 mp_req->resp_buf_dma);
606 mp_req->resp_buf = NULL;
610 int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
612 struct bnx2fc_mp_req *mp_req;
613 struct fcoe_bd_ctx *mp_req_bd;
614 struct fcoe_bd_ctx *mp_resp_bd;
615 struct bnx2fc_interface *interface = io_req->port->priv;
616 struct bnx2fc_hba *hba = interface->hba;
617 dma_addr_t addr;
618 size_t sz;
620 mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
621 memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
623 mp_req->req_len = sizeof(struct fcp_cmnd);
624 io_req->data_xfer_len = mp_req->req_len;
625 mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
626 &mp_req->req_buf_dma,
627 GFP_ATOMIC);
628 if (!mp_req->req_buf) {
629 printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
630 bnx2fc_free_mp_resc(io_req);
631 return FAILED;
634 mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
635 &mp_req->resp_buf_dma,
636 GFP_ATOMIC);
637 if (!mp_req->resp_buf) {
638 printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
639 bnx2fc_free_mp_resc(io_req);
640 return FAILED;
642 memset(mp_req->req_buf, 0, PAGE_SIZE);
643 memset(mp_req->resp_buf, 0, PAGE_SIZE);
645 /* Allocate and map mp_req_bd and mp_resp_bd */
646 sz = sizeof(struct fcoe_bd_ctx);
647 mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
648 &mp_req->mp_req_bd_dma,
649 GFP_ATOMIC);
650 if (!mp_req->mp_req_bd) {
651 printk(KERN_ERR PFX "unable to alloc MP req bd\n");
652 bnx2fc_free_mp_resc(io_req);
653 return FAILED;
655 mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
656 &mp_req->mp_resp_bd_dma,
657 GFP_ATOMIC);
658 if (!mp_req->mp_resp_bd) {
659 printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
660 bnx2fc_free_mp_resc(io_req);
661 return FAILED;
663 /* Fill bd table */
664 addr = mp_req->req_buf_dma;
665 mp_req_bd = mp_req->mp_req_bd;
666 mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
667 mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
668 mp_req_bd->buf_len = PAGE_SIZE;
669 mp_req_bd->flags = 0;
672 /* MP buffer is either a task mgmt command or an ELS.
673 * So the assumption is that it consumes a single bd
674 * entry in the bd table */
676 mp_resp_bd = mp_req->mp_resp_bd;
677 addr = mp_req->resp_buf_dma;
678 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
679 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
680 mp_resp_bd->buf_len = PAGE_SIZE;
681 mp_resp_bd->flags = 0;
683 return SUCCESS;
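/*
 * Editorial note: the firmware BD carries the 64-bit DMA address as two
 * 32-bit halves, as filled in above; e.g. addr = 0x0000001234567890 is
 * stored as buf_addr_lo = 0x34567890 and buf_addr_hi = 0x00000012.
 */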
686 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
688 struct fc_lport *lport;
689 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
690 struct fc_rport_libfc_priv *rp = rport->dd_data;
691 struct fcoe_port *port;
692 struct bnx2fc_interface *interface;
693 struct bnx2fc_rport *tgt;
694 struct bnx2fc_cmd *io_req;
695 struct bnx2fc_mp_req *tm_req;
696 struct fcoe_task_ctx_entry *task;
697 struct fcoe_task_ctx_entry *task_page;
698 struct Scsi_Host *host = sc_cmd->device->host;
699 struct fc_frame_header *fc_hdr;
700 struct fcp_cmnd *fcp_cmnd;
701 int task_idx, index;
702 int rc = SUCCESS;
703 u16 xid;
704 u32 sid, did;
705 unsigned long start = jiffies;
707 lport = shost_priv(host);
708 port = lport_priv(lport);
709 interface = port->priv;
711 if (rport == NULL) {
712 printk(KERN_ERR PFX "device_reset: rport is NULL\n");
713 rc = FAILED;
714 goto tmf_err;
717 rc = fc_block_scsi_eh(sc_cmd);
718 if (rc)
719 return rc;
721 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
722 printk(KERN_ERR PFX "device_reset: link is not ready\n");
723 rc = FAILED;
724 goto tmf_err;
726 /* rport and tgt are allocated together, so tgt should be non-NULL */
727 tgt = (struct bnx2fc_rport *)&rp[1];
729 if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
730 printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
731 rc = FAILED;
732 goto tmf_err;
734 retry_tmf:
735 io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
736 if (!io_req) {
737 if (time_after(jiffies, start + HZ)) {
738 printk(KERN_ERR PFX "tmf: Failed TMF\n");
739 rc = FAILED;
740 goto tmf_err;
742 msleep(20);
743 goto retry_tmf;
745 /* Initialize rest of io_req fields */
746 io_req->sc_cmd = sc_cmd;
747 io_req->port = port;
748 io_req->tgt = tgt;
750 tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
752 rc = bnx2fc_init_mp_req(io_req);
753 if (rc == FAILED) {
754 printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
755 spin_lock_bh(&tgt->tgt_lock);
756 kref_put(&io_req->refcount, bnx2fc_cmd_release);
757 spin_unlock_bh(&tgt->tgt_lock);
758 goto tmf_err;
761 /* Set TM flags */
762 io_req->io_req_flags = 0;
763 tm_req->tm_flags = tm_flags;
765 /* Fill FCP_CMND */
766 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
767 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
768 memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
769 fcp_cmnd->fc_dl = 0;
771 /* Fill FC header */
772 fc_hdr = &(tm_req->req_fc_hdr);
773 sid = tgt->sid;
774 did = rport->port_id;
775 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
776 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
777 FC_FC_SEQ_INIT, 0);
778 /* Obtain exchange id */
779 xid = io_req->xid;
781 BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
782 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
783 index = xid % BNX2FC_TASKS_PER_PAGE;
785 /* Initialize task context for this IO request */
786 task_page = (struct fcoe_task_ctx_entry *)
787 interface->hba->task_ctx[task_idx];
788 task = &(task_page[index]);
789 bnx2fc_init_mp_task(io_req, task);
791 sc_cmd->SCp.ptr = (char *)io_req;
793 /* Obtain free SQ entry */
794 spin_lock_bh(&tgt->tgt_lock);
795 bnx2fc_add_2_sq(tgt, xid);
797 /* Enqueue the io_req to active_tm_queue */
798 io_req->on_tmf_queue = 1;
799 list_add_tail(&io_req->link, &tgt->active_tm_queue);
801 init_completion(&io_req->tm_done);
802 io_req->wait_for_comp = 1;
804 /* Ring doorbell */
805 bnx2fc_ring_doorbell(tgt);
806 spin_unlock_bh(&tgt->tgt_lock);
808 rc = wait_for_completion_timeout(&io_req->tm_done,
809 BNX2FC_TM_TIMEOUT * HZ);
810 spin_lock_bh(&tgt->tgt_lock);
812 io_req->wait_for_comp = 0;
813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
815 if (io_req->on_tmf_queue) {
816 list_del_init(&io_req->link);
817 io_req->on_tmf_queue = 0;
819 io_req->wait_for_comp = 1;
820 bnx2fc_initiate_cleanup(io_req);
821 spin_unlock_bh(&tgt->tgt_lock);
822 rc = wait_for_completion_timeout(&io_req->tm_done,
823 BNX2FC_FW_TIMEOUT);
824 spin_lock_bh(&tgt->tgt_lock);
825 io_req->wait_for_comp = 0;
826 if (!rc)
827 kref_put(&io_req->refcount, bnx2fc_cmd_release);
830 spin_unlock_bh(&tgt->tgt_lock);
832 if (!rc) {
833 BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
834 rc = FAILED;
835 } else {
836 BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
837 rc = SUCCESS;
839 tmf_err:
840 return rc;
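/*
 * Editorial note: wait_for_completion_timeout() returns 0 on timeout
 * and the remaining jiffies otherwise, so the "if (!rc)" tests above
 * translate a missing TM completion into FAILED. Sketch of the mapping:
 *
 *	rc = wait_for_completion_timeout(&io_req->tm_done, tmo);
 *	if (!rc)	rc = FAILED;	(no completion within tmo)
 *	else		rc = SUCCESS;	(TM completed in time)
 */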
843 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
845 struct fc_lport *lport;
846 struct bnx2fc_rport *tgt = io_req->tgt;
847 struct fc_rport *rport = tgt->rport;
848 struct fc_rport_priv *rdata = tgt->rdata;
849 struct bnx2fc_interface *interface;
850 struct fcoe_port *port;
851 struct bnx2fc_cmd *abts_io_req;
852 struct fcoe_task_ctx_entry *task;
853 struct fcoe_task_ctx_entry *task_page;
854 struct fc_frame_header *fc_hdr;
855 struct bnx2fc_mp_req *abts_req;
856 int task_idx, index;
857 u32 sid, did;
858 u16 xid;
859 int rc = SUCCESS;
860 u32 r_a_tov = rdata->r_a_tov;
862 /* called with tgt_lock held */
863 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
865 port = io_req->port;
866 interface = port->priv;
867 lport = port->lport;
869 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
870 printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
871 rc = FAILED;
872 goto abts_err;
875 if (rport == NULL) {
876 printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
877 rc = FAILED;
878 goto abts_err;
881 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
882 printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
883 rc = FAILED;
884 goto abts_err;
887 abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
888 if (!abts_io_req) {
889 printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
890 rc = FAILED;
891 goto abts_err;
894 /* Initialize rest of io_req fields */
895 abts_io_req->sc_cmd = NULL;
896 abts_io_req->port = port;
897 abts_io_req->tgt = tgt;
898 abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
900 abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
901 memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
903 /* Fill FC header */
904 fc_hdr = &(abts_req->req_fc_hdr);
906 /* Obtain oxid and rxid for the original exchange to be aborted */
907 fc_hdr->fh_ox_id = htons(io_req->xid);
908 fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
910 sid = tgt->sid;
911 did = rport->port_id;
913 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
914 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
915 FC_FC_SEQ_INIT, 0);
917 xid = abts_io_req->xid;
918 BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
919 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
920 index = xid % BNX2FC_TASKS_PER_PAGE;
922 /* Initialize task context for this IO request */
923 task_page = (struct fcoe_task_ctx_entry *)
924 interface->hba->task_ctx[task_idx];
925 task = &(task_page[index]);
926 bnx2fc_init_mp_task(abts_io_req, task);
929 /* ABTS task is a temporary task that will be cleaned up
930 * irrespective of ABTS response. We need to start the timer
931 * for the original exchange, as the CQE is posted for the original
932 * IO request.
934 * Timer for ABTS is started only when it is originated by a
935 * TM request. For the ABTS issued as part of ULP timeout,
936 * scsi-ml maintains the timers. */
939 /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
940 bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
942 /* Obtain free SQ entry */
943 bnx2fc_add_2_sq(tgt, xid);
945 /* Ring doorbell */
946 bnx2fc_ring_doorbell(tgt);
948 abts_err:
949 return rc;
952 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
953 enum fc_rctl r_ctl)
955 struct fc_lport *lport;
956 struct bnx2fc_rport *tgt = orig_io_req->tgt;
957 struct bnx2fc_interface *interface;
958 struct fcoe_port *port;
959 struct bnx2fc_cmd *seq_clnp_req;
960 struct fcoe_task_ctx_entry *task;
961 struct fcoe_task_ctx_entry *task_page;
962 struct bnx2fc_els_cb_arg *cb_arg = NULL;
963 int task_idx, index;
964 u16 xid;
965 int rc = 0;
967 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
968 orig_io_req->xid);
969 kref_get(&orig_io_req->refcount);
971 port = orig_io_req->port;
972 interface = port->priv;
973 lport = port->lport;
975 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
976 if (!cb_arg) {
977 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
978 rc = -ENOMEM;
979 goto cleanup_err;
982 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
983 if (!seq_clnp_req) {
984 printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
985 rc = -ENOMEM;
986 kfree(cb_arg);
987 goto cleanup_err;
989 /* Initialize rest of io_req fields */
990 seq_clnp_req->sc_cmd = NULL;
991 seq_clnp_req->port = port;
992 seq_clnp_req->tgt = tgt;
993 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
995 xid = seq_clnp_req->xid;
997 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
998 index = xid % BNX2FC_TASKS_PER_PAGE;
1000 /* Initialize task context for this IO request */
1001 task_page = (struct fcoe_task_ctx_entry *)
1002 interface->hba->task_ctx[task_idx];
1003 task = &(task_page[index]);
1004 cb_arg->aborted_io_req = orig_io_req;
1005 cb_arg->io_req = seq_clnp_req;
1006 cb_arg->r_ctl = r_ctl;
1007 cb_arg->offset = offset;
1008 seq_clnp_req->cb_arg = cb_arg;
1010 printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
1011 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
1013 /* Obtain free SQ entry */
1014 bnx2fc_add_2_sq(tgt, xid);
1016 /* Ring doorbell */
1017 bnx2fc_ring_doorbell(tgt);
1018 cleanup_err:
1019 return rc;
1022 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
1024 struct fc_lport *lport;
1025 struct bnx2fc_rport *tgt = io_req->tgt;
1026 struct bnx2fc_interface *interface;
1027 struct fcoe_port *port;
1028 struct bnx2fc_cmd *cleanup_io_req;
1029 struct fcoe_task_ctx_entry *task;
1030 struct fcoe_task_ctx_entry *task_page;
1031 int task_idx, index;
1032 u16 xid, orig_xid;
1033 int rc = 0;
1035 /* ASSUMPTION: called with tgt_lock held */
1036 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
1038 port = io_req->port;
1039 interface = port->priv;
1040 lport = port->lport;
1042 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
1043 if (!cleanup_io_req) {
1044 printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
1045 rc = -1;
1046 goto cleanup_err;
1049 /* Initialize rest of io_req fields */
1050 cleanup_io_req->sc_cmd = NULL;
1051 cleanup_io_req->port = port;
1052 cleanup_io_req->tgt = tgt;
1053 cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
1055 xid = cleanup_io_req->xid;
1057 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
1058 index = xid % BNX2FC_TASKS_PER_PAGE;
1060 /* Initialize task context for this IO request */
1061 task_page = (struct fcoe_task_ctx_entry *)
1062 interface->hba->task_ctx[task_idx];
1063 task = &(task_page[index]);
1064 orig_xid = io_req->xid;
1066 BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
1068 bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
1070 /* Obtain free SQ entry */
1071 bnx2fc_add_2_sq(tgt, xid);
1073 /* Ring doorbell */
1074 bnx2fc_ring_doorbell(tgt);
1076 cleanup_err:
1077 return rc;
1081 /** bnx2fc_eh_target_reset: Reset a target
1083 * @sc_cmd: SCSI command
1085 * Set from SCSI host template to send task mgmt command to the target
1086 * and wait for the response */
1088 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
1090 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
1094 /** bnx2fc_eh_device_reset - Reset a single LUN
1096 * @sc_cmd: SCSI command
1098 * Set from SCSI host template to send task mgmt command to the target
1099 * and wait for the response */
1101 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1103 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1106 int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
1108 struct bnx2fc_rport *tgt = io_req->tgt;
1109 struct fc_rport_priv *rdata = tgt->rdata;
1110 int logo_issued;
1111 int rc = SUCCESS;
1112 int wait_cnt = 0;
1114 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1115 tgt->flags);
1116 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1117 &tgt->flags);
1118 io_req->wait_for_comp = 1;
1119 bnx2fc_initiate_cleanup(io_req);
1121 spin_unlock_bh(&tgt->tgt_lock);
1123 wait_for_completion(&io_req->tm_done);
1125 io_req->wait_for_comp = 0;
1127 /* release the reference taken in eh_abort to allow the
1128 * target to re-login after flushing IOs */
1130 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1132 if (!logo_issued) {
1133 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
1134 mutex_lock(&lport->disc.disc_mutex);
1135 lport->tt.rport_logoff(rdata);
1136 mutex_unlock(&lport->disc.disc_mutex);
1137 do {
1138 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1139 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
1140 rc = FAILED;
1141 break;
1143 } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
1145 spin_lock_bh(&tgt->tgt_lock);
1146 return rc;
1149 /** bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
1150 * SCSI command
1152 * @sc_cmd: SCSI_ML command pointer
1154 * SCSI abort request handler */
1156 int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1158 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1159 struct fc_rport_libfc_priv *rp = rport->dd_data;
1160 struct bnx2fc_cmd *io_req;
1161 struct fc_lport *lport;
1162 struct bnx2fc_rport *tgt;
1163 int rc = FAILED;
1166 rc = fc_block_scsi_eh(sc_cmd);
1167 if (rc)
1168 return rc;
1170 lport = shost_priv(sc_cmd->device->host);
1171 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1172 printk(KERN_ERR PFX "eh_abort: link not ready\n");
1173 return rc;
1176 tgt = (struct bnx2fc_rport *)&rp[1];
1178 BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
1180 spin_lock_bh(&tgt->tgt_lock);
1181 io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
1182 if (!io_req) {
1183 /* Command might have just completed */
1184 printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
1185 spin_unlock_bh(&tgt->tgt_lock);
1186 return SUCCESS;
1188 BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
1189 io_req->refcount.refcount.counter);
1191 /* Hold IO request across abort processing */
1192 kref_get(&io_req->refcount);
1194 BUG_ON(tgt != io_req->tgt);
1196 /* Remove the io_req from the active_q. */
1198 /* Task Mgmt functions (LUN RESET & TGT RESET) will not
1199 * issue an ABTS on this particular IO req, as the
1200 * io_req is no longer in the active_q. */
1202 if (tgt->flush_in_prog) {
1203 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1204 "flush in progress\n", io_req->xid);
1205 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1206 spin_unlock_bh(&tgt->tgt_lock);
1207 return SUCCESS;
1210 if (io_req->on_active_queue == 0) {
1211 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1212 "not on active_q\n", io_req->xid);
1214 /* This condition can happen only due to a FW bug,
1215 * where we do not receive cleanup response from
1216 * the FW. Handle this case gracefully by erroring
1217 * back the IO request to SCSI-ml. */
1219 bnx2fc_scsi_done(io_req, DID_ABORT);
1221 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1222 spin_unlock_bh(&tgt->tgt_lock);
1223 return SUCCESS;
1227 /* Only eh_abort processing will remove the IO from
1228 * active_cmd_q before processing the request. This is
1229 * done to avoid race conditions between IOs aborted
1230 * as part of task management completion and eh_abort
1231 * processing. */
1233 list_del_init(&io_req->link);
1234 io_req->on_active_queue = 0;
1235 /* Move IO req to retire queue */
1236 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1238 init_completion(&io_req->tm_done);
1240 if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1241 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1242 "already in abts processing\n", io_req->xid);
1243 if (cancel_delayed_work(&io_req->timeout_work))
1244 kref_put(&io_req->refcount,
1245 bnx2fc_cmd_release); /* drop timer hold */
1246 rc = bnx2fc_expl_logo(lport, io_req);
1247 goto out;
1250 /* Cancel the current timer running on this io_req */
1251 if (cancel_delayed_work(&io_req->timeout_work))
1252 kref_put(&io_req->refcount,
1253 bnx2fc_cmd_release); /* drop timer hold */
1254 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1255 io_req->wait_for_comp = 1;
1256 rc = bnx2fc_initiate_abts(io_req);
1257 if (rc == FAILED) {
1258 bnx2fc_initiate_cleanup(io_req);
1259 spin_unlock_bh(&tgt->tgt_lock);
1260 wait_for_completion(&io_req->tm_done);
1261 spin_lock_bh(&tgt->tgt_lock);
1262 io_req->wait_for_comp = 0;
1263 goto done;
1265 spin_unlock_bh(&tgt->tgt_lock);
1267 wait_for_completion(&io_req->tm_done);
1269 spin_lock_bh(&tgt->tgt_lock);
1270 io_req->wait_for_comp = 0;
1271 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1272 &io_req->req_flags))) {
1273 /* Let the scsi-ml try to recover this command */
1274 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1275 io_req->xid);
1276 rc = bnx2fc_expl_logo(lport, io_req);
1277 goto out;
1278 } else {
1280 /* We come here even when there was a race condition
1281 * between timeout and abts completion, and abts
1282 * completion happens just in time. */
1284 BNX2FC_IO_DBG(io_req, "abort succeeded\n");
1285 rc = SUCCESS;
1286 bnx2fc_scsi_done(io_req, DID_ABORT);
1287 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1289 done:
1290 /* release the reference taken in eh_abort */
1291 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1292 out:
1293 spin_unlock_bh(&tgt->tgt_lock);
1294 return rc;
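/*
 * Editorial summary (sketch) of the reference flow in bnx2fc_eh_abort()
 * above: one hold is taken on entry for abort processing, a timer hold
 * may be dropped via cancel_delayed_work(), the command's I/O hold is
 * dropped once the abort succeeds, and the eh_abort hold is dropped at
 * "done:". Each successful kref_get() pairs with exactly one kref_put().
 */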
1297 void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1298 struct fcoe_task_ctx_entry *task,
1299 u8 rx_state)
1301 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1302 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1303 u32 offset = cb_arg->offset;
1304 enum fc_rctl r_ctl = cb_arg->r_ctl;
1305 int rc = 0;
1306 struct bnx2fc_rport *tgt = orig_io_req->tgt;
1308 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
1309 "cmd_type = %d\n",
1310 seq_clnp_req->xid, seq_clnp_req->cmd_type);
1312 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1313 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1314 seq_clnp_req->xid);
1315 goto free_cb_arg;
1318 spin_unlock_bh(&tgt->tgt_lock);
1319 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1320 spin_lock_bh(&tgt->tgt_lock);
1322 if (rc)
1323 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR, "
1324 "IO will abort\n");
1325 seq_clnp_req->cb_arg = NULL;
1326 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1327 free_cb_arg:
1328 kfree(cb_arg);
1329 return;
1332 void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1333 struct fcoe_task_ctx_entry *task,
1334 u8 num_rq)
1336 BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
1337 "refcnt = %d, cmd_type = %d\n",
1338 io_req->refcount.refcount.counter, io_req->cmd_type);
1339 bnx2fc_scsi_done(io_req, DID_ERROR);
1340 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1341 if (io_req->wait_for_comp)
1342 complete(&io_req->tm_done);
1345 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
1346 struct fcoe_task_ctx_entry *task,
1347 u8 num_rq)
1349 u32 r_ctl;
1350 u32 r_a_tov = FC_DEF_R_A_TOV;
1351 u8 issue_rrq = 0;
1352 struct bnx2fc_rport *tgt = io_req->tgt;
1354 BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
1355 "refcnt = %d, cmd_type = %d\n",
1356 io_req->xid,
1357 io_req->refcount.refcount.counter, io_req->cmd_type);
1359 if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1360 &io_req->req_flags)) {
1361 BNX2FC_IO_DBG(io_req, "Timer context finished processing"
1362 " this io\n");
1363 return;
1366 /* Do not issue RRQ as this IO is already cleaned up */
1367 if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
1368 &io_req->req_flags))
1369 goto io_compl;
1372 /* For ABTS issued due to SCSI eh_abort_handler, timeout
1373 * values are maintained by scsi-ml itself. Cancel timeout
1374 * in case ABTS issued as part of task management function
1375 * or due to FW error. */
1377 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
1378 if (cancel_delayed_work(&io_req->timeout_work))
1379 kref_put(&io_req->refcount,
1380 bnx2fc_cmd_release); /* drop timer hold */
1382 r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
1384 switch (r_ctl) {
1385 case FC_RCTL_BA_ACC:
1387 /* Don't release this cmd yet. It will be released
1388 * after we get RRQ response. */
1390 BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
1391 issue_rrq = 1;
1392 break;
1394 case FC_RCTL_BA_RJT:
1395 BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
1396 break;
1397 default:
1398 printk(KERN_ERR PFX "Unknown ABTS response\n");
1399 break;
1402 if (issue_rrq) {
1403 BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
1404 set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
1406 set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
1407 bnx2fc_cmd_timer_set(io_req, r_a_tov);
1409 io_compl:
1410 if (io_req->wait_for_comp) {
1411 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1412 &io_req->req_flags))
1413 complete(&io_req->tm_done);
1414 } else {
1416 /* We end up here when the ABTS was issued in an
1417 * asynchronous context, i.e., as part of task
1418 * management completion, when a FW error is
1419 * received, or when the ABTS was issued because
1420 * the IO timed out. */
1424 if (io_req->on_active_queue) {
1425 list_del_init(&io_req->link);
1426 io_req->on_active_queue = 0;
1427 /* Move IO req to retire queue */
1428 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1430 bnx2fc_scsi_done(io_req, DID_ERROR);
1431 kref_put(&io_req->refcount, bnx2fc_cmd_release);
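/*
 * Editorial note: on BA_ACC the aborted exchange must not be reused
 * until RRQ completes, so the code above retires the OX_ID and re-arms
 * the timer for r_a_tov; when it fires, bnx2fc_cmd_timeout() sees
 * BNX2FC_FLAG_ISSUE_RRQ set and calls bnx2fc_send_rrq() (see the top of
 * this file).
 */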
1435 static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
1437 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1438 struct bnx2fc_rport *tgt = io_req->tgt;
1439 struct list_head *list;
1440 struct list_head *tmp;
1441 struct bnx2fc_cmd *cmd;
1442 int tm_lun = sc_cmd->device->lun;
1443 int rc = 0;
1444 int lun;
1446 /* called with tgt_lock held */
1447 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
1449 /* Walk through the active_ios queue and ABORT the IO
1450 * that matches the LUN that was reset */
1452 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
1453 BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
1454 cmd = (struct bnx2fc_cmd *)list;
1455 lun = cmd->sc_cmd->device->lun;
1456 if (lun == tm_lun) {
1457 /* Initiate ABTS on this cmd */
1458 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1459 &cmd->req_flags)) {
1460 /* cancel the IO timeout */
1461 if (cancel_delayed_work(&cmd->timeout_work))
1462 kref_put(&cmd->refcount,
1463 bnx2fc_cmd_release);
1464 /* timer hold */
1465 rc = bnx2fc_initiate_abts(cmd);
1466 /* abts shouldn't fail in this context */
1467 WARN_ON(rc != SUCCESS);
1468 } else
1469 printk(KERN_ERR PFX "lun_rst: abts already in"
1470 " progress for this IO 0x%x\n",
1471 cmd->xid);
1476 static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
1478 struct bnx2fc_rport *tgt = io_req->tgt;
1479 struct list_head *list;
1480 struct list_head *tmp;
1481 struct bnx2fc_cmd *cmd;
1482 int rc = 0;
1484 /* called with tgt_lock held */
1485 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
1487 /* Walk through the active_ios queue and ABORT all the IOs,
1488 * as the entire target was reset */
1490 list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
1491 BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
1492 cmd = (struct bnx2fc_cmd *)list;
1493 /* Initiate ABTS */
1494 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1495 &cmd->req_flags)) {
1496 /* cancel the IO timeout */
1497 if (cancel_delayed_work(&cmd->timeout_work))
1498 kref_put(&cmd->refcount,
1499 bnx2fc_cmd_release); /* timer hold */
1500 rc = bnx2fc_initiate_abts(cmd);
1501 /* abts shouldn't fail in this context */
1502 WARN_ON(rc != SUCCESS);
1504 } else
1505 printk(KERN_ERR PFX "tgt_rst: abts already in progress"
1506 " for this IO 0x%x\n", cmd->xid);
1510 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1511 struct fcoe_task_ctx_entry *task, u8 num_rq)
1513 struct bnx2fc_mp_req *tm_req;
1514 struct fc_frame_header *fc_hdr;
1515 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1516 u64 *hdr;
1517 u64 *temp_hdr;
1518 void *rsp_buf;
1520 /* Called with tgt_lock held */
1521 BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
1523 if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
1524 set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
1525 else {
1526 /* TM has already timed out and we got
1527 * delayed completion. Ignore completion
1528 * processing. */
1530 return;
1533 tm_req = &(io_req->mp_req);
1534 fc_hdr = &(tm_req->resp_fc_hdr);
1535 hdr = (u64 *)fc_hdr;
1536 temp_hdr = (u64 *)
1537 &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
1538 hdr[0] = cpu_to_be64(temp_hdr[0]);
1539 hdr[1] = cpu_to_be64(temp_hdr[1]);
1540 hdr[2] = cpu_to_be64(temp_hdr[2]);
1542 tm_req->resp_len =
1543 task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
1545 rsp_buf = tm_req->resp_buf;
1547 if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
1548 bnx2fc_parse_fcp_rsp(io_req,
1549 (struct fcoe_fcp_rsp_payload *)
1550 rsp_buf, num_rq);
1551 if (io_req->fcp_rsp_code == 0) {
1552 /* TM successful */
1553 if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
1554 bnx2fc_lun_reset_cmpl(io_req);
1555 else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
1556 bnx2fc_tgt_reset_cmpl(io_req);
1558 } else {
1559 printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
1560 fc_hdr->fh_r_ctl);
1562 if (!sc_cmd->SCp.ptr) {
1563 printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1564 return;
1566 switch (io_req->fcp_status) {
1567 case FC_GOOD:
1568 if (io_req->cdb_status == 0) {
1569 /* Good IO completion */
1570 sc_cmd->result = DID_OK << 16;
1571 } else {
1572 /* Transport status is good, SCSI status not good */
1573 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1575 if (io_req->fcp_resid)
1576 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1577 break;
1579 default:
1580 BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
1581 io_req->fcp_status);
1582 break;
1585 sc_cmd = io_req->sc_cmd;
1586 io_req->sc_cmd = NULL;
1588 /* check if the io_req exists in tgt's tmf_q */
1589 if (io_req->on_tmf_queue) {
1591 list_del_init(&io_req->link);
1592 io_req->on_tmf_queue = 0;
1593 } else {
1595 printk(KERN_ERR PFX "Command not on active_tm_queue!\n");
1596 return;
1599 sc_cmd->SCp.ptr = NULL;
1600 sc_cmd->scsi_done(sc_cmd);
1602 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1603 if (io_req->wait_for_comp) {
1604 BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
1605 complete(&io_req->tm_done);
1609 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
1610 int bd_index)
1612 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1613 int frag_size, sg_frags;
1615 sg_frags = 0;
1616 while (sg_len) {
1617 if (sg_len >= BNX2FC_BD_SPLIT_SZ)
1618 frag_size = BNX2FC_BD_SPLIT_SZ;
1619 else
1620 frag_size = sg_len;
1621 bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
1622 bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
1623 bd[bd_index + sg_frags].buf_len = (u16)frag_size;
1624 bd[bd_index + sg_frags].flags = 0;
1626 addr += (u64) frag_size;
1627 sg_frags++;
1628 sg_len -= frag_size;
1630 return sg_frags;
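/*
 * Editorial worked example: with a hypothetical BNX2FC_BD_SPLIT_SZ of
 * 32 KiB, a 200 KiB scatterlist entry becomes six full 32 KiB BDs plus
 * one 8 KiB remainder BD; bnx2fc_split_bd() then returns sg_frags = 7
 * and bnx2fc_map_sg() advances bd_count by that amount.
 */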
1634 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1636 struct bnx2fc_interface *interface = io_req->port->priv;
1637 struct bnx2fc_hba *hba = interface->hba;
1638 struct scsi_cmnd *sc = io_req->sc_cmd;
1639 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1640 struct scatterlist *sg;
1641 int byte_count = 0;
1642 int sg_count = 0;
1643 int bd_count = 0;
1644 int sg_frags;
1645 unsigned int sg_len;
1646 u64 addr;
1647 int i;
1649 sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
1650 scsi_sg_count(sc), sc->sc_data_direction);
1651 scsi_for_each_sg(sc, sg, sg_count, i) {
1652 sg_len = sg_dma_len(sg);
1653 addr = sg_dma_address(sg);
1654 if (sg_len > BNX2FC_MAX_BD_LEN) {
1655 sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
1656 bd_count);
1657 } else {
1659 sg_frags = 1;
1660 bd[bd_count].buf_addr_lo = addr & 0xffffffff;
1661 bd[bd_count].buf_addr_hi = addr >> 32;
1662 bd[bd_count].buf_len = (u16)sg_len;
1663 bd[bd_count].flags = 0;
1665 bd_count += sg_frags;
1666 byte_count += sg_len;
1668 if (byte_count != scsi_bufflen(sc))
1669 printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
1670 "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
1671 io_req->xid);
1672 return bd_count;
1675 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1677 struct scsi_cmnd *sc = io_req->sc_cmd;
1678 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1679 int bd_count;
1681 if (scsi_sg_count(sc)) {
1682 bd_count = bnx2fc_map_sg(io_req);
1683 if (bd_count == 0)
1684 return -ENOMEM;
1685 } else {
1686 bd_count = 0;
1687 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
1688 bd[0].buf_len = bd[0].flags = 0;
1690 io_req->bd_tbl->bd_valid = bd_count;
1692 return 0;
1695 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
1697 struct scsi_cmnd *sc = io_req->sc_cmd;
1699 if (io_req->bd_tbl->bd_valid && sc) {
1700 scsi_dma_unmap(sc);
1701 io_req->bd_tbl->bd_valid = 0;
1705 void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1706 struct fcp_cmnd *fcp_cmnd)
1708 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1709 char tag[2];
1711 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1713 int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
1715 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
1716 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
1718 fcp_cmnd->fc_cmdref = 0;
1719 fcp_cmnd->fc_pri_ta = 0;
1720 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1721 fcp_cmnd->fc_flags = io_req->io_req_flags;
1723 if (scsi_populate_tag_msg(sc_cmd, tag)) {
1724 switch (tag[0]) {
1725 case HEAD_OF_QUEUE_TAG:
1726 fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
1727 break;
1728 case ORDERED_QUEUE_TAG:
1729 fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
1730 break;
1731 default:
1732 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1733 break;
1735 } else {
1736 fcp_cmnd->fc_pri_ta = 0;
1740 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1741 struct fcoe_fcp_rsp_payload *fcp_rsp,
1742 u8 num_rq)
1744 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1745 struct bnx2fc_rport *tgt = io_req->tgt;
1746 u8 rsp_flags = fcp_rsp->fcp_flags.flags;
1747 u32 rq_buff_len = 0;
1748 int i;
1749 unsigned char *rq_data;
1750 unsigned char *dummy;
1751 int fcp_sns_len = 0;
1752 int fcp_rsp_len = 0;
1754 io_req->fcp_status = FC_GOOD;
1755 io_req->fcp_resid = fcp_rsp->fcp_resid;
1757 io_req->scsi_comp_flags = rsp_flags;
1758 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1759 fcp_rsp->scsi_status_code;
1761 /* Fetch fcp_rsp_info and fcp_sns_info if available */
1762 if (num_rq) {
1765 /* We do not anticipate num_rq > 1, as the Linux-defined
1766 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
1767 * a single 256-byte rq buffer is good enough to hold this. */
1770 if (rsp_flags &
1771 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
1772 fcp_rsp_len = rq_buff_len
1773 = fcp_rsp->fcp_rsp_len;
1776 if (rsp_flags &
1777 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
1778 fcp_sns_len = fcp_rsp->fcp_sns_len;
1779 rq_buff_len += fcp_rsp->fcp_sns_len;
1782 io_req->fcp_rsp_len = fcp_rsp_len;
1783 io_req->fcp_sns_len = fcp_sns_len;
1785 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1786 /* Invalid sense length. */
1787 printk(KERN_ERR PFX "invalid sns length %d\n",
1788 rq_buff_len);
1789 /* reset rq_buff_len */
1790 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
1793 rq_data = bnx2fc_get_next_rqe(tgt, 1);
1795 if (num_rq > 1) {
1796 /* We do not need extra sense data */
1797 for (i = 1; i < num_rq; i++)
1798 dummy = bnx2fc_get_next_rqe(tgt, 1);
1801 /* fetch fcp_rsp_code */
1802 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1803 /* Only for task management function */
1804 io_req->fcp_rsp_code = rq_data[3];
1805 printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
1806 io_req->fcp_rsp_code);
1809 /* fetch sense data */
1810 rq_data += fcp_rsp_len;
1812 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1813 printk(KERN_ERR PFX "Truncating sense buffer\n");
1814 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1817 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1818 if (fcp_sns_len)
1819 memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
1821 /* return RQ entries */
1822 for (i = 0; i < num_rq; i++)
1823 bnx2fc_return_rqe(tgt, 1);
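/*
 * Editorial sketch of the RQ buffer layout parsed above, assuming both
 * FCP_RSP_LEN and FCP_SNS_LEN are flagged valid:
 *
 *	rq_data -> [ fcp_rsp_info, fcp_rsp_len bytes (rsp_code at byte 3) ]
 *	           [ sense data, fcp_sns_len bytes (capped at 96)         ]
 *
 * which is why rq_data is advanced by fcp_rsp_len before the sense copy.
 */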
1828 /** bnx2fc_queuecommand - Queuecommand function of the scsi template
1830 * @host: The Scsi_Host the command was issued to
1831 * @sc_cmd: struct scsi_cmnd to be executed
1833 * This is the IO strategy routine, called by SCSI-ML */
1835 int bnx2fc_queuecommand(struct Scsi_Host *host,
1836 struct scsi_cmnd *sc_cmd)
1838 struct fc_lport *lport = shost_priv(host);
1839 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1840 struct fc_rport_libfc_priv *rp = rport->dd_data;
1841 struct bnx2fc_rport *tgt;
1842 struct bnx2fc_cmd *io_req;
1843 int rc = 0;
1844 int rval;
1846 rval = fc_remote_port_chkready(rport);
1847 if (rval) {
1848 sc_cmd->result = rval;
1849 sc_cmd->scsi_done(sc_cmd);
1850 return 0;
1853 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1854 rc = SCSI_MLQUEUE_HOST_BUSY;
1855 goto exit_qcmd;
1858 /* rport and tgt are allocated together, so tgt should be non-NULL */
1859 tgt = (struct bnx2fc_rport *)&rp[1];
1861 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1863 /* Session is not offloaded yet. Let SCSI-ml retry
1864 * the command. */
1866 rc = SCSI_MLQUEUE_TARGET_BUSY;
1867 goto exit_qcmd;
1870 io_req = bnx2fc_cmd_alloc(tgt);
1871 if (!io_req) {
1872 rc = SCSI_MLQUEUE_HOST_BUSY;
1873 goto exit_qcmd;
1875 io_req->sc_cmd = sc_cmd;
1877 if (bnx2fc_post_io_req(tgt, io_req)) {
1878 printk(KERN_ERR PFX "Unable to post io_req\n");
1879 rc = SCSI_MLQUEUE_HOST_BUSY;
1880 goto exit_qcmd;
1882 exit_qcmd:
1883 return rc;
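/*
 * Editorial note: the queuecommand contract is to return 0 when the
 * command is accepted (including the early scsi_done() completion for a
 * dead rport above) and SCSI_MLQUEUE_HOST_BUSY/SCSI_MLQUEUE_TARGET_BUSY
 * to have SCSI-ml requeue and retry the command later.
 */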
1886 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1887 struct fcoe_task_ctx_entry *task,
1888 u8 num_rq)
1890 struct fcoe_fcp_rsp_payload *fcp_rsp;
1891 struct bnx2fc_rport *tgt = io_req->tgt;
1892 struct scsi_cmnd *sc_cmd;
1893 struct Scsi_Host *host;
1896 /* scsi_cmd_cmpl is called with tgt lock held */
1898 if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1899 /* we will not receive ABTS response for this IO */
1900 BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1901 "this scsi cmd\n");
1904 /* Cancel the timeout_work, as we received IO completion */
1905 if (cancel_delayed_work(&io_req->timeout_work))
1906 kref_put(&io_req->refcount,
1907 bnx2fc_cmd_release); /* drop timer hold */
1909 sc_cmd = io_req->sc_cmd;
1910 if (sc_cmd == NULL) {
1911 printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
1912 return;
1915 /* Fetch fcp_rsp from task context and perform cmd completion */
1916 fcp_rsp = (struct fcoe_fcp_rsp_payload *)
1917 &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
1919 /* parse fcp_rsp and obtain sense data from RQ if available */
1920 bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
1922 host = sc_cmd->device->host;
1923 if (!sc_cmd->SCp.ptr) {
1924 printk(KERN_ERR PFX "SCp.ptr is NULL\n");
1925 return;
1928 if (io_req->on_active_queue) {
1929 list_del_init(&io_req->link);
1930 io_req->on_active_queue = 0;
1931 /* Move IO req to retire queue */
1932 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1933 } else {
1934 /* This should not happen, but could have been pulled
1935 * by bnx2fc_flush_active_ios(), or during a race
1936 * between command abort and (late) completion. */
1938 BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
1939 if (io_req->wait_for_comp)
1940 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1941 &io_req->req_flags))
1942 complete(&io_req->tm_done);
1945 bnx2fc_unmap_sg_list(io_req);
1946 io_req->sc_cmd = NULL;
1948 switch (io_req->fcp_status) {
1949 case FC_GOOD:
1950 if (io_req->cdb_status == 0) {
1951 /* Good IO completion */
1952 sc_cmd->result = DID_OK << 16;
1953 } else {
1954 /* Transport status is good, SCSI status not good */
1955 BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
1956 " fcp_resid = 0x%x\n",
1957 io_req->cdb_status, io_req->fcp_resid);
1958 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1960 if (io_req->fcp_resid)
1961 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1962 break;
1963 default:
1964 printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
1965 io_req->fcp_status);
1966 break;
1968 sc_cmd->SCp.ptr = NULL;
1969 sc_cmd->scsi_done(sc_cmd);
1970 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1973 int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1974 struct bnx2fc_cmd *io_req)
1976 struct fcoe_task_ctx_entry *task;
1977 struct fcoe_task_ctx_entry *task_page;
1978 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1979 struct fcoe_port *port = tgt->port;
1980 struct bnx2fc_interface *interface = port->priv;
1981 struct bnx2fc_hba *hba = interface->hba;
1982 struct fc_lport *lport = port->lport;
1983 struct fcoe_dev_stats *stats;
1984 int task_idx, index;
1985 u16 xid;
1987 /* Initialize rest of io_req fields */
1988 io_req->cmd_type = BNX2FC_SCSI_CMD;
1989 io_req->port = port;
1990 io_req->tgt = tgt;
1991 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
1992 sc_cmd->SCp.ptr = (char *)io_req;
1994 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1995 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1996 io_req->io_req_flags = BNX2FC_READ;
1997 stats->InputRequests++;
1998 stats->InputBytes += io_req->data_xfer_len;
1999 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
2000 io_req->io_req_flags = BNX2FC_WRITE;
2001 stats->OutputRequests++;
2002 stats->OutputBytes += io_req->data_xfer_len;
2003 } else {
2004 io_req->io_req_flags = 0;
2005 stats->ControlRequests++;
2007 put_cpu();
2009 xid = io_req->xid;
2011 /* Build buffer descriptor list for firmware from sg list */
2012 if (bnx2fc_build_bd_list_from_sg(io_req)) {
2013 printk(KERN_ERR PFX "BD list creation failed\n");
2014 spin_lock_bh(&tgt->tgt_lock);
2015 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2016 spin_unlock_bh(&tgt->tgt_lock);
2017 return -EAGAIN;
2020 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
2021 index = xid % BNX2FC_TASKS_PER_PAGE;
2023 /* Initialize task context for this IO request */
2024 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
2025 task = &(task_page[index]);
2026 bnx2fc_init_task(io_req, task);
2028 spin_lock_bh(&tgt->tgt_lock);
2030 if (tgt->flush_in_prog) {
2031 printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
2032 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2033 spin_unlock_bh(&tgt->tgt_lock);
2034 return -EAGAIN;
2037 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
2038 printk(KERN_ERR PFX "Session not ready...post_io\n");
2039 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2040 spin_unlock_bh(&tgt->tgt_lock);
2041 return -EAGAIN;
2044 /* Time IO req */
2045 if (tgt->io_timeout)
2046 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
2047 /* Obtain free SQ entry */
2048 bnx2fc_add_2_sq(tgt, xid);
2050 /* Enqueue the io_req to active_cmd_queue */
2052 io_req->on_active_queue = 1;
2053 /* move io_req from pending_queue to active_queue */
2054 list_add_tail(&io_req->link, &tgt->active_cmd_queue);
2056 /* Ring doorbell */
2057 bnx2fc_ring_doorbell(tgt);
2058 spin_unlock_bh(&tgt->tgt_lock);
2059 return 0;
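/*
 * Editorial worked example: with a hypothetical BNX2FC_TASKS_PER_PAGE of
 * 256, xid 0x412 maps to task_idx = 0x412 / 256 = 4 and index =
 * 0x412 % 256 = 0x12, i.e. entry 18 of task context page 4, matching the
 * arithmetic used above and in the other request-initiation paths.
 */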