/* bnx2fc_io.c: QLogic Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX		num_possible_cpus()
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
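/*
 * If the delayed work was queued, an extra reference ("timer hold") is
 * taken on io_req; bnx2fc_cmd_timeout() drops it when it runs.  Any path
 * that cancels the work with cancel_delayed_work() must drop that same
 * reference itself, which is why callers below pair a successful cancel
 * with kref_put(..., bnx2fc_cmd_release).
 */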
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				/*
				 * Cleanup and return original command to
				 * mid-layer.
				 */
				bnx2fc_initiate_cleanup(io_req);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;

				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
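/*
 * bnx2fc_cmd_timeout() above is effectively a small state machine over
 * io_req->req_flags: the single delayed work item serves the RRQ delay
 * (ISSUE_RRQ), OX_ID retirement (RETIRE_OXID), eh_abort waits (EH_ABORT),
 * ABTS supervision (ISSUE_ABTS/ABTS_DONE) and plain IO timeouts, so the
 * flag tests must run in that order and under tgt_lock.
 */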
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	/* Sanity checks before returning command to mid-layer */
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
		       "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	if (!sc_cmd->device) {
		pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
		return;
	}
	if (!sc_cmd->device->host) {
		pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
		       io_req->xid);
		return;
	}

	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		kfree(cmgr->free_list);
		cmgr->free_list = NULL;
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}
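/*
 * Pool layout: a command with exchange id "xid" lives on
 * free_list[xid % num_possible_cpus()], while the last hba->elstm_xids
 * commands are parked on free_list[num_possible_cpus()]
 * (RESERVE_FREE_LIST_INDEX) for slow-path ELS/TM use.  With 8 possible
 * CPUs, for example, xid 0x11 hashes to free_list[1].
 */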
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
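/*
 * The admission check above is deliberately conservative: allocation is
 * refused not only when the free list is empty or num_active_ios has
 * reached max_sqes, but also when fewer than max_sqes SQ WQEs remain
 * (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX), so a slow-path request
 * can never completely fill the send queue.
 */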
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
		cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
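/*
 * Unlike bnx2fc_elstm_alloc(), the SCSI fast path indexes the free list
 * of the current CPU; get_cpu()/put_cpu() keep the index stable while the
 * list is manipulated, keeping command allocation mostly lock-local.
 */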
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	if (io_req->cmd_type != BNX2FC_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
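/*
 * The firmware BD entries carry 32-bit address halves, so a 64-bit DMA
 * address is split as lo = addr & 0xffffffff and hi = addr >> 32; e.g.
 * 0x0000000123456000 becomes buf_addr_lo 0x23456000, buf_addr_hi 0x1.
 */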
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	rport = starget_to_rport(scsi_target(sc_cmd->device));
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 interface->tm_timeout * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
		     interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}
/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}
/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc = SUCCESS;

	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}
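/*
 * bnx2fc_abts_cleanup() is entered with tgt_lock held but must drop it
 * around wait_for_completion(), because tm_done is completed from CQE
 * processing, which itself takes tgt_lock.  The lock is re-acquired
 * before returning so the caller's locking assumptions still hold.
 */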
/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return FAILED;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_abts_cleanup(io_req);
		/* This only occurs when a task abort was requested while ABTS
		   is in progress.  Setting the IO_CLEANUP flag will skip the
		   RRQ process in the case when the fw generated SCSI_CMD cmpl
		   was a result from the ABTS request rather than the CLEANUP
		   request */
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		goto done;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
		rc = SUCCESS;
	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				      &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_abts_cleanup(io_req);
		goto done;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
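/*
 * eh_abort holds its own reference on io_req so the command cannot be
 * released while the handler sleeps on tm_done with tgt_lock dropped.
 * Three outcomes are possible: the IO completed anyway (IO_COMPL), the
 * ABTS timed out and a firmware cleanup is the last resort (ABTS_DONE set
 * here), or the ABTS completed in time and the command is errored back
 * with DID_ABORT.
 */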
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}
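/*
 * On a BA_ACC the exchange id cannot be reused immediately: RETIRE_OXID
 * is set and the command timer is armed for R_A_TOV, so that
 * bnx2fc_cmd_timeout() sends the RRQ (ISSUE_RRQ) and frees the exchange
 * only after the resource-allocation timeout has expired.
 */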
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	u64 tm_lun = sc_cmd->device->lun;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT every IO
	 * on the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
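/*
 * Example: assuming the usual 32KB BNX2FC_BD_SPLIT_SZ, a 72KB DMA segment
 * at address A is emitted as three BDs of [A, 32KB], [A + 32KB, 32KB] and
 * [A + 64KB, 8KB], and the function returns 3 so bnx2fc_map_sg() can
 * advance its BD index.
 */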
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	/*
	 * Use dma_map_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	/*
	 * Use dma_unmap_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
			     scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
}
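/*
 * The memset() above zeroes the whole FCP_CMND first, so the explicit
 * fc_cmdref store only re-documents the default; the task attribute that
 * actually goes on the wire is FCP_PTA_SIMPLE, and fc_dl is carried in
 * network byte order, hence the htonl() on data_xfer_len.
 */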
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				  fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq >1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO
		 * 256 bytes of single rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n",
				      io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}
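/*
 * When the response carries FCP_RSP_INFO and/or sense data, those bytes
 * arrive in receive-queue (RQ) buffers rather than in the task context:
 * the parser consumes num_rq RQ entries, copies at most
 * SCSI_SENSE_BUFFERSIZE bytes of sense data, and returns every RQ entry
 * so the hardware can reuse the ring slots.
 */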
/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (tgt->retry_delay_timestamp) {
		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
			tgt->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	spin_lock_bh(&tgt->tgt_lock);

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}

exit_qcmd_tgtlock:
	spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd:
	return rc;
}
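/*
 * queuecommand never sleeps: every resource shortage is reported as
 * SCSI_MLQUEUE_HOST_BUSY or SCSI_MLQUEUE_TARGET_BUSY so the midlayer
 * requeues the command, and the retry_delay_timestamp test flow-controls
 * a target that previously returned SAM_STAT_TASK_SET_FULL or
 * SAM_STAT_BUSY (see bnx2fc_process_scsi_cmd_compl() below).
 */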
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
		return;
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/* Set the jiffies + retry_delay_timer * 100ms
				   for the rport/tgt */
				tgt->retry_delay_timestamp = jiffies +
					fcp_rsp->retry_delay_timer * HZ / 10;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* bnx2fc_post_io_req() is called with the tgt_lock held */

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */
	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	return 0;
}