/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80    /* reserved bits in priority field */
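/*
 * Illustrative decode of the FCP_CMND task-attribute byte with the masks
 * above (a sketch only; "pri_ta" is a hypothetical byte, not a driver
 * variable):
 *
 *	task_attr = pri_ta & FCP_PTA_MASK;
 *	priority  = (pri_ta & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */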
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 * - Either context is IRQ and only IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
static struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
		vha->hw->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
				sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
					(fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int npiv_vports = 0;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * that we generated, so its ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	uint32_t lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(ha, ha->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), mcmd->reset_count,
			ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_io, vha, 0x305a,
		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
		    vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_io, vha, 0x305b,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of the
		 * 64-bit specific fields are used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;
		cmd->cmd_flags |= BIT_6;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io, vha, 0x305d,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;
	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04f,
			    "qla_target(%d): %d bytes of sense "
			    "lost", prm->cmd->vha->vp_idx,
			    prm->sense_buffer_len % 4);
		}
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	 * if (!sp->cmd->prot_chk)
	 *	return 0;
	 */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

	/* Wait until Mode Sense/Select cmd, modepage Ah, subpage 2
	 * has been implemented by TCM, before AppTag is available.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case TARGET_DIF_TYPE1_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case TARGET_DIF_TYPE2_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/* For Type 3 protection: 16 bit GUARD only */
	case TARGET_DIF_TYPE3_PROT:
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
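/*
 * Worked example for the dif_bytes math in qlt_build_ctio_crc2_pkt() below:
 * one 8-byte protection tuple is carried per logical block, so a 64 KiB
 * transfer with cmd->blk_sz == 512 yields (65536 / 512) * 8 = 1024 DIF
 * bytes. For DIN_INSERT/DOUT_STRIP those bytes are added on top of the
 * data; for the other modes they are already part of the FC transfer
 * length.
 */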
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t t16;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		data_bytes += dif_bytes;
		break;

	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;

	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:	/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);

	/* Walks data segments */
	pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, cmd))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
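/*
 * Note on the T10-PI status path in qlt_xmit_response() below: when
 * protected data and SCSI status are sent together, the CRC2 IOCB built by
 * qlt_build_ctio_crc2_pkt() only moves data, so a second plain CTIO7
 * carrying the status is queued behind it (prm.add_status_pkt) and the
 * first packet is marked CTIO_INTERMEDIATE_HANDLE_MARK so that only the
 * status CTIO completes the command.
 */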
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
			"RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), cmd->reset_count,
			ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
			"RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), cmd->reset_count,
			ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);

	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
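/*
 * DIF tuple layout, as decoded in qlt_handle_dif_error() below: 8 bytes per
 * tuple, big-endian on the wire -
 *   bytes 0-1: guard tag (block CRC)
 *   bytes 2-3: application tag
 *   bytes 4-7: reference tag (low 32 bits of the LBA for type 1/2)
 * An app tag of 0xffff (plus ref tag 0xffffffff for type 3) marks the
 * sector as "ignore", i.e. no check was intended for it.
 */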
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.
 */
static inline int
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;
	uint64_t	lba = cmd->se_cmd.t_task_lba;

	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt, vha, 0xe075,
	    "iocb(s) %p Returned STATUS.\n", sts);

	ql_dbg(ql_dbg_tgt, vha, 0xf075,
	    "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done;

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba + 1;
		cmd->se_cmd.bad_sector = e_ref_tag;
		cmd->se_cmd.pi_err = 0;
		ql_dbg(ql_dbg_tgt, vha, 0xf074,
			"need to return scsi good\n");

		/* Update protection tag */
		if (cmd->prot_sg_cnt) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg, *sgl;

			sgl = cmd->prot_sg;

			/* Patch the corresponding protection tags */
			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0xf076,
				    "unexpected tag values tag:lba=%u:%llu)\n",
				    e_ref_tag, (unsigned long long)lba);
				return 0;
			}

#if 0
			struct sd_dif_tuple *spt;
			/* TODO:
			 * This section came from initiator. Is it valid here?
			 * should ulp be overridden with actual val???
			 */
			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
#endif
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe076,
		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = e_ref_tag;

		ql_log(ql_log_warn, vha, 0xe077,
		    "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	/* check app tag */
	if (e_app_tag != a_app_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe078,
		    "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		return 1;
	}

	return 1;
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
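/*
 * Note: __qlt_send_term_exchange() above returns -ENOMEM when no IOCB could
 * be allocated; the wrapper below turns that into a deferred queue-full
 * command via qlt_alloc_qfull_cmd(), so the exchange is still terminated
 * once request-queue resources free up.
 */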
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

done:
	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
	    !cmd->cmd_sent_to_fw)) {
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}
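/*
 * Exchange-leak accounting (the two helpers below): the threshold is a
 * percentage of the firmware exchange count. For example, assuming
 * LEAK_EXCHG_THRESH_HOLD_PERCENT is defined as 75 in qla_target.h, a
 * fw_xcb_count of 2048 gives a hold of (2048/100) * 75 = 1500; once more
 * exchanges than that have been dropped, the chip is reset to reclaim them.
 */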
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt_sess *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->jiffies_at_free = get_jiffies_64();
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;
	cmd->cmd_flags |= BIT_15;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    __constant_cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
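/*
 * Handle encoding note: completion handles given to the firmware carry
 * CTIO_COMPLETION_HANDLE_MARK (and CTIO_INTERMEDIATE_HANDLE_MARK for the
 * data half of a split data+status response); qlt_ctio_to_cmd() below
 * strips those marks and uses handle-1 as the index into ha->tgt.cmds[].
 */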
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
			return NULL;

		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
/* hardware_lock should be held by caller. */
static void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	handle = qlt_make_handle(vha);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_io, vha, 0xff02,
		    "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
		    cmd->state);
		dump_stack();
	}

	cmd->cmd_flags |= BIT_12;
	ha->tgt.tgt_ops->free_cmd(cmd);
}
void
qlt_host_reset_handler(struct qla_hw_data *ha)
{
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	scsi_qla_host_t *vha = NULL;
	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
	uint32_t i;

	if (!base_vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || qla_ini_mode_enabled(base_vha)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
		    "Target mode disabled\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
	    base_vha->dpc_flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
		cmd = qlt_get_cmd(base_vha, i);
		if (!cmd)
			continue;
		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
		vha = cmd->vha;
		qlt_abort_cmd_on_host_reset(vha, cmd);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
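/*
 * CTIO completion dispatch in qlt_do_ctio_completion() below: intermediate
 * completions (the data half of a split data+status response) are ignored
 * unless they report an error; everything else is matched back to its
 * qla_tgt_cmd and either completed toward TCM or terminated, depending on
 * cmd->state and the CTIO status.
 */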
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			if (qlt_handle_dif_error(vha, cmd, ctio)) {
				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
					/* scsi Write/xfer rdy complete */
					goto skip_term;
				} else {
					/* scsi read/xmit respond complete
					 * call handle dif to send scsi status
					 * rather than terminate exchange.
					 */
					cmd->state = QLA_TGT_STATE_PROCESSED;
					ha->tgt.tgt_ops->handle_dif_err(cmd);
					return;
				}
			} else {
				/* Need to generate a SCSI good completion.
				 * because FW did not send scsi status.
				 */
				status = 0;
				goto skip_term;
			}
			break;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->state == QLA_TGT_STATE_ABORTED" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
			cmd->cmd_flags |= BIT_13;
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
		}
	}
skip_term:

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    (cmd->state != QLA_TGT_STATE_ABORTED)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
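/*
 * FCP task attribute mapping note: the ATIO task codes translate 1:1 onto
 * the SAM task attribute messages (SIMPLE / HEAD OF QUEUE / ORDERED / ACA)
 * in qlt_get_fcp_task_attr() below; an unknown code falls back to ORDERED
 * after logging.
 */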
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	cmd->cmd_in_wq = 0;
	cmd->cmd_flags |= BIT_1;
	if (tgt->tgt_stop)
		goto out_term;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->cmd_flags |= BIT_2;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);

	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	__qlt_do_work(cmd);
}
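/*
 * Command allocation note: qlt_get_tag() below draws a pre-allocated
 * qla_tgt_cmd from the per-session tag pool (percpu_ida), so the hot ATIO
 * path never calls the slab allocator; the tag is released again in
 * qlt_free_cmd() via percpu_ida_free().
 */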
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct qla_tgt_sess *sess,
	struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	return cmd;
}
static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
	uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multi entry atio %p\n", &op->atio);
		goto out_term;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}
	/*
	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	kfree(op);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;
		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}
	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	kref_get(&sess->se_sess->sess_kref);

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -ENOMEM;
	}

	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->reset_count = vha->hw->chip_reset;

	cmd->cmd_in_wq = 1;
	cmd->cmd_flags |= BIT_0;
	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = vha->hw->chip_reset;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = vha->vha_tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existent session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}
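/*
 * SRR offset arithmetic example for qlt_srr_adjust_data() below: with
 * cmd->offset == 0 and cmd->bufflen == 4096, an srr_rel_offs of 4096 means
 * everything was received and only status must be retransmitted
 * (QLA_TGT_XMIT_STATUS); 0 < rel_offs < bufflen trims the SGL via
 * qlt_set_data_offset(); a negative rel_offs is rejected.
 */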
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
	uint32_t srr_rel_offs, int *xmit_type)
{
	int res = 0, rel_offs;

	rel_offs = srr_rel_offs - cmd->offset;
	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
	    srr_rel_offs, rel_offs);

	*xmit_type = QLA_TGT_XMIT_ALL;

	if (rel_offs < 0) {
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
		    "qla_target(%d): SRR rel_offs (%d) < 0",
		    cmd->vha->vp_idx, rel_offs);
		res = -1;
	} else if (rel_offs == cmd->bufflen)
		*xmit_type = QLA_TGT_XMIT_STATUS;
	else if (rel_offs > 0)
		res = qlt_set_data_offset(cmd, rel_offs);

	return res;
}
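/*
 * SRR processing pairs two events that share an srr_id: the immediate
 * notify from the initiator (qla_tgt_srr_imm) and the CTIO returned with
 * SRR status (qla_tgt_srr_ctio). qlt_handle_srr_work() only calls into
 * qlt_handle_srr() once both halves have been queued.
 */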
/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA) {
				cmd->cmd_flags |= BIT_8;
				qlt_rdy_to_xfer(cmd);
			}
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp) {
		cmd->cmd_flags |= BIT_7;
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
	}

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else {
		cmd->cmd_flags |= BIT_9;
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
						srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic..
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
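/*
 * Immediate notify dispatch note for qlt_handle_imm_notify() below: each
 * IMM_NTFY_* status is acknowledged with a NOTIFY ACK unless a handler
 * takes ownership of the IOCB - the reset, task management, ELS and SRR
 * paths clear send_notify_ack and ack later (or not at all, e.g. LINK
 * REINIT waits for possible PDISC/ADISC first).
 */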
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return 0;
}

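/*
 * Note that __qlt_send_busy() completes the exchange entirely from the
 * interrupt path with a status-only (status mode 1) CTIO7 carrying the
 * requested SCSI status; no se_cmd is ever created for it, and
 * CTIO7_FLAGS_DONT_RET_CTIO asks the firmware not to return a completion
 * CTIO, since there is no command state to complete against.
 */
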
/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
		    vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
			    vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
		    "qla_target(%d): %s: QFull CMD dropped[%d]\n",
		    vha->vp_idx, __func__,
		    vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
		    "qla_target(%d): %s: Allocation of cmd failed\n",
		    vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
		    vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
			    vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = vha->hw->chip_reset;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
	    vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
		vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
		    vha->hw->tgt.num_qfull_cmds_alloc;
}

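/*
 * Commands parked on ha->tgt.q_full_list above carry either q_full (reply
 * with the SAM status stashed in cmd->state) or term_exchg (terminate the
 * exchange); they are drained later by qlt_free_qfull_cmds() once IOCB
 * space is available again.
 */
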
void
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
		return;
	}

	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}

	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/*
		 * This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd.
		 */
		qlt_free_cmd(cmd);
	}
}

static void
qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	int rc;

	rc = __qlt_send_busy(vha, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

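/*
 * A BUSY reply that cannot get IOCB space right away (-ENOMEM from
 * __qlt_send_busy()) is parked via qlt_alloc_qfull_cmd() and resent later
 * from qlt_free_qfull_cmds().
 */
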
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
	return 1;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_io, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, atio);
			if (rc != 0) {
				tgt->irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

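/*
 * Every path through qlt_24xx_atio_pkt() must balance the irq_cmd_count
 * increment taken on entry; the q-full threshold path above returns early
 * and therefore drops the count inline instead of falling through to the
 * common decrement.
 */
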
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, atio);
		if (rc != 0) {
			tgt->irq_cmd_count--;
			return;
		}

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the time the abort
					 * request was received and the time it
					 * was processed.  Unfortunately, the
					 * firmware has a silly requirement
					 * that all aborted exchanges must be
					 * explicitly terminated, otherwise it
					 * refuses to send responses for the
					 * abort requests.  So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}

/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing.
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

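/*
 * The abort/TM handlers are replayed from this work item rather than from
 * the interrupt path that queued them, since qlt_make_local_sess() takes
 * tgt_mutex and issues a blocking port-database mailbox command, neither
 * of which can be done from IRQ context.
 */
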
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

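/*
 * The ATIO ring is consumed in place: each packet is handed to
 * qlt_24xx_atio_pkt_all_vps() and then stamped ATIO_PROCESSED, which is
 * what the while-loop above uses to find the end of firmware-written
 * entries.  Multi-entry packets advance the ring index entry_count times
 * before the out-pointer register is finally updated.
 */
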
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}

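/*
 * On ISPs with a dedicated ATIO interrupt (IS_ATIO_MSIX_CAPABLE), the
 * third MSI-X entry is advertised to the firmware through the init
 * control block so that ATIOs are raised on their own vector; see
 * qla83xx_msix_atio_q() below for the corresponding handler.
 */
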
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

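/*
 * The ha->tgt.saved_* fields capture the initiator-mode NVRAM defaults the
 * first time target mode rewrites them, so that turning target mode back
 * off can restore exchange_count and firmware_options_1..3 exactly.  The
 * 81xx variant below follows the same pattern for struct nvram_81xx.
 */
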
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

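/*
 * tgt_vp_map holds two mappings side by side: scsi_qla_host pointers keyed
 * by vp index, and vp indices keyed by the AL_PA of the vport's FC address,
 * presumably so incoming ATIOs can be routed to the right NPIV host.
 */
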
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

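/*
 * qlt_parse_ini_mode() folds the qlini_mode module parameter string into
 * the ql2x_ini_mode setting; any string other than the three recognized
 * values makes qlt_init() below fail before any resources are allocated.
 */
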
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}