/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 * - Either context is IRQ and only IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
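
/*
 * Illustrative use of the generation counter (a sketch; the real callers
 * live elsewhere in this driver): a caller snapshots the tick before an
 * asynchronous operation and later compares it against sess->generation
 * to detect stale deletion requests, as qlt_fc_port_deleted() does below:
 *
 *	int gen;
 *
 *	qlt_do_generation_tick(vha, &gen);
 *	...
 *	if (gen - sess->generation < 0)
 *		;	// request predates the session; ignore it
 */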
140 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
141 static struct qla_tgt_sess
*qlt_find_sess_by_port_name(
143 const uint8_t *port_name
)
145 struct qla_tgt_sess
*sess
;
147 list_for_each_entry(sess
, &tgt
->sess_list
, sess_list_entry
) {
148 if (!memcmp(sess
->port_name
, port_name
, WWN_SIZE
))
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
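
/*
 * Note on the d_id[] layout assumed above (standard Fibre Channel
 * addressing, not specific to this file): a 24-bit FC port ID decomposes
 * as
 *
 *	d_id[0] = domain, d_id[1] = area, d_id[2] = al_pa
 *
 * so e.g. port ID 0x010200 means domain 0x01, area 0x02, al_pa 0x00.
 * The al_pa byte indexes tgt_vp_map[] to locate the owning vport.
 */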
static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
		vha->hw->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}
}
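
/*
 * In short: qlt_24xx_atio_pkt_all_vps() is the demultiplexer for the
 * shared ATIO ring. ATIO_TYPE7 entries are routed by destination port ID
 * (d_id), immediate notifies by vp_index; everything else is logged and
 * dropped. The per-host handler qlt_24xx_atio_pkt() then takes over under
 * the same hardware lock.
 */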
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	fc_port_t fcport;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->plogi_ack_needed);

	BUG_ON(!tgt);

	if (sess->logout_on_delete) {
		int rc;

		memset(&fcport, 0, sizeof(fcport));
		fcport.loop_id = sess->loop_id;
		fcport.d_id = sess->s_id;
		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
		fcport.vha = vha;
		fcport.tgt_session = sess;

		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
		if (rc != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0xf085,
			    "Schedule logo failed sess %p rc %d\n",
			    sess, rc);
		else
			logout_started = true;
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;

		while (!ACCESS_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
		    "%s: sess %p logout completed\n",
		    __func__, sess);
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (sess->plogi_ack_needed)
		qlt_send_notify_ack(vha, &sess->tm_iocb,
				    0, 0, 0, 0, 0, 0);

	list_del(&sess->sess_list_entry);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);

	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
479 /* ha->hardware_lock supposed to be held on entry */
480 void qlt_unreg_sess(struct qla_tgt_sess
*sess
)
482 struct scsi_qla_host
*vha
= sess
->vha
;
484 vha
->hw
->tgt
.tgt_ops
->clear_nacl_from_fcport_map(sess
);
486 if (!list_empty(&sess
->del_list_entry
))
487 list_del_init(&sess
->del_list_entry
);
488 sess
->deleted
= QLA_SESS_DELETION_IN_PROGRESS
;
490 INIT_WORK(&sess
->free_work
, qlt_free_session_done
);
491 schedule_work(&sess
->free_work
);
493 EXPORT_SYMBOL(qlt_unreg_sess
);
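
/*
 * Teardown ordering sketch: qlt_unreg_sess() only marks the session and
 * queues qlt_free_session_done() on the system workqueue; the actual
 * logout, fabric release and kfree() happen later in process context,
 * since this function is called with ha->hardware_lock held and the
 * logout wait may sleep.
 */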
495 /* ha->hardware_lock supposed to be held on entry */
496 static int qlt_reset(struct scsi_qla_host
*vha
, void *iocb
, int mcmd
)
498 struct qla_hw_data
*ha
= vha
->hw
;
499 struct qla_tgt_sess
*sess
= NULL
;
500 uint32_t unpacked_lun
, lun
= 0;
503 struct imm_ntfy_from_isp
*n
= (struct imm_ntfy_from_isp
*)iocb
;
504 struct atio_from_isp
*a
= (struct atio_from_isp
*)iocb
;
506 loop_id
= le16_to_cpu(n
->u
.isp24
.nport_handle
);
507 if (loop_id
== 0xFFFF) {
509 atomic_inc(&vha
->vha_tgt
.qla_tgt
->tgt_global_resets_count
);
510 qlt_clear_tgt_db(vha
->vha_tgt
.qla_tgt
);
511 #if 0 /* FIXME: do we need to choose a session here? */
512 if (!list_empty(&ha
->tgt
.qla_tgt
->sess_list
)) {
513 sess
= list_entry(ha
->tgt
.qla_tgt
->sess_list
.next
,
514 typeof(*sess
), sess_list_entry
);
516 case QLA_TGT_NEXUS_LOSS_SESS
:
517 mcmd
= QLA_TGT_NEXUS_LOSS
;
519 case QLA_TGT_ABORT_ALL_SESS
:
520 mcmd
= QLA_TGT_ABORT_ALL
;
522 case QLA_TGT_NEXUS_LOSS
:
523 case QLA_TGT_ABORT_ALL
:
526 ql_dbg(ql_dbg_tgt
, vha
, 0xe046,
527 "qla_target(%d): Not allowed "
528 "command %x in %s", vha
->vp_idx
,
537 sess
= ha
->tgt
.tgt_ops
->find_sess_by_loop_id(vha
, loop_id
);
540 ql_dbg(ql_dbg_tgt
, vha
, 0xe000,
541 "Using sess for qla_tgt_reset: %p\n", sess
);
547 ql_dbg(ql_dbg_tgt
, vha
, 0xe047,
548 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
549 "loop_id %d)\n", vha
->host_no
, sess
, sess
->port_name
,
552 lun
= a
->u
.isp24
.fcp_cmnd
.lun
;
553 unpacked_lun
= scsilun_to_int((struct scsi_lun
*)&lun
);
555 return qlt_issue_task_mgmt(sess
, unpacked_lun
, mcmd
,
556 iocb
, QLA24XX_MGMT_SEND_NACK
);
559 /* ha->hardware_lock supposed to be held on entry */
560 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess
*sess
,
563 struct qla_tgt
*tgt
= sess
->tgt
;
564 uint32_t dev_loss_tmo
= tgt
->ha
->port_down_retry_count
+ 5;
567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate
&& sess
->deleted
== QLA_SESS_DELETION_PENDING
)
569 list_del(&sess
->del_list_entry
);
574 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe001,
575 "Scheduling sess %p for deletion\n", sess
);
579 sess
->deleted
= QLA_SESS_DELETION_IN_PROGRESS
;
580 list_add(&sess
->del_list_entry
, &tgt
->del_sess_list
);
582 sess
->deleted
= QLA_SESS_DELETION_PENDING
;
583 list_add_tail(&sess
->del_list_entry
, &tgt
->del_sess_list
);
586 sess
->expires
= jiffies
+ dev_loss_tmo
* HZ
;
588 ql_dbg(ql_dbg_tgt
, sess
->vha
, 0xe048,
589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
591 sess
->vha
->vp_idx
, sess
->port_name
, sess
->loop_id
,
592 sess
->s_id
.b
.domain
, sess
->s_id
.b
.area
, sess
->s_id
.b
.al_pa
,
593 dev_loss_tmo
, sess
->expires
, immediate
, sess
->logout_on_delete
,
597 mod_delayed_work(system_wq
, &tgt
->sess_del_work
, 0);
599 schedule_delayed_work(&tgt
->sess_del_work
,
600 sess
->expires
- jiffies
);
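
/*
 * Timing example for the scheduling above (illustrative numbers): with a
 * port_down_retry_count of 30, dev_loss_tmo = 30 + 5 = 35, so a
 * non-immediate deletion fires roughly 35 * HZ jiffies from now, while
 * immediate deletions run the delayed work right away.
 */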
603 /* ha->hardware_lock supposed to be held on entry */
604 static void qlt_clear_tgt_db(struct qla_tgt
*tgt
)
606 struct qla_tgt_sess
*sess
;
608 list_for_each_entry(sess
, &tgt
->sess_list
, sess_list_entry
)
609 qlt_schedule_sess_for_deletion(sess
, true);
611 /* At this point tgt could be already dead */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
663 /* ha->hardware_lock supposed to be held on entry */
664 static void qlt_undelete_sess(struct qla_tgt_sess
*sess
)
666 BUG_ON(sess
->deleted
!= QLA_SESS_DELETION_PENDING
);
668 list_del_init(&sess
->del_list_entry
);
static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			/* No turning back */
			list_del_init(&sess->del_list_entry);
			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
				sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			/* Cannot undelete at this point */
			if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				return NULL;
			}

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
						(fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;

			qlt_do_generation_tick(vha, &sess->generation);

			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);
		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;
	INIT_LIST_HEAD(&sess->del_list_entry);

	/* Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary. */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;

	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	qlt_do_generation_tick(vha, &sess->generation);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		/* Point of no return */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
					(fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	if (tgt->tgt_stop)
		return;

	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess)
		return;

	if (max_gen - sess->generation < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;

	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		int npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
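
/*
 * Shutdown is two-phase: phase1 (above) stops new sessions and waits for
 * existing ones to drain; phase2 (below) waits for in-flight IRQ
 * processing and marks the target stopped. Both are driven from
 * tcm_qla2xxx configfs teardown, conceptually:
 *
 *	if (qlt_stop_phase1(tgt) == 0)	// may fail with NPIV ports in use
 *		qlt_stop_phase2(tgt);
 */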
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
1074 /* ha->hardware_lock supposed to be held on entry */
1075 static int qlt_sched_sess_work(struct qla_tgt
*tgt
, int type
,
1076 const void *param
, unsigned int param_size
)
1078 struct qla_tgt_sess_work_param
*prm
;
1079 unsigned long flags
;
1081 prm
= kzalloc(sizeof(*prm
), GFP_ATOMIC
);
1083 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf050,
1084 "qla_target(%d): Unable to create session "
1085 "work, command will be refused", 0);
1089 ql_dbg(ql_dbg_tgt_mgt
, tgt
->vha
, 0xf00e,
1090 "Scheduling work (type %d, prm %p)"
1091 " to find session for param %p (size %d, tgt %p)\n",
1092 type
, prm
, param
, param_size
, tgt
);
1095 memcpy(&prm
->tm_iocb
, param
, param_size
);
1097 spin_lock_irqsave(&tgt
->sess_work_lock
, flags
);
1098 list_add_tail(&prm
->sess_works_list_entry
, &tgt
->sess_works_list
);
1099 spin_unlock_irqrestore(&tgt
->sess_work_lock
, flags
);
1101 schedule_work(&tgt
->sess_work
);
1107 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1109 static void qlt_send_notify_ack(struct scsi_qla_host
*vha
,
1110 struct imm_ntfy_from_isp
*ntfy
,
1111 uint32_t add_flags
, uint16_t resp_code
, int resp_code_valid
,
1112 uint16_t srr_flags
, uint16_t srr_reject_code
, uint8_t srr_explan
)
1114 struct qla_hw_data
*ha
= vha
->hw
;
1116 struct nack_to_isp
*nack
;
1118 ql_dbg(ql_dbg_tgt
, vha
, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha
);
1120 /* Send marker if required */
1121 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1124 pkt
= (request_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
1126 ql_dbg(ql_dbg_tgt
, vha
, 0xe049,
1127 "qla_target(%d): %s failed: unable to allocate "
1128 "request packet\n", vha
->vp_idx
, __func__
);
1132 if (vha
->vha_tgt
.qla_tgt
!= NULL
)
1133 vha
->vha_tgt
.qla_tgt
->notify_ack_expected
++;
1135 pkt
->entry_type
= NOTIFY_ACK_TYPE
;
1136 pkt
->entry_count
= 1;
1138 nack
= (struct nack_to_isp
*)pkt
;
1139 nack
->ox_id
= ntfy
->ox_id
;
1141 nack
->u
.isp24
.nport_handle
= ntfy
->u
.isp24
.nport_handle
;
1142 if (le16_to_cpu(ntfy
->u
.isp24
.status
) == IMM_NTFY_ELS
) {
1143 nack
->u
.isp24
.flags
= ntfy
->u
.isp24
.flags
&
1144 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB
);
1146 nack
->u
.isp24
.srr_rx_id
= ntfy
->u
.isp24
.srr_rx_id
;
1147 nack
->u
.isp24
.status
= ntfy
->u
.isp24
.status
;
1148 nack
->u
.isp24
.status_subcode
= ntfy
->u
.isp24
.status_subcode
;
1149 nack
->u
.isp24
.fw_handle
= ntfy
->u
.isp24
.fw_handle
;
1150 nack
->u
.isp24
.exchange_address
= ntfy
->u
.isp24
.exchange_address
;
1151 nack
->u
.isp24
.srr_rel_offs
= ntfy
->u
.isp24
.srr_rel_offs
;
1152 nack
->u
.isp24
.srr_ui
= ntfy
->u
.isp24
.srr_ui
;
1153 nack
->u
.isp24
.srr_flags
= cpu_to_le16(srr_flags
);
1154 nack
->u
.isp24
.srr_reject_code
= srr_reject_code
;
1155 nack
->u
.isp24
.srr_reject_code_expl
= srr_explan
;
1156 nack
->u
.isp24
.vp_index
= ntfy
->u
.isp24
.vp_index
;
1158 ql_dbg(ql_dbg_tgt
, vha
, 0xe005,
1159 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1160 vha
->vp_idx
, nack
->u
.isp24
.status
);
1162 /* Memory Barrier */
1164 qla2x00_start_iocbs(vha
, vha
->req
);
1168 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1170 static void qlt_24xx_send_abts_resp(struct scsi_qla_host
*vha
,
1171 struct abts_recv_from_24xx
*abts
, uint32_t status
,
1174 struct qla_hw_data
*ha
= vha
->hw
;
1175 struct abts_resp_to_24xx
*resp
;
1179 ql_dbg(ql_dbg_tgt
, vha
, 0xe006,
1180 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1183 /* Send marker if required */
1184 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1187 resp
= (struct abts_resp_to_24xx
*)qla2x00_alloc_iocbs_ready(vha
, NULL
);
1189 ql_dbg(ql_dbg_tgt
, vha
, 0xe04a,
1190 "qla_target(%d): %s failed: unable to allocate "
1191 "request packet", vha
->vp_idx
, __func__
);
1195 resp
->entry_type
= ABTS_RESP_24XX
;
1196 resp
->entry_count
= 1;
1197 resp
->nport_handle
= abts
->nport_handle
;
1198 resp
->vp_index
= vha
->vp_idx
;
1199 resp
->sof_type
= abts
->sof_type
;
1200 resp
->exchange_address
= abts
->exchange_address
;
1201 resp
->fcp_hdr_le
= abts
->fcp_hdr_le
;
1202 f_ctl
= cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP
|
1203 F_CTL_LAST_SEQ
| F_CTL_END_SEQ
|
1204 F_CTL_SEQ_INITIATIVE
);
1205 p
= (uint8_t *)&f_ctl
;
1206 resp
->fcp_hdr_le
.f_ctl
[0] = *p
++;
1207 resp
->fcp_hdr_le
.f_ctl
[1] = *p
++;
1208 resp
->fcp_hdr_le
.f_ctl
[2] = *p
;
1210 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1211 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1212 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1213 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1214 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1215 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1217 resp
->fcp_hdr_le
.d_id
[0] = abts
->fcp_hdr_le
.s_id
[0];
1218 resp
->fcp_hdr_le
.d_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1219 resp
->fcp_hdr_le
.d_id
[2] = abts
->fcp_hdr_le
.s_id
[2];
1220 resp
->fcp_hdr_le
.s_id
[0] = abts
->fcp_hdr_le
.d_id
[0];
1221 resp
->fcp_hdr_le
.s_id
[1] = abts
->fcp_hdr_le
.d_id
[1];
1222 resp
->fcp_hdr_le
.s_id
[2] = abts
->fcp_hdr_le
.d_id
[2];
1224 resp
->exchange_addr_to_abort
= abts
->exchange_addr_to_abort
;
1225 if (status
== FCP_TMF_CMPL
) {
1226 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_ACC
;
1227 resp
->payload
.ba_acct
.seq_id_valid
= SEQ_ID_INVALID
;
1228 resp
->payload
.ba_acct
.low_seq_cnt
= 0x0000;
1229 resp
->payload
.ba_acct
.high_seq_cnt
= 0xFFFF;
1230 resp
->payload
.ba_acct
.ox_id
= abts
->fcp_hdr_le
.ox_id
;
1231 resp
->payload
.ba_acct
.rx_id
= abts
->fcp_hdr_le
.rx_id
;
1233 resp
->fcp_hdr_le
.r_ctl
= R_CTL_BASIC_LINK_SERV
| R_CTL_B_RJT
;
1234 resp
->payload
.ba_rjt
.reason_code
=
1235 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM
;
1236 /* Other bytes are zero */
1239 vha
->vha_tgt
.qla_tgt
->abts_resp_expected
++;
1241 /* Memory Barrier */
1243 qla2x00_start_iocbs(vha
, vha
->req
);
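
/*
 * Background per FC-FS basic link service rules (protocol fact, not
 * driver-specific): FCP_TMF_CMPL above produces a BA_ACC payload echoing
 * the aborted exchange's ox_id/rx_id, while any other status produces a
 * BA_RJT with reason "unable to perform command request".
 */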
1247 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1249 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host
*vha
,
1250 struct abts_resp_from_24xx_fw
*entry
)
1252 struct ctio7_to_24xx
*ctio
;
1254 ql_dbg(ql_dbg_tgt
, vha
, 0xe007,
1255 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha
->hw
);
1256 /* Send marker if required */
1257 if (qlt_issue_marker(vha
, 1) != QLA_SUCCESS
)
1260 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs_ready(vha
, NULL
);
1262 ql_dbg(ql_dbg_tgt
, vha
, 0xe04b,
1263 "qla_target(%d): %s failed: unable to allocate "
1264 "request packet\n", vha
->vp_idx
, __func__
);
1269 * We've got on entrance firmware's response on by us generated
1270 * ABTS response. So, in it ID fields are reversed.
1273 ctio
->entry_type
= CTIO_TYPE7
;
1274 ctio
->entry_count
= 1;
1275 ctio
->nport_handle
= entry
->nport_handle
;
1276 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1277 ctio
->timeout
= cpu_to_le16(QLA_TGT_TIMEOUT
);
1278 ctio
->vp_index
= vha
->vp_idx
;
1279 ctio
->initiator_id
[0] = entry
->fcp_hdr_le
.d_id
[0];
1280 ctio
->initiator_id
[1] = entry
->fcp_hdr_le
.d_id
[1];
1281 ctio
->initiator_id
[2] = entry
->fcp_hdr_le
.d_id
[2];
1282 ctio
->exchange_addr
= entry
->exchange_addr_to_abort
;
1283 ctio
->u
.status1
.flags
= cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
|
1284 CTIO7_FLAGS_TERMINATE
);
1285 ctio
->u
.status1
.ox_id
= cpu_to_le16(entry
->fcp_hdr_le
.ox_id
);
1287 /* Memory Barrier */
1289 qla2x00_start_iocbs(vha
, vha
->req
);
1291 qlt_24xx_send_abts_resp(vha
, (struct abts_recv_from_24xx
*)entry
,
1292 FCP_TMF_CMPL
, true);
static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;

	spin_lock(&vha->cmd_list_lock);

	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		if (tag == op->atio.u.isp24.exchange_addr) {
			op->aborted = true;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		if (tag == cmd->atio.u.isp24.exchange_addr) {
			cmd->state = QLA_TGT_STATE_ABORTED;
			spin_unlock(&vha->cmd_list_lock);
			return 1;
		}
	}

	spin_unlock(&vha->cmd_list_lock);
	return 0;
}
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other port (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
				uint32_t lun, uint8_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;

	key = sid_to_key(s_id);
	spin_lock(&vha->cmd_list_lock);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		uint32_t op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		uint32_t cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->state = QLA_TGT_STATE_ABORTED;
	}
	spin_unlock(&vha->cmd_list_lock);
}
1359 /* ha->hardware_lock supposed to be held on entry */
1360 static int __qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1361 struct abts_recv_from_24xx
*abts
, struct qla_tgt_sess
*sess
)
1363 struct qla_hw_data
*ha
= vha
->hw
;
1364 struct se_session
*se_sess
= sess
->se_sess
;
1365 struct qla_tgt_mgmt_cmd
*mcmd
;
1366 struct se_cmd
*se_cmd
;
1369 bool found_lun
= false;
1371 spin_lock(&se_sess
->sess_cmd_lock
);
1372 list_for_each_entry(se_cmd
, &se_sess
->sess_cmd_list
, se_cmd_list
) {
1373 struct qla_tgt_cmd
*cmd
=
1374 container_of(se_cmd
, struct qla_tgt_cmd
, se_cmd
);
1375 if (se_cmd
->tag
== abts
->exchange_addr_to_abort
) {
1376 lun
= cmd
->unpacked_lun
;
1381 spin_unlock(&se_sess
->sess_cmd_lock
);
1383 /* cmd not in LIO lists, look in qla list */
1385 if (abort_cmd_for_tag(vha
, abts
->exchange_addr_to_abort
)) {
1386 /* send TASK_ABORT response immediately */
1387 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_CMPL
, false);
1390 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf081,
1391 "unable to find cmd in driver or LIO for tag 0x%x\n",
1392 abts
->exchange_addr_to_abort
);
1397 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf00f,
1398 "qla_target(%d): task abort (tag=%d)\n",
1399 vha
->vp_idx
, abts
->exchange_addr_to_abort
);
1401 mcmd
= mempool_alloc(qla_tgt_mgmt_cmd_mempool
, GFP_ATOMIC
);
1403 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf051,
1404 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1405 vha
->vp_idx
, __func__
);
1408 memset(mcmd
, 0, sizeof(*mcmd
));
1411 memcpy(&mcmd
->orig_iocb
.abts
, abts
, sizeof(mcmd
->orig_iocb
.abts
));
1412 mcmd
->reset_count
= vha
->hw
->chip_reset
;
1414 rc
= ha
->tgt
.tgt_ops
->handle_tmr(mcmd
, lun
, TMR_ABORT_TASK
,
1415 abts
->exchange_addr_to_abort
);
1417 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf052,
1418 "qla_target(%d): tgt_ops->handle_tmr()"
1419 " failed: %d", vha
->vp_idx
, rc
);
1420 mempool_free(mcmd
, qla_tgt_mgmt_cmd_mempool
);
1428 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1430 static void qlt_24xx_handle_abts(struct scsi_qla_host
*vha
,
1431 struct abts_recv_from_24xx
*abts
)
1433 struct qla_hw_data
*ha
= vha
->hw
;
1434 struct qla_tgt_sess
*sess
;
1435 uint32_t tag
= abts
->exchange_addr_to_abort
;
1439 if (le32_to_cpu(abts
->fcp_hdr_le
.parameter
) & ABTS_PARAM_ABORT_SEQ
) {
1440 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf053,
1441 "qla_target(%d): ABTS: Abort Sequence not "
1442 "supported\n", vha
->vp_idx
);
1443 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1447 if (tag
== ATIO_EXCHANGE_ADDRESS_UNKNOWN
) {
1448 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf010,
1449 "qla_target(%d): ABTS: Unknown Exchange "
1450 "Address received\n", vha
->vp_idx
);
1451 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1455 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf011,
1456 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1457 "tag=%d, param=%x)\n", vha
->vp_idx
, abts
->fcp_hdr_le
.s_id
[2],
1458 abts
->fcp_hdr_le
.s_id
[1], abts
->fcp_hdr_le
.s_id
[0], tag
,
1459 le32_to_cpu(abts
->fcp_hdr_le
.parameter
));
1461 s_id
[0] = abts
->fcp_hdr_le
.s_id
[2];
1462 s_id
[1] = abts
->fcp_hdr_le
.s_id
[1];
1463 s_id
[2] = abts
->fcp_hdr_le
.s_id
[0];
1465 sess
= ha
->tgt
.tgt_ops
->find_sess_by_s_id(vha
, s_id
);
1467 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf012,
1468 "qla_target(%d): task abort for non-existant session\n",
1470 rc
= qlt_sched_sess_work(vha
->vha_tgt
.qla_tgt
,
1471 QLA_TGT_SESS_WORK_ABORT
, abts
, sizeof(*abts
));
1473 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
,
1479 if (sess
->deleted
== QLA_SESS_DELETION_IN_PROGRESS
) {
1480 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1484 rc
= __qlt_24xx_handle_abts(vha
, abts
, sess
);
1486 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf054,
1487 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1489 qlt_24xx_send_abts_resp(vha
, abts
, FCP_TMF_REJECTED
, false);
1495 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1497 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host
*ha
,
1498 struct qla_tgt_mgmt_cmd
*mcmd
, uint32_t resp_code
)
1500 struct atio_from_isp
*atio
= &mcmd
->orig_iocb
.atio
;
1501 struct ctio7_to_24xx
*ctio
;
1504 ql_dbg(ql_dbg_tgt
, ha
, 0xe008,
1505 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1506 ha
, atio
, resp_code
);
1508 /* Send marker if required */
1509 if (qlt_issue_marker(ha
, 1) != QLA_SUCCESS
)
1512 ctio
= (struct ctio7_to_24xx
*)qla2x00_alloc_iocbs(ha
, NULL
);
1514 ql_dbg(ql_dbg_tgt
, ha
, 0xe04c,
1515 "qla_target(%d): %s failed: unable to allocate "
1516 "request packet\n", ha
->vp_idx
, __func__
);
1520 ctio
->entry_type
= CTIO_TYPE7
;
1521 ctio
->entry_count
= 1;
1522 ctio
->handle
= QLA_TGT_SKIP_HANDLE
| CTIO_COMPLETION_HANDLE_MARK
;
1523 ctio
->nport_handle
= mcmd
->sess
->loop_id
;
1524 ctio
->timeout
= cpu_to_le16(QLA_TGT_TIMEOUT
);
1525 ctio
->vp_index
= ha
->vp_idx
;
1526 ctio
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1527 ctio
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1528 ctio
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1529 ctio
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1530 ctio
->u
.status1
.flags
= (atio
->u
.isp24
.attr
<< 9) |
1531 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1
| CTIO7_FLAGS_SEND_STATUS
);
1532 temp
= be16_to_cpu(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1533 ctio
->u
.status1
.ox_id
= cpu_to_le16(temp
);
1534 ctio
->u
.status1
.scsi_status
=
1535 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID
);
1536 ctio
->u
.status1
.response_len
= cpu_to_le16(8);
1537 ctio
->u
.status1
.sense_data
[0] = resp_code
;
1539 /* Memory Barrier */
1541 qla2x00_start_iocbs(ha
, ha
->req
);
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
			qla2x00_reset_active(vha), mcmd->reset_count,
			ha->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt, cnt_in;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_io, vha, 0x305a,
		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
		    vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
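
/*
 * Worked example for the ring-space math above (illustrative numbers):
 * with length = 2048, ring_index = 100 and hardware out-pointer cnt = 90,
 * the producer has wrapped relative to the consumer, so free space is
 * 2048 - (100 - 90) = 2038 entries; if ring_index were 80, free space
 * would simply be 90 - 80 = 10. The "+ 2" keeps a safety margin so the
 * queue never appears completely full.
 */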
1712 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1714 static inline void *qlt_get_req_pkt(struct scsi_qla_host
*vha
)
1716 /* Adjust ring index. */
1717 vha
->req
->ring_index
++;
1718 if (vha
->req
->ring_index
== vha
->req
->length
) {
1719 vha
->req
->ring_index
= 0;
1720 vha
->req
->ring_ptr
= vha
->req
->ring
;
1722 vha
->req
->ring_ptr
++;
1724 return (cont_entry_t
*)vha
->req
->ring_ptr
;
1727 /* ha->hardware_lock supposed to be held on entry */
1728 static inline uint32_t qlt_make_handle(struct scsi_qla_host
*vha
)
1730 struct qla_hw_data
*ha
= vha
->hw
;
1733 h
= ha
->tgt
.current_handle
;
1734 /* always increment cmd handle */
1737 if (h
> DEFAULT_OUTSTANDING_COMMANDS
)
1738 h
= 1; /* 0 is QLA_TGT_NULL_HANDLE */
1739 if (h
== ha
->tgt
.current_handle
) {
1740 ql_dbg(ql_dbg_io
, vha
, 0x305b,
1741 "qla_target(%d): Ran out of "
1742 "empty cmd slots in ha %p\n", vha
->vp_idx
, ha
);
1743 h
= QLA_TGT_NULL_HANDLE
;
1746 } while ((h
== QLA_TGT_NULL_HANDLE
) ||
1747 (h
== QLA_TGT_SKIP_HANDLE
) ||
1748 (ha
->tgt
.cmds
[h
-1] != NULL
));
1750 if (h
!= QLA_TGT_NULL_HANDLE
)
1751 ha
->tgt
.current_handle
= h
;
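
/*
 * Handle allocation sketch: handles are 1-based slot numbers into
 * ha->tgt.cmds[] (slot h-1), scanned round-robin starting from
 * current_handle. 0 (QLA_TGT_NULL_HANDLE) and QLA_TGT_SKIP_HANDLE are
 * reserved, so a full table is detected when the scan wraps back around
 * to current_handle without finding a free slot.
 */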
1756 /* ha->hardware_lock supposed to be held on entry */
1757 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm
*prm
,
1758 struct scsi_qla_host
*vha
)
1761 struct ctio7_to_24xx
*pkt
;
1762 struct qla_hw_data
*ha
= vha
->hw
;
1763 struct atio_from_isp
*atio
= &prm
->cmd
->atio
;
1766 pkt
= (struct ctio7_to_24xx
*)vha
->req
->ring_ptr
;
1768 memset(pkt
, 0, sizeof(*pkt
));
1770 pkt
->entry_type
= CTIO_TYPE7
;
1771 pkt
->entry_count
= (uint8_t)prm
->req_cnt
;
1772 pkt
->vp_index
= vha
->vp_idx
;
1774 h
= qlt_make_handle(vha
);
1775 if (unlikely(h
== QLA_TGT_NULL_HANDLE
)) {
1777 * CTIO type 7 from the firmware doesn't provide a way to
1778 * know the initiator's LOOP ID, hence we can't find
1779 * the session and, so, the command.
1783 ha
->tgt
.cmds
[h
-1] = prm
->cmd
;
1785 pkt
->handle
= h
| CTIO_COMPLETION_HANDLE_MARK
;
1786 pkt
->nport_handle
= prm
->cmd
->loop_id
;
1787 pkt
->timeout
= cpu_to_le16(QLA_TGT_TIMEOUT
);
1788 pkt
->initiator_id
[0] = atio
->u
.isp24
.fcp_hdr
.s_id
[2];
1789 pkt
->initiator_id
[1] = atio
->u
.isp24
.fcp_hdr
.s_id
[1];
1790 pkt
->initiator_id
[2] = atio
->u
.isp24
.fcp_hdr
.s_id
[0];
1791 pkt
->exchange_addr
= atio
->u
.isp24
.exchange_addr
;
1792 pkt
->u
.status0
.flags
|= (atio
->u
.isp24
.attr
<< 9);
1793 temp
= be16_to_cpu(atio
->u
.isp24
.fcp_hdr
.ox_id
);
1794 pkt
->u
.status0
.ox_id
= cpu_to_le16(temp
);
1795 pkt
->u
.status0
.relative_offset
= cpu_to_le32(prm
->cmd
->offset
);
1801 * ha->hardware_lock supposed to be held on entry. We have already made sure
1802 * that there is sufficient amount of request entries to not drop it.
1804 static void qlt_load_cont_data_segments(struct qla_tgt_prm
*prm
,
1805 struct scsi_qla_host
*vha
)
1808 uint32_t *dword_ptr
;
1809 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1811 /* Build continuation packets */
1812 while (prm
->seg_cnt
> 0) {
1813 cont_a64_entry_t
*cont_pkt64
=
1814 (cont_a64_entry_t
*)qlt_get_req_pkt(vha
);
1817 * Make sure that from cont_pkt64 none of
1818 * 64-bit specific fields used for 32-bit
1819 * addressing. Cast to (cont_entry_t *) for
1823 memset(cont_pkt64
, 0, sizeof(*cont_pkt64
));
1825 cont_pkt64
->entry_count
= 1;
1826 cont_pkt64
->sys_define
= 0;
1828 if (enable_64bit_addressing
) {
1829 cont_pkt64
->entry_type
= CONTINUE_A64_TYPE
;
1831 (uint32_t *)&cont_pkt64
->dseg_0_address
;
1833 cont_pkt64
->entry_type
= CONTINUE_TYPE
;
1835 (uint32_t *)&((cont_entry_t
*)
1836 cont_pkt64
)->dseg_0_address
;
1839 /* Load continuation entry data segments */
1841 cnt
< prm
->tgt
->datasegs_per_cont
&& prm
->seg_cnt
;
1842 cnt
++, prm
->seg_cnt
--) {
1844 cpu_to_le32(pci_dma_lo32
1845 (sg_dma_address(prm
->sg
)));
1846 if (enable_64bit_addressing
) {
1848 cpu_to_le32(pci_dma_hi32
1852 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1854 prm
->sg
= sg_next(prm
->sg
);
1860 * ha->hardware_lock supposed to be held on entry. We have already made sure
1861 * that there is sufficient amount of request entries to not drop it.
1863 static void qlt_load_data_segments(struct qla_tgt_prm
*prm
,
1864 struct scsi_qla_host
*vha
)
1867 uint32_t *dword_ptr
;
1868 int enable_64bit_addressing
= prm
->tgt
->tgt_enable_64bit_addr
;
1869 struct ctio7_to_24xx
*pkt24
= (struct ctio7_to_24xx
*)prm
->pkt
;
1871 pkt24
->u
.status0
.transfer_length
= cpu_to_le32(prm
->cmd
->bufflen
);
1873 /* Setup packet address segment pointer */
1874 dword_ptr
= pkt24
->u
.status0
.dseg_0_address
;
1876 /* Set total data segment count */
1878 pkt24
->dseg_count
= cpu_to_le16(prm
->seg_cnt
);
1880 if (prm
->seg_cnt
== 0) {
1881 /* No data transfer */
1887 /* If scatter gather */
1889 /* Load command entry data segments */
1891 (cnt
< prm
->tgt
->datasegs_per_cmd
) && prm
->seg_cnt
;
1892 cnt
++, prm
->seg_cnt
--) {
1894 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm
->sg
)));
1895 if (enable_64bit_addressing
) {
1897 cpu_to_le32(pci_dma_hi32(
1898 sg_dma_address(prm
->sg
)));
1900 *dword_ptr
++ = cpu_to_le32(sg_dma_len(prm
->sg
));
1902 prm
->sg
= sg_next(prm
->sg
);
1905 qlt_load_cont_data_segments(prm
, vha
);
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_io, vha, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
2025 static void qlt_check_srr_debug(struct qla_tgt_cmd
*cmd
, int *xmit_type
)
2027 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */
2028 if ((*xmit_type
& QLA_TGT_XMIT_STATUS
) && (qlt_srr_random() % 200)
2030 *xmit_type
&= ~QLA_TGT_XMIT_STATUS
;
2031 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf015,
2032 "Dropping cmd %p (tag %d) status", cmd
, se_cmd
->tag
);
2036 * It's currently not possible to simulate SRRs for FCP_WRITE without
2037 * a physical link layer failure, so don't even try here..
2039 if (cmd
->dma_data_direction
!= DMA_FROM_DEVICE
)
2042 if (qlt_has_data(cmd
) && (cmd
->sg_cnt
> 1) &&
2043 ((qlt_srr_random() % 100) == 20)) {
2045 unsigned int tot_len
= 0;
2048 leave
= qlt_srr_random() % cmd
->sg_cnt
;
2050 for (i
= 0; i
< leave
; i
++)
2051 tot_len
+= cmd
->sg
[i
].length
;
2053 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf016,
2054 "Cutting cmd %p (tag %d) buffer"
2055 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
2056 " cmd->sg_cnt %d)", cmd
, se_cmd
->tag
, tot_len
, leave
,
2057 cmd
->bufflen
, cmd
->sg_cnt
);
2059 cmd
->bufflen
= tot_len
;
2060 cmd
->sg_cnt
= leave
;
2063 if (qlt_has_data(cmd
) && ((qlt_srr_random() % 100) == 70)) {
2064 unsigned int offset
= qlt_srr_random() % cmd
->bufflen
;
2066 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf017,
2067 "Cutting cmd %p (tag %d) buffer head "
2068 "to offset %d (cmd->bufflen %d)", cmd
, se_cmd
->tag
, offset
,
2071 *xmit_type
&= ~QLA_TGT_XMIT_DATA
;
2072 else if (qlt_set_data_offset(cmd
, offset
)) {
2073 ql_dbg(ql_dbg_tgt_mgt
, cmd
->vha
, 0xf018,
2074 "qlt_set_data_offset() failed (tag %d)", se_cmd
->tag
);
2079 static inline void qlt_check_srr_debug(struct qla_tgt_cmd
*cmd
, int *xmit_type
)
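/*
 * Note on qlt_srr_random() above: the hi/lo split around 127773 and the
 * "16807 * lo - 2836 * hi" update implement Schrage's method for computing
 * rv = 16807 * rv mod (2^31 - 1) without overflowing a 32-bit long -- the
 * classic Park-Miller "minimal standard" generator also used by the XFS
 * code this was taken from (127773 = (2^31-1)/16807, 2836 = (2^31-1) mod
 * 16807).
 */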
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04f,
			    "qla_target(%d): %d bytes of sense "
			    "lost", prm->tgt->ha->vp_idx,
			    prm->sense_buffer_len % 4);
		}
#endif
	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
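/*
 * Status mode 0 carries SCSI status inside the CTIO7 status0 union and has
 * no room for sense bytes; when sense data must be returned the code above
 * switches the IOCB to status mode 1, whose status1 union embeds up to
 * sizeof(ctio->u.status1.sense_data) bytes of sense -- hence the min_t()
 * clamp at the top of the function.
 */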
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	 if (!sp->cmd->prot_chk)
	 return 0;
	 *
	 */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}
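/*
 * ql2xenablehba_err_chk thus acts as a threshold: level 1 enables hardware
 * DIF checking for DOUT_INSERT/DIN_STRIP, level 2 extends it to the PASS
 * operations where tags supplied by the other side are merely forwarded,
 * and DIN_INSERT/DOUT_STRIP are always checked since the HBA generates or
 * consumes the protection data itself there.
 */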
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI
 * command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

	/* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case TARGET_DIF_TYPE1_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case TARGET_DIF_TYPE2_PROT:
		ctx->ref_tag = cpu_to_le32(lba);

		if (!qlt_hba_err_chk_enabled(se_cmd))
			break;

		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	/* For Type 3 protection: 16 bit GUARD only */
	case TARGET_DIF_TYPE3_PROT:
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
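/*
 * The ref tag mask selects which of the four ref tag bytes the HBA
 * compares: all 0xff for Type 0/1/2 (full 32-bit LBA check) and all 0x00
 * for Type 3, which per T10 DIF defines no ref tag contents and is
 * protected by the 16-bit guard CRC alone.
 */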
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
	uint32_t		*cur_dsd;
	uint32_t		transfer_length = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	struct ctio_crc2_to_fw	*pkt;
	dma_addr_t		crc_ctx_dma;
	uint16_t		fw_prot_opts = 0;
	struct qla_tgt_cmd	*cmd = prm->cmd;
	struct se_cmd		*se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t t16;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg(ql_dbg_tgt, vha, 0xe071,
	    "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
	    vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
	    prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		data_bytes += dif_bytes;
		break;

	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;

	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:	/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, cmd))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
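/*
 * "Bundling" above keeps the data and protection scatterlists in separate
 * DSD lists inside the CRC context so the HBA can fetch them with
 * interleaved PCI accesses. It is disabled for DIN_INSERT/DOUT_STRIP
 * because in those modes the protection bytes exist only on the wire,
 * never in host memory, so there is no separate protection scatterlist to
 * fetch.
 */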
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		if (cmd->sess->logout_completed)
			/* no need to terminate. FW already freed exchange. */
			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		else
			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0))
		return res;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe101,
		    "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
		    qla2x00_reset_active(vha), cmd->reset_count,
		    ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have IOCBs for this request? */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0)) {
		vha->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_io, vha, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
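/*
 * When both data and a separate status IOCB are queued above, the data
 * CTIO is tagged with CTIO_INTERMEDIATE_HANDLE_MARK and
 * CTIO7_FLAGS_DONT_RET_CTIO so the firmware only reports completion on the
 * trailing status CTIO; qlt_do_ctio_completion() ignores intermediate
 * handles accordingly.
 */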
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
		/*
		 * Either a chip reset is active or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg(ql_dbg_async, vha, 0xe102,
		    "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
		    qla2x00_reset_active(vha), cmd->reset_count,
		    ha->chip_reset);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return 0;
	}

	/* Does F/W have IOCBs for this request? */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(&prm, vha);
	else
		res = qlt_24xx_build_ctio_pkt(&prm, vha);

	if (unlikely(res != 0)) {
		vha->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
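/*
 * After this CTIO is queued the command sits in QLA_TGT_STATE_NEED_DATA
 * until the firmware reports the write transfer complete, at which point
 * qlt_do_ctio_completion() moves it to QLA_TGT_STATE_DATA_IN and hands it
 * back to the core via tgt_ops->handle_data().
 */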
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.
 */
static inline int
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;
	uint64_t	lba = cmd->se_cmd.t_task_lba;

	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt, vha, 0xe075,
	    "iocb(s) %p Returned STATUS.\n", sts);

	ql_dbg(ql_dbg_tgt, vha, 0xf075,
	    "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done;

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba + 1;
		cmd->se_cmd.bad_sector = e_ref_tag;
		cmd->se_cmd.pi_err = 0;
		ql_dbg(ql_dbg_tgt, vha, 0xf074,
		    "need to return scsi good\n");

		/* Update protection tag */
		if (cmd->prot_sg_cnt) {
			uint32_t i, k = 0, num_ent;
			struct scatterlist *sg, *sgl;

			sgl = cmd->prot_sg;

			/* Patch the corresponding protection tags */
			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0xf076,
				    "unexpected tag values tag:lba=%u:%llu)\n",
				    e_ref_tag, (unsigned long long)lba);
				goto out;
			}

#if 0
			struct sd_dif_tuple *spt;
			/* TODO:
			 * This section came from initiator. Is it valid here?
			 * should ulp be override with actual val???
			 */
			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
#endif
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe076,
		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = e_ref_tag;

		ql_log(ql_log_warn, vha, 0xe077,
		    "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;

		ql_log(ql_log_warn, vha, 0xe078,
		    "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
		    a_guard, e_guard, cmd);
		goto out;
	}
out:
	return 1;
}
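/*
 * Return value convention: 0 tells the caller the mismatch only involved
 * sectors the initiator marked as "ignore" (all-ones app/ref tags), so a
 * good SCSI status can be returned; 1 means a real guard/ref/app tag error
 * was recorded in se_cmd.pi_err for the core to report.
 */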
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif
		goto done;
	}

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_imm_notif(vha, imm);

#if 0	/* Todo */
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
#endif

done:
	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags = 0;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
	    !cmd->cmd_sent_to_fw)) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return;
}
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	cmd->state = QLA_TGT_STATE_ABORTED;
	cmd->cmd_flags |= BIT_6;

	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
}
EXPORT_SYMBOL(qlt_abort_cmd);
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt_sess *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->jiffies_at_free = get_jiffies_64();
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}
EXPORT_SYMBOL(qlt_free_cmd);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;
	cmd->cmd_flags |= BIT_15;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
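/*
 * Firmware completion handles are 1-based: qlt_make_handle() hands out h
 * and the command is stored at ha->tgt.cmds[h - 1], which is why
 * qlt_get_cmd() decrements the handle before indexing and clears the slot
 * so a handle can only be resolved once.
 */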
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
			return NULL;

		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
/* hardware_lock should be held by caller. */
static void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	handle = qlt_make_handle(vha);

	/* TODO: fix debug message type and ids. */
	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_io, vha, 0xff00,
		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;

		ql_dbg(ql_dbg_io, vha, 0xff01,
		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_io, vha, 0xff02,
		    "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
	} else {
		ql_dbg(ql_dbg_io, vha, 0xff03,
		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
		    cmd->state);
		dump_stack();
	}

	cmd->cmd_flags |= BIT_17;
	ha->tgt.tgt_ops->free_cmd(cmd);
}
void
qlt_host_reset_handler(struct qla_hw_data *ha)
{
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	scsi_qla_host_t *vha = NULL;
	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
	uint32_t i;

	if (!base_vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || qla_ini_mode_enabled(base_vha)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
		    "Target mode disabled\n");
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
	    base_vha->dpc_flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
		cmd = qlt_get_cmd(base_vha, i);
		if (!cmd)
			continue;
		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
		vha = cmd->vha;
		qlt_abort_cmd_on_host_reset(vha, cmd);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			if (qlt_handle_dif_error(vha, cmd, ctio)) {
				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
					/* scsi Write/xfer rdy complete */
					goto skip_term;
				} else {
					/* scsi read/xmit respond complete
					 * call handle dif to send scsi status
					 * rather than terminate exchange.
					 */
					cmd->state = QLA_TGT_STATE_PROCESSED;
					ha->tgt.tgt_ops->handle_dif_err(cmd);
					return;
				}
			} else {
				/* Need to generate a SCSI good completion.
				 * because FW did not send scsi status.
				 */
				status = 0;
				goto skip_term;
			}
			break;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->state == QLA_TGT_STATE_ABORTED" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
			cmd->cmd_flags |= BIT_13;
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
		}
	}
skip_term:

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->cmd_flags |= BIT_12;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		cmd->cmd_flags |= BIT_18;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd,
		    se_cmd->tag);
	} else {
		cmd->cmd_flags |= BIT_19;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    (cmd->state != QLA_TGT_STATE_ABORTED)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
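/*
 * State transitions on completion: PROCESSED commands are freed, NEED_DATA
 * commands become DATA_IN and are handed to tgt_ops->handle_data() (the
 * write buffer is only valid when status == CTIO_SUCCESS), and ABORTED
 * commands are freed without another terminate, since the firmware has
 * already cleaned up the exchange.
 */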
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	cmd->cmd_in_wq = 0;
	cmd->cmd_flags |= BIT_1;
	if (tgt->tgt_stop)
		goto out_term;

	if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->cmd_flags |= BIT_2;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);

	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct qla_tgt_sess *sess,
	struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->cmd_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->reset_count = vha->hw->chip_reset;

	return cmd;
}
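/*
 * Command descriptors are not kmalloc'ed per I/O: they live in the
 * session's preallocated sess_cmd_map and are claimed by percpu_ida tag
 * allocation, so a NULL return here just means the session is out of tags;
 * the callers respond with SAM_STAT_BUSY or -ENOMEM.
 */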
static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
	uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
					struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&op->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	if (op->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
		    "sess_op with tag %u is aborted\n",
		    op->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multy entry atio %p\n", &op->atio);
		goto out_term;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		kfree(op);
		return;
	}
	/*
	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	kfree(op);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		struct qla_tgt_sess_op *op = kzalloc(
		    sizeof(struct qla_tgt_sess_op), GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;

		spin_lock(&vha->cmd_list_lock);
		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
		spin_unlock(&vha->cmd_list_lock);

		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	kref_get(&sess->se_sess->sess_kref);

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -ENOMEM;
	}

	cmd->cmd_in_wq = 1;
	cmd->cmd_flags |= BIT_0;

	spin_lock(&vha->cmd_list_lock);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock(&vha->cmd_list_lock);

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = vha->hw->chip_reset;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int fn;

	tgt = vha->vha_tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existant session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
	mcmd->reset_count = vha->hw->chip_reset;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (fcport->tgt_session) {
		if (rc != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			    "%s: se_sess %p / sess %p from"
			    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			    " LOGO failed: %#x\n",
			    __func__,
			    fcport->tgt_session->se_sess,
			    fcport->tgt_session,
			    fcport->port_name, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa, rc);
		}

		fcport->tgt_session->logout_completed = 1;
	}
}
static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
	struct imm_ntfy_from_isp *b)
{
	struct imm_ntfy_from_isp tmp;
	memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
	memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
	memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
}
/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns existing session with matching wwn if present.
 * Null otherwise.
 */
static struct qla_tgt_sess *
qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
	port_id_t port_id, uint16_t loop_id)
{
	struct qla_tgt_sess *sess = NULL, *other_sess;
	uint64_t other_wwn;

	list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->s_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess, true);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * combo - kill the session, but don't log out
				 */
				sess->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(other_sess,
				    true);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if (loop_id == other_sess->loop_id) {
			ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess, true);
		}
	}

	return sess;
}
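/*
 * sid_to_key() used below packs an FC s_id (domain, area, al_pa) into a
 * single 24-bit integer so pending commands can be matched against the
 * port that sent the new PLOGI with one comparison.
 */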
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock(&vha->cmd_list_lock);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		if (cmd_key == key) {
			cmd->state = QLA_TGT_STATE_ABORTED;
			count++;
		}
	}
	spin_unlock(&vha->cmd_list_lock);

	return count;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess = NULL;
    uint64_t wwn;
    port_id_t port_id;
    uint16_t loop_id;
    uint16_t wd3_lo;
    int res = 0;

    wwn = wwn_to_u64(iocb->u.isp24.port_name);

    port_id.b.domain = iocb->u.isp24.port_id[2];
    port_id.b.area   = iocb->u.isp24.port_id[1];
    port_id.b.al_pa  = iocb->u.isp24.port_id[0];
    port_id.b.rsvd_1 = 0;

    loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
        "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
        vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

    /* res = 1 means ack at the end of thread
     * res = 0 means ack async/later.
     */
    switch (iocb->u.isp24.status_subcode) {
    case ELS_PLOGI:

        /* Mark all stale commands in qla_tgt_wq for deletion */
        abort_cmds_for_s_id(vha, &port_id);

        if (wwn)
            sess = qlt_find_sess_invalidate_other(tgt, wwn,
                port_id, loop_id);

        if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
            res = 1;
            break;
        }

        if (sess->plogi_ack_needed) {
            /*
             * Initiator sent another PLOGI before last PLOGI could
             * finish. Swap plogi iocbs and terminate old one
             * without acking, new one will get acked when session
             * deletion completes.
             */
            ql_log(ql_log_warn, sess->vha, 0xf094,
                "sess %p received double plogi.\n", sess);

            qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);

            qlt_send_term_imm_notif(vha, iocb, 1);

            res = 0;
            break;
        }

        res = 0;

        /*
         * Save immediate Notif IOCB for Ack when sess is done
         * and being deleted.
         */
        memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
        sess->plogi_ack_needed = 1;

        /*
         * Under normal circumstances we want to release nport handle
         * during LOGO process to avoid nport handle leaks inside FW.
         * The exception is when LOGO is done while another PLOGI with
         * the same nport handle is waiting as might be the case here.
         * Note: there is always a possibility of a race where session
         * deletion has already started for other reasons (e.g. ACL
         * removal) and now PLOGI arrives:
         * 1. if PLOGI arrived in FW after nport handle has been freed,
         *    FW must have assigned this PLOGI a new/same handle and we
         *    can proceed ACK'ing it as usual when session deletion
         *    completes.
         * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
         *    bit reached it, the handle has now been released. We'll
         *    get an error when we ACK this PLOGI. Nothing will be sent
         *    back to initiator. Initiator should eventually retry
         *    PLOGI and situation will correct itself.
         */
        sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
            (sess->s_id.b24 == port_id.b24));
        qlt_schedule_sess_for_deletion(sess, true);
        break;

    case ELS_PRLI:
        wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

        if (wwn)
            sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
                loop_id);

        if (sess != NULL) {
            if (sess->deleted) {
                /*
                 * Impatient initiator sent PRLI before last
                 * PLOGI could finish. Will force him to re-try,
                 * while last one finishes.
                 */
                ql_log(ql_log_warn, sess->vha, 0xf095,
                    "sess %p PRLI received, before plogi ack.\n",
                    sess);
                qlt_send_term_imm_notif(vha, iocb, 1);
                res = 0;
                break;
            }

            /*
             * This shouldn't happen under normal circumstances,
             * since we have deleted the old session during PLOGI
             */
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
                "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
                sess->loop_id, sess, iocb->u.isp24.nport_handle);

            sess->local = 0;
            sess->loop_id = loop_id;
            sess->s_id = port_id;

            if (wd3_lo & BIT_7)
                sess->conf_compl_supported = 1;
        }
        res = 1; /* send notify ack */

        /* Make session global (not used in fabric mode) */
        if (ha->current_topology != ISP_CFG_F) {
            set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
            set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        }
        /* todo: else - create sess here. */
        res = 1; /* send notify ack */
        break;

    case ELS_LOGO:
    case ELS_PRLO:
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;

    case ELS_PDISC:
    case ELS_ADISC:
    {
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
                0, 0, 0, 0, 0, 0);
            tgt->link_reinit_iocb_pending = 0;
        }
        res = 1; /* send notify ack */
        break;
    }

    case ELS_FLOGI: /* should never happen */
    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
            "qla_target(%d): Unsupported ELS command %x "
            "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;
    }

    return res;
}
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
#if 1
    /*
     * FIXME: Reject non zero SRR relative offset until we can test
     * this code properly.
     */
    pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
    return -1;
#else
    struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
    size_t first_offset = 0, rem_offset = offset, tmp = 0;
    int i, sg_srr_cnt, bufflen = 0;

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
        "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
        "cmd->sg_cnt: %u, direction: %d\n",
        cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

    if (!cmd->sg || !cmd->sg_cnt) {
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
            "Missing cmd->sg or zero cmd->sg_cnt in"
            " qla_tgt_set_data_offset\n");
        return -EINVAL;
    }
    /*
     * Walk the current cmd->sg list until we locate the new sg_srr_start
     */
    for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
            "sg[%d]: %p page: %p, length: %d, offset: %d\n",
            i, sg, sg_page(sg), sg->length, sg->offset);

        if ((sg->length + tmp) > offset) {
            first_offset = rem_offset;
            sg_srr_start = sg;
            ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
                "Found matching sg[%d], using %p as sg_srr_start, "
                "and using first_offset: %zu\n", i, sg,
                first_offset);
            break;
        }
        tmp += sg->length;
        rem_offset -= sg->length;
    }

    if (!sg_srr_start) {
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
            "Unable to locate sg_srr_start for offset: %u\n", offset);
        return -EINVAL;
    }
    sg_srr_cnt = (cmd->sg_cnt - i);

    sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
    if (!sg_srr) {
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
            "Unable to allocate sgp\n");
        return -ENOMEM;
    }
    sg_init_table(sg_srr, sg_srr_cnt);
    sgp = &sg_srr[0];
    /*
     * Walk the remaining list for sg_srr_start, mapping to the newly
     * allocated sg_srr taking first_offset into account.
     */
    for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
        if (first_offset) {
            sg_set_page(sgp, sg_page(sg),
                (sg->length - first_offset), first_offset);
            first_offset = 0;
        } else {
            sg_set_page(sgp, sg_page(sg), sg->length, 0);
        }
        bufflen += sgp->length;

        sgp = sg_next(sgp);
        if (!sgp)
            break;
    }

    cmd->sg = sg_srr;
    cmd->sg_cnt = sg_srr_cnt;
    cmd->bufflen = bufflen;
    cmd->offset += offset;
    cmd->free_sg = 1;

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
        cmd->sg_cnt);
    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
        cmd->bufflen);
    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
        cmd->offset);

    if (cmd->sg_cnt < 0)
        BUG();

    if (cmd->bufflen < 0)
        BUG();

    return 0;
#endif
}
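/*
 * Compute the SRR offset relative to what has already been transferred for
 * this command and pick the retransmission type: everything, status only
 * (offset at the end of the buffer), or data from a non-zero offset.
 */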
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
    uint32_t srr_rel_offs, int *xmit_type)
{
    int res = 0, rel_offs;

    rel_offs = srr_rel_offs - cmd->offset;
    ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
        srr_rel_offs, rel_offs);

    *xmit_type = QLA_TGT_XMIT_ALL;

    if (rel_offs < 0) {
        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
            "qla_target(%d): SRR rel_offs (%d) < 0",
            cmd->vha->vp_idx, rel_offs);
        res = -1;
    } else if (rel_offs == cmd->bufflen)
        *xmit_type = QLA_TGT_XMIT_STATUS;
    else if (rel_offs > 0)
        res = qlt_set_data_offset(cmd, rel_offs);

    return res;
}
/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
    struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
    struct imm_ntfy_from_isp *ntfy =
        (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_cmd *cmd = sctio->cmd;
    struct se_cmd *se_cmd = &cmd->se_cmd;
    unsigned long flags;
    int xmit_type = 0, resp = 0;
    uint32_t offset;
    uint8_t srr_ui;

    offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
    srr_ui = ntfy->u.isp24.srr_ui;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
        cmd, srr_ui);

    switch (srr_ui) {
    case SRR_IU_STATUS:
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_send_notify_ack(vha, ntfy,
            0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        xmit_type = QLA_TGT_XMIT_STATUS;
        resp = 1;
        break;
    case SRR_IU_DATA_IN:
        if (!cmd->sg || !cmd->sg_cnt) {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
                "Unable to process SRR_IU_DATA_IN due to"
                " missing cmd->sg, state: %d\n", cmd->state);
            dump_stack();
            goto out_reject;
        }
        if (se_cmd->scsi_status != 0) {
            ql_dbg(ql_dbg_tgt, vha, 0xe02a,
                "Rejecting SRR_IU_DATA_IN with non GOOD "
                "scsi_status\n");
            goto out_reject;
        }
        cmd->bufflen = se_cmd->data_length;

        if (qlt_has_data(cmd)) {
            if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
                goto out_reject;
            spin_lock_irqsave(&ha->hardware_lock, flags);
            qlt_send_notify_ack(vha, ntfy,
                0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            resp = 1;
        } else {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
                "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
                vha->vp_idx, se_cmd->tag,
                cmd->se_cmd.scsi_status);
            goto out_reject;
        }
        break;
    case SRR_IU_DATA_OUT:
        if (!cmd->sg || !cmd->sg_cnt) {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
                "Unable to process SRR_IU_DATA_OUT due to"
                " missing cmd->sg\n");
            dump_stack();
            goto out_reject;
        }
        if (se_cmd->scsi_status != 0) {
            ql_dbg(ql_dbg_tgt, vha, 0xe02b,
                "Rejecting SRR_IU_DATA_OUT"
                " with non GOOD scsi_status\n");
            goto out_reject;
        }
        cmd->bufflen = se_cmd->data_length;

        if (qlt_has_data(cmd)) {
            if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
                goto out_reject;
            spin_lock_irqsave(&ha->hardware_lock, flags);
            qlt_send_notify_ack(vha, ntfy,
                0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            if (xmit_type & QLA_TGT_XMIT_DATA) {
                cmd->cmd_flags |= BIT_8;
                qlt_rdy_to_xfer(cmd);
            }
        } else {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
                "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
                vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
            goto out_reject;
        }
        break;
    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
            "qla_target(%d): Unknown srr_ui value %x",
            vha->vp_idx, srr_ui);
        goto out_reject;
    }

    /* Transmit response in case of status and data-in cases */
    if (resp) {
        cmd->cmd_flags |= BIT_7;
        qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
    }

    return;

out_reject:
    spin_lock_irqsave(&ha->hardware_lock, flags);
    qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
        NOTIFY_ACK_SRR_FLAGS_REJECT,
        NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
        NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
    if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
        cmd->state = QLA_TGT_STATE_DATA_IN;
        dump_stack();
    } else {
        cmd->cmd_flags |= BIT_9;
        qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
    struct qla_tgt_srr_imm *imm, int ha_locked)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags = 0;

    if (!ha_locked)
        spin_lock_irqsave(&ha->hardware_lock, flags);

    qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
        NOTIFY_ACK_SRR_FLAGS_REJECT,
        NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
        NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

    if (!ha_locked)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

    kfree(imm);
}
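/*
 * Process-context worker: pair each queued CTIO SRR with its IMM SRR by
 * srr_id, then handle each matched pair outside srr_lock via
 * qlt_handle_srr().
 */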
static void qlt_handle_srr_work(struct work_struct *work)
{
    struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_tgt_srr_ctio *sctio;
    unsigned long flags;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
        tgt);

restart:
    spin_lock_irqsave(&tgt->srr_lock, flags);
    list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
        struct qla_tgt_srr_imm *imm, *i, *ti;
        struct qla_tgt_cmd *cmd;
        struct se_cmd *se_cmd;

        imm = NULL;
        list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
            srr_list_entry) {
            if (i->srr_id == sctio->srr_id) {
                list_del(&i->srr_list_entry);
                if (imm) {
                    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
                        "qla_target(%d): There must be "
                        "only one IMM SRR per CTIO SRR "
                        "(IMM SRR %p, id %d, CTIO %p\n",
                        vha->vp_idx, i, i->srr_id, sctio);
                    qlt_reject_free_srr_imm(tgt->vha, i, 0);
                } else
                    imm = i;
            }
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
            "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
            sctio->srr_id);

        if (imm == NULL) {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
                "Not found matching IMM for SRR CTIO (id %d)\n",
                sctio->srr_id);
            continue;
        } else
            list_del(&sctio->srr_list_entry);

        spin_unlock_irqrestore(&tgt->srr_lock, flags);

        cmd = sctio->cmd;
        /*
         * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
         * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
         * logic.
         */
        cmd->offset = 0;
        if (cmd->free_sg) {
            kfree(cmd->sg);
            cmd->sg = NULL;
            cmd->free_sg = 0;
        }
        se_cmd = &cmd->se_cmd;

        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
            "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
            cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
            se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);

        qlt_handle_srr(vha, sctio, imm);

        kfree(imm);
        kfree(sctio);
        goto restart;
    }
    spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_tgt_srr_imm *imm;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_tgt_srr_ctio *sctio;

    tgt->imm_srr_id++;

    ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
        vha->vp_idx);

    imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
    if (imm != NULL) {
        memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

        /* IRQ is already OFF */
        spin_lock(&tgt->srr_lock);
        imm->srr_id = tgt->imm_srr_id;
        list_add_tail(&imm->srr_list_entry,
            &tgt->srr_imm_list);
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
            "IMM NTFY SRR %p added (id %d, ui %x)\n",
            imm, imm->srr_id, iocb->u.isp24.srr_ui);
        if (tgt->imm_srr_id == tgt->ctio_srr_id) {
            int found = 0;
            list_for_each_entry(sctio, &tgt->srr_ctio_list,
                srr_list_entry) {
                if (sctio->srr_id == imm->srr_id) {
                    found = 1;
                    break;
                }
            }
            if (found) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
                    "Scheduling srr work\n");
                schedule_work(&tgt->srr_work);
            } else {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
                    "qla_target(%d): imm_srr_id "
                    "== ctio_srr_id (%d), but there is no "
                    "corresponding SRR CTIO, deleting IMM "
                    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
                    imm);
                list_del(&imm->srr_list_entry);

                kfree(imm);

                spin_unlock(&tgt->srr_lock);
                goto out_reject;
            }
        }
        spin_unlock(&tgt->srr_lock);
    } else {
        struct qla_tgt_srr_ctio *ts;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
            "qla_target(%d): Unable to allocate SRR IMM "
            "entry, SRR request will be rejected\n", vha->vp_idx);

        /* IRQ is already OFF */
        spin_lock(&tgt->srr_lock);
        list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
            srr_list_entry) {
            if (sctio->srr_id == tgt->imm_srr_id) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
                    "CTIO SRR %p deleted (id %d)\n",
                    sctio, sctio->srr_id);
                list_del(&sctio->srr_list_entry);
                qlt_send_term_exchange(vha, sctio->cmd,
                    &sctio->cmd->atio, 1);
                kfree(sctio);
            }
        }
        spin_unlock(&tgt->srr_lock);
        goto out_reject;
    }

    return;

out_reject:
    qlt_send_notify_ack(vha, iocb, 0, 0, 0,
        NOTIFY_ACK_SRR_FLAGS_REJECT,
        NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
        NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t add_flags = 0;
    int send_notify_ack = 1;
    uint16_t status;

    status = le16_to_cpu(iocb->u.isp2x.status);
    switch (status) {
    case IMM_NTFY_LIP_RESET:
    {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
            "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
            vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);

        if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
            send_notify_ack = 0;
        break;
    }

    case IMM_NTFY_LIP_LINK_REINIT:
    {
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
            "qla_target(%d): LINK REINIT (loop %#x, "
            "subcode %x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
                0, 0, 0, 0, 0, 0);
        }
        memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
        tgt->link_reinit_iocb_pending = 1;
        /*
         * QLogic requires a wait after LINK REINIT for possible
         * PDISC or ADISC ELS commands
         */
        send_notify_ack = 0;
        break;
    }

    case IMM_NTFY_PORT_LOGOUT:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
            "qla_target(%d): Port logout (loop "
            "%#x, subcode %x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);

        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_GLBL_TPRLO:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
            "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_PORT_CONFIG:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
            "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
            status);
        if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_GLBL_LOGO:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
            "qla_target(%d): Link failure detected\n",
            vha->vp_idx);
        /* I_T nexus loss */
        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_IOCB_OVERFLOW:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
            "qla_target(%d): Cannot provide requested "
            "capability (IOCB overflowed the immediate notify "
            "resource count)\n", vha->vp_idx);
        break;

    case IMM_NTFY_ABORT_TASK:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
            "qla_target(%d): Abort Task (S %08x I %#x -> "
            "L %#x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp2x.seq_id),
            GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
            le16_to_cpu(iocb->u.isp2x.lun));
        if (qlt_abort_task(vha, iocb) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_RESOURCE:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
            "qla_target(%d): Out of resources, host %ld\n",
            vha->vp_idx, vha->host_no);
        break;

    case IMM_NTFY_MSG_RX:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
            "qla_target(%d): Immediate notify task %x\n",
            vha->vp_idx, iocb->u.isp2x.task_flags);
        if (qlt_handle_task_mgmt(vha, iocb) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_ELS:
        if (qlt_24xx_handle_els(vha, iocb) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_SRR:
        qlt_prepare_srr_imm(vha, iocb);
        send_notify_ack = 0;
        break;

    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
            "qla_target(%d): Received unknown immediate "
            "notify status %x\n", vha->vp_idx, status);
        break;
    }

    if (send_notify_ack)
        qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint16_t status)
{
    struct ctio7_to_24xx *ctio24;
    struct qla_hw_data *ha = vha->hw;
    request_t *pkt;
    struct qla_tgt_sess *sess = NULL;

    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
        atio->u.isp24.fcp_hdr.s_id);
    if (!sess) {
        qlt_send_term_exchange(vha, NULL, atio, 1);
        return 0;
    }
    /* Sending marker isn't necessary, since we called from ISR */

    pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
    if (!pkt) {
        ql_dbg(ql_dbg_io, vha, 0x3063,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet", vha->vp_idx, __func__);
        return -ENOMEM;
    }

    pkt->entry_count = 1;
    pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

    ctio24 = (struct ctio7_to_24xx *)pkt;
    ctio24->entry_type = CTIO_TYPE7;
    ctio24->nport_handle = sess->loop_id;
    ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio24->vp_index = vha->vp_idx;
    ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
    ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
    ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
    ctio24->exchange_addr = atio->u.isp24.exchange_addr;
    ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
        cpu_to_le16(
            CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
            CTIO7_FLAGS_DONT_RET_CTIO);
    /*
     * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
     * if the explicit confirmation is used.
     */
    ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
    ctio24->u.status1.scsi_status = cpu_to_le16(status);
    /* Memory Barrier */
    wmb();
    qla2x00_start_iocbs(vha, vha->req);
    return 0;
}
/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint16_t status, int qfull)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess;
    struct se_session *se_sess;
    struct qla_tgt_cmd *cmd;
    int tag;

    if (unlikely(tgt->tgt_stop)) {
        ql_dbg(ql_dbg_io, vha, 0x300a,
            "New command while device %p is shutting down\n", tgt);
        return;
    }

    if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
        vha->hw->tgt.num_qfull_cmds_dropped++;
        if (vha->hw->tgt.num_qfull_cmds_dropped >
            vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
            vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
                vha->hw->tgt.num_qfull_cmds_dropped;

        ql_dbg(ql_dbg_io, vha, 0x3068,
            "qla_target(%d): %s: QFull CMD dropped[%d]\n",
            vha->vp_idx, __func__,
            vha->hw->tgt.num_qfull_cmds_dropped);

        qlt_chk_exch_leak_thresh_hold(vha);
        return;
    }

    sess = ha->tgt.tgt_ops->find_sess_by_s_id
        (vha, atio->u.isp24.fcp_hdr.s_id);
    if (!sess)
        return;

    se_sess = sess->se_sess;

    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    if (tag < 0)
        return;

    cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
    if (!cmd) {
        ql_dbg(ql_dbg_io, vha, 0x3009,
            "qla_target(%d): %s: Allocation of cmd failed\n",
            vha->vp_idx, __func__);

        vha->hw->tgt.num_qfull_cmds_dropped++;
        if (vha->hw->tgt.num_qfull_cmds_dropped >
            vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
            vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
                vha->hw->tgt.num_qfull_cmds_dropped;

        qlt_chk_exch_leak_thresh_hold(vha);
        return;
    }

    memset(cmd, 0, sizeof(struct qla_tgt_cmd));

    qlt_incr_num_pend_cmds(vha);
    INIT_LIST_HEAD(&cmd->cmd_list);
    memcpy(&cmd->atio, atio, sizeof(*atio));

    cmd->tgt = vha->vha_tgt.qla_tgt;
    cmd->vha = vha;
    cmd->reset_count = vha->hw->chip_reset;
    cmd->q_full = 1;

    if (qfull) {
        cmd->q_full = 1;
        /* NOTE: borrowing the state field to carry the status */
        cmd->state = status;
    } else
        cmd->term_exchg = 1;

    list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

    vha->hw->tgt.num_qfull_cmds_alloc++;
    if (vha->hw->tgt.num_qfull_cmds_alloc >
        vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
        vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
            vha->hw->tgt.num_qfull_cmds_alloc;
}
void
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    struct qla_tgt_cmd *cmd, *tcmd;
    struct list_head free_list;
    int rc = 0;

    if (list_empty(&ha->tgt.q_full_list))
        return;

    INIT_LIST_HEAD(&free_list);

    spin_lock_irqsave(&vha->hw->hardware_lock, flags);

    if (list_empty(&ha->tgt.q_full_list)) {
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
        return;
    }

    list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
        if (cmd->q_full)
            /* cmd->state is a borrowed field to hold status */
            rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
        else if (cmd->term_exchg)
            rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

        if (rc == -ENOMEM)
            break;

        if (cmd->q_full)
            ql_dbg(ql_dbg_io, vha, 0x3006,
                "%s: busy sent for ox_id[%04x]\n", __func__,
                be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
        else if (cmd->term_exchg)
            ql_dbg(ql_dbg_io, vha, 0x3007,
                "%s: Term exchg sent for ox_id[%04x]\n", __func__,
                be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
        else
            ql_dbg(ql_dbg_io, vha, 0x3008,
                "%s: Unexpected cmd in QFull list %p\n", __func__,
                cmd);

        list_del(&cmd->cmd_list);
        list_add_tail(&cmd->cmd_list, &free_list);

        /* piggy back on hardware_lock for protection */
        vha->hw->tgt.num_qfull_cmds_alloc--;
    }

    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

    cmd = NULL;

    list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
        list_del(&cmd->cmd_list);
        /* This cmd was never sent to TCM. There is no need
         * to schedule free or call free_cmd
         */
        qlt_free_cmd(cmd);
    }
}
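/*
 * Try to send the busy status inline; if no IOCB space is available
 * (-ENOMEM), queue a qfull command so the busy reply can be retried later
 * from qlt_free_qfull_cmds().
 */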
static void
qlt_send_busy(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint16_t status)
{
    int rc = 0;

    rc = __qlt_send_busy(vha, atio, status);
    if (rc == -ENOMEM)
        qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
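/*
 * Returns 1 (and replies busy to the initiator) when the number of pending
 * commands has reached the queue-full threshold, 0 otherwise.
 */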
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
    struct atio_from_isp *atio)
{
    struct qla_hw_data *ha = vha->hw;
    uint16_t status;

    if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
        return 0;

    status = temp_sam_status;
    qlt_send_busy(vha, atio, status);
    return 1;
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
    struct atio_from_isp *atio)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    int rc;

    if (unlikely(tgt == NULL)) {
        ql_dbg(ql_dbg_io, vha, 0x3064,
            "ATIO pkt, but no tgt (ha %p)", ha);
        return;
    }
    /*
     * In tgt_stop mode we also should allow all requests to pass.
     * Otherwise, some commands can get stuck.
     */

    tgt->irq_cmd_count++;

    switch (atio->u.raw.entry_type) {
    case ATIO_TYPE7:
        if (unlikely(atio->u.isp24.exchange_addr ==
            ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
            ql_dbg(ql_dbg_io, vha, 0x3065,
                "qla_target(%d): ATIO_TYPE7 "
                "received with UNKNOWN exchange address, "
                "sending QUEUE_FULL\n", vha->vp_idx);
            qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
            break;
        }

        if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
            rc = qlt_chk_qfull_thresh_hold(vha, atio);
            if (rc != 0) {
                tgt->irq_cmd_count--;
                return;
            }
            rc = qlt_handle_cmd_for_atio(vha, atio);
        } else {
            rc = qlt_handle_task_mgmt(vha, atio);
        }
        if (unlikely(rc != 0)) {
            if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
                qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
                qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
            } else {
                if (tgt->tgt_stop) {
                    ql_dbg(ql_dbg_tgt, vha, 0xe059,
                        "qla_target: Unable to send "
                        "command to target for req, "
                        "ignoring.\n");
                } else {
                    ql_dbg(ql_dbg_tgt, vha, 0xe05a,
                        "qla_target(%d): Unable to send "
                        "command to target, sending BUSY "
                        "status.\n", vha->vp_idx);
                    qlt_send_busy(vha, atio, SAM_STAT_BUSY);
                }
            }
        }
        break;

    case IMMED_NOTIFY_TYPE:
    {
        if (unlikely(atio->u.isp2x.entry_status != 0)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe05b,
                "qla_target(%d): Received ATIO packet %x "
                "with error status %x\n", vha->vp_idx,
                atio->u.raw.entry_type,
                atio->u.isp2x.entry_status);
            break;
        }
        ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
        qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
        break;
    }

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe05c,
            "qla_target(%d): Received unknown ATIO atio "
            "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
        break;
    }

    tgt->irq_cmd_count--;
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

    if (unlikely(tgt == NULL)) {
        ql_dbg(ql_dbg_tgt, vha, 0xe05d,
            "qla_target(%d): Response pkt %x received, but no "
            "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
        return;
    }

    /*
     * In tgt_stop mode we also should allow all requests to pass.
     * Otherwise, some commands can get stuck.
     */

    tgt->irq_cmd_count++;

    switch (pkt->entry_type) {
    case CTIO_CRC2:
    case CTIO_TYPE7:
    {
        struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
        qlt_do_ctio_completion(vha, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case ACCEPT_TGT_IO_TYPE:
    {
        struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
        int rc;
        if (atio->u.isp2x.status !=
            cpu_to_le16(ATIO_CDB_VALID)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe05e,
                "qla_target(%d): ATIO with error "
                "status %x received\n", vha->vp_idx,
                le16_to_cpu(atio->u.isp2x.status));
            break;
        }

        rc = qlt_chk_qfull_thresh_hold(vha, atio);
        if (rc != 0) {
            tgt->irq_cmd_count--;
            return;
        }

        rc = qlt_handle_cmd_for_atio(vha, atio);
        if (unlikely(rc != 0)) {
            if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
                qlt_send_busy(vha, atio, 0);
#else
                qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
            } else {
                if (tgt->tgt_stop) {
                    ql_dbg(ql_dbg_tgt, vha, 0xe05f,
                        "qla_target: Unable to send "
                        "command to target, sending TERM "
                        "EXCHANGE for rsp\n");
                    qlt_send_term_exchange(vha, NULL,
                        atio, 1);
                } else {
                    ql_dbg(ql_dbg_tgt, vha, 0xe060,
                        "qla_target(%d): Unable to send "
                        "command to target, sending BUSY "
                        "status\n", vha->vp_idx);
                    qlt_send_busy(vha, atio, 0);
                }
            }
        }
        break;
    }

    case CONTINUE_TGT_IO_TYPE:
    {
        struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
        qlt_do_ctio_completion(vha, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case CTIO_A64_TYPE:
    {
        struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
        qlt_do_ctio_completion(vha, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case IMMED_NOTIFY_TYPE:
        ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
        qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
        break;

    case NOTIFY_ACK_TYPE:
        if (tgt->notify_ack_expected > 0) {
            struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
            ql_dbg(ql_dbg_tgt, vha, 0xe036,
                "NOTIFY_ACK seq %08x status %x\n",
                le16_to_cpu(entry->u.isp2x.seq_id),
                le16_to_cpu(entry->u.isp2x.status));
            tgt->notify_ack_expected--;
            if (entry->u.isp2x.status !=
                cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe061,
                    "qla_target(%d): NOTIFY_ACK "
                    "failed %x\n", vha->vp_idx,
                    le16_to_cpu(entry->u.isp2x.status));
            }
        } else {
            ql_dbg(ql_dbg_tgt, vha, 0xe062,
                "qla_target(%d): Unexpected NOTIFY_ACK received\n",
                vha->vp_idx);
        }
        break;

    case ABTS_RECV_24XX:
        ql_dbg(ql_dbg_tgt, vha, 0xe037,
            "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
        qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
        break;

    case ABTS_RESP_24XX:
        if (tgt->abts_resp_expected > 0) {
            struct abts_resp_from_24xx_fw *entry =
                (struct abts_resp_from_24xx_fw *)pkt;
            ql_dbg(ql_dbg_tgt, vha, 0xe038,
                "ABTS_RESP_24XX: compl_status %x\n",
                entry->compl_status);
            tgt->abts_resp_expected--;
            if (le16_to_cpu(entry->compl_status) !=
                ABTS_RESP_COMPL_SUCCESS) {
                if ((entry->error_subcode1 == 0x1E) &&
                    (entry->error_subcode2 == 0)) {
                    /*
                     * We've got a race here: aborted
                     * exchange not terminated, i.e.
                     * response for the aborted command was
                     * sent between the abort request was
                     * received and processed.
                     * Unfortunately, the firmware has a
                     * silly requirement that all aborted
                     * exchanges must be explicitly
                     * terminated, otherwise it refuses to
                     * send responses for the abort
                     * requests. So, we have to
                     * (re)terminate the exchange and retry
                     * the abort response.
                     */
                    qlt_24xx_retry_term_exchange(vha,
                        entry);
                } else
                    ql_dbg(ql_dbg_tgt, vha, 0xe063,
                        "qla_target(%d): ABTS_RESP_24XX "
                        "failed %x (subcode %x:%x)",
                        vha->vp_idx, entry->compl_status,
                        entry->error_subcode1,
                        entry->error_subcode2);
            }
        } else {
            ql_dbg(ql_dbg_tgt, vha, 0xe064,
                "qla_target(%d): Unexpected ABTS_RESP_24XX "
                "received\n", vha->vp_idx);
        }
        break;

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe065,
            "qla_target(%d): Received unknown response pkt "
            "type %x\n", vha->vp_idx, pkt->entry_type);
        break;
    }

    tgt->irq_cmd_count--;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
    uint16_t *mailbox)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    int login_code;

    if (!ha->tgt.tgt_ops)
        return;

    if (unlikely(tgt == NULL)) {
        ql_dbg(ql_dbg_tgt, vha, 0xe03a,
            "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
        return;
    }

    if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
        IS_QLA2100(ha))
        return;
    /*
     * In tgt_stop mode we also should allow all requests to pass.
     * Otherwise, some commands can get stuck.
     */

    tgt->irq_cmd_count++;

    switch (code) {
    case MBA_RESET:                 /* Reset */
    case MBA_SYSTEM_ERR:            /* System Error */
    case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
    case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
            "qla_target(%d): System error async event %#x "
            "occurred", vha->vp_idx, code);
        break;
    case MBA_WAKEUP_THRES:          /* Request Queue Wake-up. */
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_LOOP_UP:
    {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
            "qla_target(%d): Async LOOP_UP occurred "
            "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
            le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
            le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
                0, 0, 0, 0, 0, 0);
            tgt->link_reinit_iocb_pending = 0;
        }
        break;
    }

    case MBA_LIP_OCCURRED:
    case MBA_LOOP_DOWN:
    case MBA_LIP_RESET:
    case MBA_RSCN_UPDATE:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
            "qla_target(%d): Async event %#x occurred "
            "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
            le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
            le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
        break;

    case MBA_PORT_UPDATE:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
            "qla_target(%d): Port update async event %#x "
            "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
            "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
            le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
            le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

        login_code = le16_to_cpu(mailbox[2]);
        if (login_code == 0x4)
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
                "Async MB 2: Got PLOGI Complete\n");
        else if (login_code == 0x7)
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
                "Async MB 2: Port Logged Out\n");
        break;

    default:
        break;
    }

    tgt->irq_cmd_count--;
}
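/*
 * Allocate a temporary fc_port and fill it from the firmware's port
 * database for the given loop_id; the caller owns (and must kfree) it.
 */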
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
    uint16_t loop_id)
{
    fc_port_t *fcport;
    int rc;

    fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
    if (!fcport) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
            "qla_target(%d): Allocation of tmp FC port failed",
            vha->vp_idx);
        return NULL;
    }

    fcport->loop_id = loop_id;

    rc = qla2x00_get_port_database(vha, fcport, 0);
    if (rc != QLA_SUCCESS) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
            "qla_target(%d): Failed to retrieve fcport "
            "information -- get_port_database() returned %x "
            "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
        kfree(fcport);
        return NULL;
    }

    return fcport;
}
/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
    uint8_t *s_id)
{
    struct qla_tgt_sess *sess = NULL;
    fc_port_t *fcport = NULL;
    int rc, global_resets;
    uint16_t loop_id = 0;

retry:
    global_resets =
        atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

    rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
    if (rc != 0) {
        if ((s_id[0] == 0xFF) &&
            (s_id[1] == 0xFC)) {
            /*
             * This is Domain Controller, so it should be
             * OK to drop SCSI commands from it.
             */
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
                "Unable to find initiator with S_ID %x:%x:%x",
                s_id[0], s_id[1], s_id[2]);
        } else
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
                "qla_target(%d): Unable to find "
                "initiator with S_ID %x:%x:%x",
                vha->vp_idx, s_id[0], s_id[1],
                s_id[2]);
        return NULL;
    }

    fcport = qlt_get_port_database(vha, loop_id);
    if (!fcport)
        return NULL;

    if (global_resets !=
        atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
            "qla_target(%d): global reset during session discovery "
            "(counter was %d, new %d), retrying", vha->vp_idx,
            global_resets,
            atomic_read(&vha->vha_tgt.
                qla_tgt->tgt_global_resets_count));
        goto retry;
    }

    sess = qlt_create_sess(vha, fcport, true);

    kfree(fcport);
    return sess;
}
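/*
 * Deferred ABTS handling for the sess_work path: look up (or create) the
 * session for the ABTS s_id, then retry __qlt_24xx_handle_abts() with it.
 */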
static void qlt_abort_work(struct qla_tgt *tgt,
    struct qla_tgt_sess_work_param *prm)
{
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess = NULL;
    unsigned long flags;
    uint32_t be_s_id;
    uint8_t s_id[3];
    int rc;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    if (tgt->tgt_stop)
        goto out_term;

    s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
    s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
    s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
        (unsigned char *)&be_s_id);
    if (!sess) {
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        sess = qlt_make_local_sess(vha, s_id);
        /* sess has got an extra creation ref */
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!sess)
            goto out_term;
    } else {
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
            sess = NULL;
            goto out_term;
        }

        kref_get(&sess->se_sess->sess_kref);
    }

    if (tgt->tgt_stop)
        goto out_term;

    rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
    if (rc != 0)
        goto out_term;

    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return;

out_term:
    qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
    if (sess)
        ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
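/*
 * Deferred task-management handling: resolve the session for the TM IOCB's
 * s_id (creating a local session if needed) and issue the task management
 * function; terminate the exchange on any failure.
 */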
static void qlt_tmr_work(struct qla_tgt *tgt,
    struct qla_tgt_sess_work_param *prm)
{
    struct atio_from_isp *a = &prm->tm_iocb2;
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess = NULL;
    unsigned long flags;
    uint8_t *s_id = NULL; /* to hide compiler warnings */
    int rc;
    uint32_t lun, unpacked_lun;
    int fn;
    void *iocb;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    if (tgt->tgt_stop)
        goto out_term;

    s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
    if (!sess) {
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        sess = qlt_make_local_sess(vha, s_id);
        /* sess has got an extra creation ref */
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!sess)
            goto out_term;
    } else {
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
            sess = NULL;
            goto out_term;
        }

        kref_get(&sess->se_sess->sess_kref);
    }

    iocb = a;
    lun = a->u.isp24.fcp_cmnd.lun;
    fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
    unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

    rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
    if (rc != 0)
        goto out_term;

    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return;

out_term:
    qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
    if (sess)
        ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
{
    struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
    struct scsi_qla_host *vha = tgt->vha;
    unsigned long flags;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

    spin_lock_irqsave(&tgt->sess_work_lock, flags);
    while (!list_empty(&tgt->sess_works_list)) {
        struct qla_tgt_sess_work_param *prm = list_entry(
            tgt->sess_works_list.next, typeof(*prm),
            sess_works_list_entry);

        /*
         * This work can be scheduled on several CPUs at time, so we
         * must delete the entry to eliminate double processing
         */
        list_del(&prm->sess_works_list_entry);

        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        switch (prm->type) {
        case QLA_TGT_SESS_WORK_ABORT:
            qlt_abort_work(tgt, prm);
            break;
        case QLA_TGT_SESS_WORK_TM:
            qlt_tmr_work(tgt, prm);
            break;
        default:
            BUG_ON(1);
            break;
        }

        spin_lock_irqsave(&tgt->sess_work_lock, flags);

        kfree(prm);
    }
    spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
    struct qla_tgt *tgt;

    if (!QLA_TGT_MODE_ENABLED())
        return 0;

    if (!IS_TGT_MODE_CAPABLE(ha)) {
        ql_log(ql_log_warn, base_vha, 0xe070,
            "This adapter does not support target mode.\n");
        return 0;
    }

    ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
        "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

    BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

    tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
    if (!tgt) {
        ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
            "Unable to allocate struct qla_tgt\n");
        return -ENOMEM;
    }

    if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
        base_vha->host->hostt->supported_mode |= MODE_TARGET;

    tgt->ha = ha;
    tgt->vha = base_vha;
    init_waitqueue_head(&tgt->waitQ);
    INIT_LIST_HEAD(&tgt->sess_list);
    INIT_LIST_HEAD(&tgt->del_sess_list);
    INIT_DELAYED_WORK(&tgt->sess_del_work,
        (void (*)(struct work_struct *))qlt_del_sess_work_fn);
    spin_lock_init(&tgt->sess_work_lock);
    INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
    INIT_LIST_HEAD(&tgt->sess_works_list);
    spin_lock_init(&tgt->srr_lock);
    INIT_LIST_HEAD(&tgt->srr_ctio_list);
    INIT_LIST_HEAD(&tgt->srr_imm_list);
    INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
    atomic_set(&tgt->tgt_global_resets_count, 0);

    base_vha->vha_tgt.qla_tgt = tgt;

    ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
        "qla_target(%d): using 64 Bit PCI addressing",
        base_vha->vp_idx);
    tgt->tgt_enable_64bit_addr = 1;
    /* 3 is reserved */
    tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
    tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
    tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

    if (base_vha->fc_vport)
        return 0;

    mutex_lock(&qla_tgt_mutex);
    list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
    mutex_unlock(&qla_tgt_mutex);

    return 0;
}
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
    if (!vha->vha_tgt.qla_tgt)
        return 0;

    if (vha->fc_vport) {
        qlt_release(vha->vha_tgt.qla_tgt);
        return 0;
    }

    /* free left over qfull cmds */
    qlt_init_term_exchange(vha);

    mutex_lock(&qla_tgt_mutex);
    list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
    mutex_unlock(&qla_tgt_mutex);

    ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
        vha->host_no, ha);
    qlt_release(vha->vha_tgt.qla_tgt);

    return 0;
}
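/*
 * Debug helper: dump the hardware node/port names and the WWPN passed in
 * from configfs (also serialized into b[] for the caller to compare).
 */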
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
    unsigned char *b)
{
    int i;

    pr_debug("qla2xxx HW vha->node_name: ");
    for (i = 0; i < WWN_SIZE; i++)
        pr_debug("%02x ", vha->node_name[i]);
    pr_debug("\n");
    pr_debug("qla2xxx HW vha->port_name: ");
    for (i = 0; i < WWN_SIZE; i++)
        pr_debug("%02x ", vha->port_name[i]);
    pr_debug("\n");

    pr_debug("qla2xxx passed configfs WWPN: ");
    put_unaligned_be64(wwpn, b);
    for (i = 0; i < WWN_SIZE; i++)
        pr_debug("%02x ", b[i]);
    pr_debug("\n");
}
/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
    u64 npiv_wwpn, u64 npiv_wwnn,
    int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
    struct qla_tgt *tgt;
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha;
    struct Scsi_Host *host;
    unsigned long flags;
    int rc;
    u8 b[WWN_SIZE];

    mutex_lock(&qla_tgt_mutex);
    list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
        vha = tgt->vha;
        ha = vha->hw;

        host = vha->host;
        if (!host)
            continue;

        if (!(host->hostt->supported_mode & MODE_TARGET))
            continue;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
            pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
                host->host_no);
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            continue;
        }
        if (tgt->tgt_stop) {
            pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
                host->host_no);
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            continue;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (!scsi_host_get(host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe068,
                "Unable to scsi_host_get() for"
                " qla2xxx scsi_host\n");
            continue;
        }
        qlt_lport_dump(vha, phys_wwpn, b);

        if (memcmp(vha->port_name, b, WWN_SIZE)) {
            scsi_host_put(host);
            continue;
        }
        rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
        if (rc != 0)
            scsi_host_put(host);

        mutex_unlock(&qla_tgt_mutex);
        return rc;
    }
    mutex_unlock(&qla_tgt_mutex);

    return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct Scsi_Host *sh = vha->host;
    /*
     * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
     */
    vha->vha_tgt.target_lport_ptr = NULL;
    ha->tgt.tgt_ops = NULL;
    /*
     * Release the Scsi_Host reference for the underlying qla2xxx host
     */
    scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;

    switch (ql2x_ini_mode) {
    case QLA2XXX_INI_MODE_DISABLED:
    case QLA2XXX_INI_MODE_EXCLUSIVE:
        vha->host->active_mode = MODE_TARGET;
        break;
    case QLA2XXX_INI_MODE_ENABLED:
        vha->host->active_mode |= MODE_TARGET;
        break;
    default:
        break;
    }

    if (ha->tgt.ini_mode_force_reverse)
        qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;

    switch (ql2x_ini_mode) {
    case QLA2XXX_INI_MODE_DISABLED:
        vha->host->active_mode = MODE_UNKNOWN;
        break;
    case QLA2XXX_INI_MODE_EXCLUSIVE:
        vha->host->active_mode = MODE_INITIATOR;
        break;
    case QLA2XXX_INI_MODE_ENABLED:
        vha->host->active_mode &= ~MODE_TARGET;
        break;
    default:
        break;
    }

    if (ha->tgt.ini_mode_force_reverse)
        qla_reverse_ini_mode(vha);
}
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    unsigned long flags;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    if (!tgt) {
        ql_dbg(ql_dbg_tgt, vha, 0xe069,
            "Unable to locate qla_tgt pointer from"
            " struct qla_hw_data\n");
        dump_stack();
        return;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);
    tgt->tgt_stopped = 0;
    qlt_set_mode(vha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (vha->vp_idx) {
        qla24xx_disable_vp(vha);
        qla24xx_enable_vp(vha);
    } else {
        set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
        qla2xxx_wake_dpc(base_vha);
        qla2x00_wait_for_hba_online(base_vha);
    }
}
EXPORT_SYMBOL(qlt_enable_vha);
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    unsigned long flags;

    if (!tgt) {
        ql_dbg(ql_dbg_tgt, vha, 0xe06a,
            "Unable to locate qla_tgt pointer from"
            " struct qla_hw_data\n");
        dump_stack();
        return;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);
    qlt_clear_mode(vha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    qla2xxx_wake_dpc(vha);
    qla2x00_wait_for_hba_online(vha);
}
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
    if (!qla_tgt_mode_enabled(vha))
        return;

    vha->vha_tgt.qla_tgt = NULL;

    mutex_init(&vha->vha_tgt.tgt_mutex);
    mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

    qlt_clear_mode(vha);

    /*
     * NOTE: Currently the value is kept the same for <24xx and
     * >=24xx ISPs. If it is necessary to change it,
     * the check should be added for specific ISPs,
     * assigning the value appropriately.
     */
    ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

    qlt_add_target(ha, vha);
}
void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
    /*
     * FC-4 Feature bit 0 indicates target functionality to the name server.
     */
    if (qla_tgt_mode_enabled(vha)) {
        if (qla_ini_mode_enabled(vha))
            ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
        else
            ct_req->req.rff_id.fc4_feature = BIT_0;
    } else if (qla_ini_mode_enabled(vha)) {
        ct_req->req.rff_id.fc4_feature = BIT_1;
    }
}
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @ha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint16_t cnt;
    struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

    if (!qla_tgt_mode_enabled(vha))
        return;

    for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
        pkt->u.raw.signature = ATIO_PROCESSED;
        pkt++;
    }
}
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct atio_from_isp *pkt;
    int cnt, i;

    if (!vha->flags.online)
        return;

    while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
        pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
        cnt = pkt->u.raw.entry_count;

        qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

        for (i = 0; i < cnt; i++) {
            ha->tgt.atio_ring_index++;
            if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
                ha->tgt.atio_ring_index = 0;
                ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
            } else
                ha->tgt.atio_ring_ptr++;

            pkt->u.raw.signature = ATIO_PROCESSED;
            pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
        }
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
    RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
}
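/*
 * Reset the ATIO queue in/out pointers and, on MSI-X capable ISPs, register
 * the ATIO queue interrupt vector in the init control block.
 */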
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    if (!QLA_TGT_MODE_ENABLED())
        return;

    WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
    WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
    RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

    if (IS_ATIO_MSIX_CAPABLE(ha)) {
        struct qla_msix_entry *msix = &ha->msix_entries[2];
        struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

        icb->msix_atio = cpu_to_le16(msix->entry);
        ql_dbg(ql_dbg_init, vha, 0xf072,
            "Registering ICB vector 0x%x for atio que.\n",
            msix->entry);
    }
}
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
    struct qla_hw_data *ha = vha->hw;

    if (qla_tgt_mode_enabled(vha)) {
        if (!ha->tgt.saved_set) {
            /* We save only once */
            ha->tgt.saved_exchange_count = nv->exchange_count;
            ha->tgt.saved_firmware_options_1 =
                nv->firmware_options_1;
            ha->tgt.saved_firmware_options_2 =
                nv->firmware_options_2;
            ha->tgt.saved_firmware_options_3 =
                nv->firmware_options_3;
            ha->tgt.saved_set = 1;
        }

        nv->exchange_count = cpu_to_le16(0xFFFF);

        /* Enable target mode */
        nv->firmware_options_1 |= cpu_to_le32(BIT_4);

        /* Disable ini mode, if requested */
        if (!qla_ini_mode_enabled(vha))
            nv->firmware_options_1 |= cpu_to_le32(BIT_5);

        /* Disable Full Login after LIP */
        nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
        /* Enable initial LIP */
        nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
        if (ql2xtgt_tape_enable)
            /* Enable FC Tape support */
            nv->firmware_options_2 |= cpu_to_le32(BIT_12);
        else
            /* Disable FC Tape support */
            nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

        /* Disable Full Login after LIP */
        nv->host_p &= cpu_to_le32(~BIT_10);
        /* Enable target PRLI control */
        nv->firmware_options_2 |= cpu_to_le32(BIT_14);
    } else {
        if (ha->tgt.saved_set) {
            nv->exchange_count = ha->tgt.saved_exchange_count;
            nv->firmware_options_1 =
                ha->tgt.saved_firmware_options_1;
            nv->firmware_options_2 =
                ha->tgt.saved_firmware_options_2;
            nv->firmware_options_3 =
                ha->tgt.saved_firmware_options_3;
        }
        return;
    }

    /* out-of-order frames reassembly */
    nv->firmware_options_3 |= BIT_6|BIT_9;

    if (ha->tgt.enable_class_2) {
        if (vha->flags.init_done)
            fc_host_supported_classes(vha->host) =
                FC_COS_CLASS2 | FC_COS_CLASS3;

        nv->firmware_options_2 |= cpu_to_le32(BIT_8);
    } else {
        if (vha->flags.init_done)
            fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

        nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
    }
}
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
    struct init_cb_24xx *icb)
{
    struct qla_hw_data *ha = vha->hw;

    if (ha->tgt.node_name_set) {
        memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
        icb->firmware_options_1 |= cpu_to_le32(BIT_14);
    }
}
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
    struct qla_hw_data *ha = vha->hw;

    if (!QLA_TGT_MODE_ENABLED())
        return;

    if (qla_tgt_mode_enabled(vha)) {
        if (!ha->tgt.saved_set) {
            /* We save only once */
            ha->tgt.saved_exchange_count = nv->exchange_count;
            ha->tgt.saved_firmware_options_1 =
                nv->firmware_options_1;
            ha->tgt.saved_firmware_options_2 =
                nv->firmware_options_2;
            ha->tgt.saved_firmware_options_3 =
                nv->firmware_options_3;
            ha->tgt.saved_set = 1;
        }

        nv->exchange_count = cpu_to_le16(0xFFFF);

        /* Enable target mode */
        nv->firmware_options_1 |= cpu_to_le32(BIT_4);

        /* Disable ini mode, if requested */
        if (!qla_ini_mode_enabled(vha))
            nv->firmware_options_1 |= cpu_to_le32(BIT_5);

        /* Disable Full Login after LIP */
        nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
        /* Enable initial LIP */
        nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
        if (ql2xtgt_tape_enable)
            /* Enable FC tape support */
            nv->firmware_options_2 |= cpu_to_le32(BIT_12);
        else
            /* Disable FC tape support */
            nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

        /* Disable Full Login after LIP */
        nv->host_p &= cpu_to_le32(~BIT_10);
        /* Enable target PRLI control */
        nv->firmware_options_2 |= cpu_to_le32(BIT_14);
    } else {
        if (ha->tgt.saved_set) {
            nv->exchange_count = ha->tgt.saved_exchange_count;
            nv->firmware_options_1 =
                ha->tgt.saved_firmware_options_1;
            nv->firmware_options_2 =
                ha->tgt.saved_firmware_options_2;
            nv->firmware_options_3 =
                ha->tgt.saved_firmware_options_3;
        }
        return;
    }

    /* out-of-order frames reassembly */
    nv->firmware_options_3 |= BIT_6|BIT_9;

    if (ha->tgt.enable_class_2) {
        if (vha->flags.init_done)
            fc_host_supported_classes(vha->host) =
                FC_COS_CLASS2 | FC_COS_CLASS3;

        nv->firmware_options_2 |= cpu_to_le32(BIT_8);
    } else {
        if (vha->flags.init_done)
            fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

        nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
    }
}
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		/* Use the configured target node name instead of NVRAM's */
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* Enable target mode, if requested */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
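
/*
 * Sketch (an assumption, not the driver's actual MSI-X setup code): how a
 * handler like qla83xx_msix_atio_q() above is typically bound to its
 * vector.  The vector number and "qla2xxx_atio" name are illustrative;
 * dev_id is the rsp queue, matching the cast inside the handler.
 */
static int qlt_example_bind_atio_vector(struct qla_hw_data *ha,
	struct rsp_que *rsp, int vector)
{
	return request_irq(vector, qla83xx_msix_atio_q, 0, "qla2xxx_atio",
	    rsp);
}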
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}
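
/*
 * Illustrative pairing only (the real call sites live in the core
 * driver's probe/teardown paths, which are outside this file): whatever
 * qlt_mem_alloc() sets up must be undone with qlt_mem_free() once the
 * HBA goes away or a later probe step fails.  The helper name is
 * hypothetical.
 */
static int qlt_example_setup_tgt_mem(struct qla_hw_data *ha)
{
	int ret = qlt_mem_alloc(ha);

	if (ret != 0)
		return ret;	/* -ENOMEM; qlt_mem_alloc unwound itself */

	/* ... later, on teardown or error: qlt_mem_free(ha); */
	return 0;
}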
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
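
/*
 * Usage sketch (an assumed caller, e.g. vport registration in the core
 * driver): per the comment above, the caller must hold vport_slock
 * around the map update.  The helper name is hypothetical; the command
 * values come from qla_target.h.
 */
static void qlt_example_register_vp(struct qla_hw_data *ha,
	struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}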
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
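
/*
 * Sketch of the expected call site (the driver's real module init lives
 * elsewhere in the qla2xxx core): qlt_init() returns a negative errno on
 * failure and 1 when initiator mode should be disabled.  Both the helper
 * and the 'qlt_example_ini_disabled' flag are hypothetical stand-ins for
 * however the caller records that.
 */
static bool qlt_example_ini_disabled;	/* hypothetical flag */

static int __init qlt_example_module_init(void)
{
	int ret = qlt_init();

	if (ret < 0)
		return ret;		/* abort module load on failure */
	qlt_example_ini_disabled = (ret == 1);
	return 0;
}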
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}