/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
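
/*
 * Note: ql2x_ini_mode holds the effective initiator-mode policy.  It is
 * initialized from the qlini_mode string above when the module loads and
 * defaults to exclusive mode until that parameter has been parsed.
 */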

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80 /* reserved bits in priority field */
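
/*
 * Example decode of the FCP_CMND task attribute byte: for a value of
 * 0x0a, (0x0a & FCP_PTA_MASK) == FCP_PTA_ORDERED and the priority is
 * (0x0a >> FCP_PRI_SHIFT) == 1, with FCP_PRI_RESVD_MASK marking the
 * reserved high bit of the priority field.
 */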

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 *  - Either context is IRQ and only IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions' boundaries, except tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */

/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}
}

static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0	/* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}
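
/*
 * For example, with a port_down_retry_count of 30 a non-immediate
 * deletion lingers roughly 35 seconds (port_down_retry_count + 5) before
 * qlt_del_sess_work_fn() tears the session down, while an immediate
 * request runs the delayed work right away with a zero dev_loss_tmo.
 */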

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
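
/*
 * The work function above only ever examines the head of del_sess_list:
 * entries are appended with a fixed dev_loss_tmo, so the list is normally
 * ordered by expiry.  Either the head has expired and is torn down, or
 * the delayed work is re-armed for the remaining time and the loop exits.
 */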

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
			    fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode.  If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	if (qla_ini_mode_enabled(vha))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
	} else {
		kref_get(&sess->se_sess->sess_kref);

		if (sess->deleted) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
			    "qla_target(%u): %ssession for port %8phC "
			    "(loop ID %d) reappeared\n", vha->vp_idx,
			    sess->local ? "local " : "", sess->port_name,
			    sess->loop_id);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
			    "Reappeared sess %p\n", sess);
		}
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt || (fcport->port_type != FCT_INITIATOR))
		return;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}
	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	qlt_schedule_sess_for_deletion(sess, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;

	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
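
/*
 * Taken together: qlt_stop_phase1() schedules every session for deletion,
 * drains the sess works list and waits for sess_count to reach zero, while
 * qlt_stop_phase2() spins until irq_cmd_count drains before declaring the
 * target stopped.  Callers are expected to invoke them in that order.
 */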

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
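
/*
 * FCP_TMF_CMPL becomes a BA_ACC payload (abort accepted); any other status
 * is reported back as a BA_RJT with reason "unable to perform command
 * request".  The s_id/d_id swap above depends on whether we are answering
 * the initiator directly or echoing a firmware-generated response.
 */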

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry this is the firmware's response to the ABTS response we
	 * generated earlier, so its ID fields are already reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	uint32_t lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If greater than four sg entries then we need to allocate
	 * the continuation entries
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}
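
/*
 * Handle space: 0 is QLA_TGT_NULL_HANDLE and QLA_TGT_SKIP_HANDLE is
 * likewise never handed out, so valid command handles run from 1 to
 * DEFAULT_OUTSTANDING_COMMANDS and index ha->tgt.cmds[] at h-1.  A full
 * wrap back to current_handle means every slot is busy.
 */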

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */
		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
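
/*
 * Layout recap: the CTIO7 itself carries up to datasegs_per_cmd data
 * segments; anything beyond that spills into the continuation IOCBs built
 * above, each holding up to datasegs_per_cont entries.  This is why
 * qlt_pci_map_calc_cnt() sized req_cnt with DIV_ROUND_UP().
 */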

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
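
/*
 * Confirmed completion recap: explicit confirmation is never requested for
 * class 2 service (the class itself confirms delivery).  Otherwise it is
 * used when sending sense if the initiator advertised support, and for
 * plain status only when enable_explicit_conf is also set.
 */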

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}

static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04f,
			    "qla_target(%d): %d bytes of sense "
			    "lost", prm->tgt->ha->vp_idx,
			    prm->sense_buffer_len % 4);
		}
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ? 1 : 0, cmd->bufflen,
	    cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}
		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
			    (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, le16_to_cpu(pkt->u.status0.scsi_status));

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
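
/*
 * Called when a WRITE command is ready to receive data: builds a CTIO7 with
 * CTIO7_FLAGS_DATA_OUT, loads the data segments and rings the request queue,
 * leaving the command in QLA_TGT_STATE_NEED_DATA until the CTIO completion
 * for the transfer arrives.
 */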
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
	unsigned long flags;
	int rc;

	if (qlt_issue_marker(vha, ha_locked) < 0)
		return;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(vha, cmd, atio);
		goto done;
	}
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	rc = __qlt_send_term_exchange(vha, cmd, atio);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
	if (rc == 1) {
		if (!ha_locked && !in_interrupt())
			msleep(250); /* just in case */

		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}
}
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	BUG_ON(cmd->sg_mapped);

	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
/* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd, void *ctio)
{
	struct qla_tgt_srr_ctio *sc;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_imm *imm;

	tgt->ctio_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

	if (!ctio) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
		    vha->vp_idx);
		return -EINVAL;
	}

	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
	if (sc != NULL) {
		sc->cmd = cmd;
		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		sc->srr_id = tgt->ctio_srr_id;
		list_add_tail(&sc->srr_list_entry,
		    &tgt->srr_ctio_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(imm, &tgt->srr_imm_list,
			    srr_list_entry) {
				if (imm->srr_id == sc->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR IMM, deleting CTIO "
				    "SRR %p\n", vha->vp_idx,
				    tgt->ctio_srr_id, sc);
				list_del(&sc->srr_list_entry);
				spin_unlock(&tgt->srr_lock);

				kfree(sc);
				return -EINVAL;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_imm *ti;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
		    vha->vp_idx);
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (imm->srr_id == tgt->ctio_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
				    "IMM SRR %p deleted (id %d)\n",
				    imm, imm->srr_id);
				list_del(&imm->srr_list_entry);
				qlt_reject_free_srr_imm(vha, imm, 1);
			}
		}
		spin_unlock(&tgt->srr_lock);

		return -ENOMEM;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
		term = !(c->flags &
		    __constant_cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
	uint32_t handle)
{
	struct qla_hw_data *ha = vha->hw;

	handle--;
	if (ha->tgt.cmds[handle] != NULL) {
		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
		ha->tgt.cmds[handle] = NULL;
		return cmd;
	} else
		return NULL;
}
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	uint32_t handle, void *ctio)
{
	struct qla_tgt_cmd *cmd = NULL;

	/* Clear out internal marks */
	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
	    CTIO_INTERMEDIATE_HANDLE_MARK);

	if (handle != QLA_TGT_NULL_HANDLE) {
		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
			    "SKIP_HANDLE CTIO\n");
			return NULL;
		}
		/* handle-1 is actually used */
		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}
		cmd = qlt_get_cmd(vha, handle);
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to "
			    "find the command with handle %x\n", vha->vp_idx,
			    handle);
			return NULL;
		}
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
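
/*
 * Demultiplexes a CTIO completion back to its qla_tgt_cmd via the handle and
 * finishes the command according to its state: status already sent, write
 * data received (hand off to ->handle_data()), or aborted.
 */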
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
	    vha->vp_idx, ctio, status, handle);

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status "
			    "0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ql_dbg(ql_dbg_tgt, vha, 0xe020,
		    "Data received, context %x, rx_status %d\n",
		    0x0, rx_status);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
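
/*
 * Maps the FCP task attribute from the ATIO to the corresponding TCM
 * MSG_*_TAG value, falling back to ORDERED for unknown task codes.
 */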
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
	if (sess)
		kref_get(&sess->se_sess->sess_kref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multi-entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref. */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
	if (!cmd) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		return -ENOMEM;
	}

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;

	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = vha->vha_tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existent session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
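
/*
 * Looks up the session by loop ID; if it does not exist yet, defers the
 * abort to the session work queue, otherwise handles it immediately via
 * __qlt_abort_task().
 */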
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	int res = 0;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
	case ELS_FLOGI:
	case ELS_PRLI:
	case ELS_LOGO:
	case ELS_PRLO:
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		res = 1; /* send notify ack */
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	return res;
}
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
	size_t first_offset = 0, rem_offset = offset, tmp = 0;
	int i, sg_srr_cnt, bufflen = 0;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
	    "cmd->sg_cnt: %u, direction: %d\n",
	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

	/*
	 * FIXME: Reject non zero SRR relative offset until we can test
	 * this code properly.
	 */
	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
	return -1;

	if (!cmd->sg || !cmd->sg_cnt) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
		    "Missing cmd->sg or zero cmd->sg_cnt in"
		    " qla_tgt_set_data_offset\n");
		return -EINVAL;
	}
	/*
	 * Walk the current cmd->sg list until we locate the new sg_srr_start
	 */
	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
		    i, sg, sg_page(sg), sg->length, sg->offset);

		if ((sg->length + tmp) > offset) {
			first_offset = rem_offset;
			sg_srr_start = sg;
			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
			    "Found matching sg[%d], using %p as sg_srr_start, "
			    "and using first_offset: %zu\n", i, sg,
			    first_offset);
			break;
		}
		tmp += sg->length;
		rem_offset -= sg->length;
	}

	if (!sg_srr_start) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
		    "Unable to locate sg_srr_start for offset: %u\n", offset);
		return -EINVAL;
	}
	sg_srr_cnt = (cmd->sg_cnt - i);

	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
	if (!sg_srr) {
		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
		    "Unable to allocate sgp\n");
		return -ENOMEM;
	}
	sg_init_table(sg_srr, sg_srr_cnt);
	sgp = &sg_srr[0];
	/*
	 * Walk the remaining list for sg_srr_start, mapping to the newly
	 * allocated sg_srr taking first_offset into account.
	 */
	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
		if (first_offset) {
			sg_set_page(sgp, sg_page(sg),
			    (sg->length - first_offset), first_offset);
			first_offset = 0;
		} else {
			sg_set_page(sgp, sg_page(sg), sg->length, 0);
		}
		bufflen += sgp->length;

		sgp = sg_next(sgp);
		if (!sgp)
			break;
	}

	cmd->sg = sg_srr;
	cmd->sg_cnt = sg_srr_cnt;
	cmd->bufflen = bufflen;
	cmd->offset += offset;
	cmd->free_sg = 1;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
	    cmd->sg_cnt);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
	    cmd->bufflen);
	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
	    cmd->offset);

	if (cmd->sg_cnt < 0)
		BUG();

	if (cmd->bufflen < 0)
		BUG();

	return 0;
}
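
/*
 * Translates an SRR relative offset into the xmit_type for the retried
 * response: retransmit everything, only the status, or data starting from a
 * new offset via qlt_set_data_offset().
 */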
static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
	uint32_t srr_rel_offs, int *xmit_type)
{
	int res = 0, rel_offs;

	rel_offs = srr_rel_offs - cmd->offset;
	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
	    srr_rel_offs, rel_offs);

	*xmit_type = QLA_TGT_XMIT_ALL;

	if (rel_offs < 0) {
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
		    "qla_target(%d): SRR rel_offs (%d) < 0",
		    cmd->vha->vp_idx, rel_offs);
		res = -1;
	} else if (rel_offs == cmd->bufflen)
		*xmit_type = QLA_TGT_XMIT_STATUS;
	else if (rel_offs > 0)
		res = qlt_set_data_offset(cmd, rel_offs);

	return res;
}
/* No locks, thread context */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
	struct qla_tgt_srr_imm *imm, int ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(imm);
}
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
		    srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					    "qla_target(%d): There must be "
					    "only one IMM SRR per CTIO SRR "
					    "(IMM SRR %p, id %d, CTIO %p\n",
					    vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic.
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			kfree(cmd->sg);
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				return;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
		    &atio->u.isp24.fcp_cmnd.add_cdb[
		    atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */
	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
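
/*
 * Allocates a temporary fc_port_t and fills it from the firmware's port
 * database for the given loop ID; the caller owns (and must free) the result.
 */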
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}
/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
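
/*
 * Adding the target to qla_tgt_glist is what makes this host visible
 * to qlt_lport_register() below, which walks that list to match a
 * configfs-supplied WWPN against a physical port.
 */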

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN passed in from configfs
 * @npiv_wwpn: NPIV WWPN, or zero when registering a physical port
 * @npiv_wwnn: NPIV WWNN, or zero when registering a physical port
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) &&
		    host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
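
/*
 * Minimal usage sketch (assumption: the caller is a fabric module such
 * as tcm_qla2xxx; the callback name below is hypothetical):
 *
 *	ret = qlt_lport_register(lport, wwpn, 0, 0, my_lport_cb);
 *
 * where my_lport_cb(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn) binds
 * the matched scsi_qla_host to the lport data and returns 0 on
 * success. A negative return here means no capable host matched the
 * WWPN.
 */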

/**
 * qlt_lport_deregister - deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
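
/*
 * Summary of how ql2x_ini_mode maps to active_mode in the two helpers
 * above:
 *
 *	qlini_mode	qlt_set_mode()		qlt_clear_mode()
 *	disabled	MODE_TARGET		MODE_UNKNOWN
 *	exclusive	MODE_TARGET		MODE_INITIATOR
 *	enabled		|= MODE_TARGET		&= ~MODE_TARGET
 */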

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}
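
/*
 * In the RFF_ID FC-4 Features field, bit 0 advertises target function
 * and bit 1 advertises initiator function, so a dual-mode port
 * registers BIT_0 | BIT_1 with the fabric name server while a pure
 * target registers only BIT_0.
 */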

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for ATIO queue.\n",
		    msix->entry);
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tape support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6 | BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
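
/*
 * The NVRAM stage1 rewrite above boils down to: save the stock
 * firmware options once, then for target mode raise the exchange count
 * to 0xFFFF, set firmware_options_1 BIT_4 (target mode) and optionally
 * BIT_5 (initiator disabled), and adjust the LIP- and PRLI-related
 * bits; when target mode is off, the saved options are restored
 * verbatim.
 */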

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tape support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6 | BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
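
/*
 * The dedicated ATIO vector handler drains both the ATIO queue and the
 * response queue while holding hardware_lock, matching the convention
 * that qlt_24xx_process_atio_queue() is always entered with the HW
 * lock held.
 */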

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
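
/*
 * tgt_vp_map is indexed two ways: by vp_idx to map a virtual port
 * number back to its scsi_qla_host, and by AL_PA to map an arriving
 * frame's destination address to the owning vp_idx; the SET_ and
 * RESET_ commands above maintain both directions.
 */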

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
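
/*
 * The string compared here is the qlini_mode module parameter, so
 * initiator mode can be turned off at load time with, e.g.:
 *
 *	modprobe qla2xxx qlini_mode=disabled
 *
 * Any unrecognized value makes qlt_init() below fail, aborting module
 * load.
 */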

int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}