// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allow user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1(default): enable");
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
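/*
 * Added note: qla_sam_status is the SCSI status this driver returns when it
 * must bounce a command itself (see the qlt_send_busy() callers), while
 * tc_sam_status is returned on behalf of the target core when it runs out
 * of resources; keeping them separate lets the two back-pressure paths be
 * tuned independently.
 */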
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */
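/*
 * Added usage sketch (not from the original file; the variable name is
 * illustrative): the task attribute travels in the low bits of the FCP_CMND
 * attribute byte, so a consumer would typically extract it as
 *
 *	uint8_t ta = attr_byte & FCP_PTA_MASK;
 *
 * and recover the priority as ((attr_byte & ~FCP_PRI_RESVD_MASK) >>
 * FCP_PRI_SHIFT).
 */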
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 *	- Either context is IRQ and only IRQ handler can modify HW data,
 *	  including rings related fields,
 *
 *	- Or access to target mode variables from struct qla_tgt doesn't
 *	  cross those functions boundaries, except tgt_stop, which is
 *	  additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}
/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
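/*
 * Added usage sketch (caller pattern, not from the original file): a session
 * stamps its generation while still under the hw lock, e.g.
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 *
 * so that a later deletion request carrying an older generation can be
 * recognized as stale and ignored - see qlt_fc_port_deleted(), which
 * compares max_gen against sess->generation for exactly this purpose.
 */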
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}
static
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}
void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}
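/*
 * Added design note: the two functions above implement deferred delivery of
 * ATIOs whose destination vport cannot be resolved yet. Rather than being
 * dropped, such ATIOs are parked on vha->unknown_atio_list and the delayed
 * work retries the d_id lookup, terminating the exchange only when the
 * command was aborted or the target is actually stopping.
 */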
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}
}
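/*
 * Added note: every response type handled above carries a vp_index (or the
 * 0xFF "no vport" sentinel), which is how the base vha demultiplexes
 * firmware responses to the owning NPIV host before handing them to
 * qlt_response_pkt().
 */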
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
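/*
 * Added note: in the SRB_NACK_PRLI arm above, tgt.sess_lock is deliberately
 * dropped around qla24xx_sched_upd_fcport() and then reacquired, so the
 * fcport update helper never runs with this spinlock held.
 */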
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}
void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}
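/*
 * Added note: two teardown paths meet here - a session registered with the
 * target core (se_sess != NULL) is torn down via the tgt_ops shutdown/put
 * callbacks, while a bare fc_port goes straight to qlt_unreg_sess().
 */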
/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
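/*
 * Added note: pla->ref_count counts plogi_link[] slots that point at this
 * entry (see qlt_plogi_ack_link() below). Only when the last link drops is
 * the deferred NACK posted, the remaining back-pointers swept from
 * vha->vp_fcports, and the entry freed.
 */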
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * number of cmds dropped while we were waiting for
	 * initiator to ack LOGO. Initialize to 1 if LOGO is
	 * triggered by a command, otherwise, to 0
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
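/*
 * Added note: the logo_list scan above coalesces concurrent LOGOs to the
 * same port ID - later callers just add their cmd_count to the in-flight
 * entry and return, so at most one ELS LOGO per port ID is outstanding.
 */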
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
		sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}
/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
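/*
 * Added note: the free_pending flag set above under work_lock is what makes
 * qlt_unreg_sess() idempotent - a second caller sees the flag and returns
 * instead of queueing sess->free_work twice.
 */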
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess)
		return -ESRCH;

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_log_warn, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		rc = -EBUSY;
		goto out_free_id_list;
	}

	rc = -ENOENT;

	gid = gid_list;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			rc = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return rc;
}
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
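/*
 * Added note: the SRR (sequence retransmission request) fields are copied
 * back verbatim from the incoming immediate notify, so the NOTIFY_ACK always
 * answers the exact exchange the firmware reported; only srr_flags,
 * srr_reject_code and srr_explan are caller-supplied.
 */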
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		h = QLA_TGT_SKIP_HANDLE;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
}
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other port (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
	uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h = NULL;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (!h)
			h = &tgt->qphints[0];
	} else {
		h = &tgt->qphints[0];
	}

	return h;
}
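/*
 * Added note: LUN-to-qpair affinity lives in tgt->lun_qpair_map (a 64-bit
 * btree keyed by the unpacked LUN); when queue pairs are unavailable or no
 * hint has been recorded yet, everything falls back to the base queue pair
 * hint qphints[0].
 */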
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
	struct qla_tgt_cmd *abort_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
	    le32_to_cpu(abts->exchange_addr_to_abort));
	if (!abort_cmd)
		return -EIO;
	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

	if (abort_cmd->qpair) {
		mcmd->qpair = abort_cmd->qpair;
		mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
		mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
		mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
	be_id_t s_id;
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
	}
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
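
/*
 * qlt_send_resp_ctio() below builds a status-only CTIO7 carrying
 * fixed-format sense data: byte 0 is the 0x70 response code, byte 2 the
 * sense key, byte 7 the additional sense length (0xa) and bytes 12/13 the
 * ASC/ASCQ pair. Any expected data length is reported as an under-run
 * residual since no data phase is performed.
 */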
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			qlt_build_abts_resp_iocb(mcmd);
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
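
/*
 * DMA-map the command's data (and, for DIF, protection) scatterlists and
 * work out how many request-queue entries and data segment descriptors the
 * CTIO will need; continuation entries are added once the segment count
 * exceeds what fits in the command IOCB itself.
 */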
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    QLA_TGT_DATASEGS_PER_CMD_24XX,
			    QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
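
/*
 * Undo the DMA mappings set up by qlt_pci_map_calc_cnt(): unmap the data
 * and protection scatterlists and, if a CRC context was allocated, return
 * it to the DIF DMA pool.
 */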
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	if (!cmd->ctx)
		return;

	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_ptr;
}
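
/*
 * Scan the outstanding command array for a free slot, starting just past
 * the most recently used handle and wrapping around. QLA_TGT_SKIP_HANDLE
 * is never handed out; QLA_TGT_NULL_HANDLE is returned when every slot is
 * in use.
 */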
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			    prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of the 64-bit
		 * specific fields are used for 32-bit addressing.
		 * Cast to (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		cur_dsd = cont_pkt64->dsd;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			append_dsd64(&cur_dsd, prm->sg);
			prm->sg = sg_next(prm->sg);
		}
	}
}
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	cur_dsd = &pkt24->u.status0.dsd;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		cur_dsd->address = 0;
		cur_dsd->length = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		append_dsd64(&cur_dsd, prm->sg);
		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm);
}
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
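
/*
 * Decode a DIF (T10-PI) check condition from the sense buffer: ASC 0x10
 * with ASCQ 1/2/3 distinguishes guard, application tag and reference tag
 * errors detected by the backend.
 */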
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
	int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++) {
			uint32_t v;

			v = get_unaligned_be32(
			    &((uint32_t *)prm->sense_buffer)[i]);
			put_unaligned_le32(v,
			    &((uint32_t *)ctio->u.status1.sense_data)[i]);
		}
		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
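
/*
 * HBA-side T10-PI error checking is governed by the ql2xenablehba_err_chk
 * module parameter: >= 1 enables it for DOUT_INSERT/DIN_STRIP, >= 2 also
 * for the PASS operations, while DIN_INSERT/DOUT_STRIP always report
 * checking enabled.
 */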
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		return 1;
	default:
		return 0;
	}
}
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/*
		 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
		 * tag has to match LBA in CDB + N
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE3_PROT:
		/* For TYPE 3 protection: 16 bit GUARD only */
		*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
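
/*
 * Build a CTIO_CRC2 IOCB for a command with a T10-PI protection operation.
 * A CRC context is allocated from the DIF DMA pool and describes the block
 * size, protection options and the data/protection DSD lists; bundling is
 * used whenever separate protection segments have to be fetched.
 */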
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	struct dsd64 *cur_dsd;
	uint32_t transfer_length = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	struct ctio_crc2_to_fw *pkt;
	dma_addr_t crc_ctx_dma;
	uint16_t fw_prot_opts = 0;
	struct qla_tgt_cmd *cmd = prm->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
	    "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
	    cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
	    prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
	pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}
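
/*
 * qlt_xmit_response() below picks qlt_build_ctio_crc2_pkt() when the
 * command carries a protection operation and data is being transmitted,
 * and falls back to the plain CTIO7 builder otherwise.
 */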
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		res = 0;
		goto free;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0))
		goto free;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		res = 0;
		goto free;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);

	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

free:
	vha->hw->tgt.tgt_ops->free_cmd(cmd);
	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
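
/*
 * Compare the actual and expected DIF tags reported by the firmware and
 * turn a mismatch into a CHECK CONDITION (ABORTED COMMAND, ASC 0x10) whose
 * ASCQ identifies a guard, application tag or reference tag failure.
 */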
 * it is assumed either hardware_lock or qpair lock is held.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t *ap = &sts->actual_dif[0];
	uint8_t *ep = &sts->expected_dif[0];
	uint64_t lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	cmd->a_guard = get_unaligned_be16(ap + 0);
	cmd->a_app_tag = get_unaligned_be16(ap + 2);
	cmd->a_ref_tag = get_unaligned_be32(ap + 4);

	cmd->e_guard = get_unaligned_be16(ep + 0);
	cmd->e_app_tag = get_unaligned_be16(ep + 2);
	cmd->e_ref_tag = get_unaligned_be32(ep + 4);

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	int rc;

	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}
 * If hardware_lock held on entry, might drop it, then reacquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return -EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->jiffies_at_free = get_jiffies_64();
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		    cmd->lba, cmd->lba,
		    cmd->num_blks, &cmd->se_cmd,
		    cmd->atio.u.isp24.exchange_addr,
		    cmd->se_cmd.prot_op,
		    prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}
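
/*
 * Translate a CTIO completion handle back into the owning command: decode
 * the queue ID, validate the handle range, and clear the outstanding
 * command slot so the handle can be reused.
 */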
/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    !cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
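
/*
 * Map the FCP_CMND task attribute from the ATIO onto the corresponding TCM
 * task attribute; unknown codes fall back to ORDERED.
 */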
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
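/*
 * Workqueue entry point for the command I/O path: unlink the command from
 * the per-vha command list, then run __qlt_do_work() in process context.
 */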
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}
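/*
 * Pick a qpair for a command. The first command seen on a LUN establishes
 * the LUN-to-qpair binding in tgt->lun_qpair_map; later commands on that
 * LUN reuse the cached hint so all of its I/O stays on one queue pair.
 */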
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
				    cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
						    "Unable to insert lun %llx into lun_qpair_map\n",
						    cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}

			/* fall back to the least-loaded qpair found above */
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
			    cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				    "Unable to insert lun %llx into lun_qpair_map\n",
				    cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}
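/*
 * Allocate and initialize a qla_tgt_cmd for an incoming ATIO, binding it
 * to the originating session and to a qpair via qlt_assign_qpair().
 */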
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct qla_tgt_cmd *cmd;

	cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;

	return cmd;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
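/*
 * Build and queue a task-management command (mcmd). LUN-scoped TMFs first
 * abort any affected commands and are steered to that LUN's qpair hint;
 * the TMF itself runs later from qlt_do_tmr_work() on qla_tgt_wq.
 */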
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (mcmd->tmr_func) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}
/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Otherwise NULL.
*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but
				 * another session that has the same
				 * s_id/loop_id combo might have cleared it
				 * when it requested this session's deletion,
				 * so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id.
				 * Kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id.
			 * OK to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}
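/*
 * In the helper below, the 24-bit S_ID key is packed as
 * domain << 16 | area << 8 | al_pa, matching what sid_to_key() computes
 * for each queued command being compared against it.
 */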
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}
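/*
 * Common PLOGI/PRLI login handling: invalidate colliding sessions,
 * allocate a PLOGI ack tracker (pla), and either post creation of a new
 * session or schedule deletion/re-login of an existing one.
 */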
static int qlt_handle_login(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;
	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;

	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);

		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}
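/*
 * ELS dispatcher for ISP24xx immediate notifies. The return value follows
 * the convention documented inside: 1 = ack at the end of this thread,
 * 0 = ack asynchronously later.
 */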
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * An impatient initiator sent PRLI before the
				 * last PLOGI could finish. Force it to retry
				 * while the last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 * handling
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went to upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI: /* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires waiting after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id =
	    cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}
/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	cmd = ha->tgt.tgt_ops->get_cmd(sess);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->q_full = 1;
	cmd->qpair = ha->base_qpair;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM. There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
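/*
 * qlt_send_busy() below tries the inline busy response first; if IOCB
 * allocation fails (-ENOMEM), the ATIO is parked on the q_full list via
 * qlt_alloc_qfull_cmd() to be answered later by qlt_free_qfull_cmds().
 */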
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
    uint16_t status)
{
	int rc = 0;
	struct scsi_qla_host *vha = qpair->vha;

	rc = __qlt_send_busy(qpair, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_busy(qpair, atio, qla_sam_status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
			     cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
/*
 * qpair lock is assumed to be held
 * rc = 0 : send terminate & ABTS response
 * rc != 0: do not send terminate & ABTS response
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha);
			else
				qla2xxx_dump_fw(vha);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}
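/*
 * Completion handler for ABTS responses previously sent to the firmware.
 * On subcode 0x1E (stale exchange) the termination is retried, unless the
 * loop detector above decides to force a firmware dump instead.
 */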
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
		    le32_to_cpu(entry->error_subcode2) == 0) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		if (mailbox[3] == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		login_code = mailbox[2];
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}
}
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}

	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}
/* Must be called under tgt_mutex */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
					   be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			    __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	unsigned long flags;
	be_id_t s_id;
	int fn;
	void *iocb;
	int rc;
	u64 unpacked_lun;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted)
			goto out_term2;

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			    __func__, sess->port_name);
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
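/*
 * Session work handler: drains tgt->sess_works_list, dispatching each
 * queued ABORT/TM item to qlt_abort_work()/qlt_tmr_work() with the work
 * lock dropped around the callouts.
 */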
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kcalloc(ha->max_qpairs + 1,
			       sizeof(struct qla_qpair_hint),
			       GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
		    "Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	/* 3 is reserved at porting layer */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}
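
/*
 * Illustrative sketch, not compiled: how a command's LUN can be resolved to
 * a queue pair through the lun_qpair_map btree initialized above. The real
 * lookup (with load balancing and hint creation) lives in the driver's
 * qpair-assignment path; the helper below is a simplified approximation.
 */
#if 0
static struct qla_qpair *example_lun_to_qpair(struct qla_tgt *tgt, u64 lun)
{
	struct qla_qpair_hint *h;

	h = btree_lookup64(&tgt->lun_qpair_map, lun);
	if (h)
		return h->qpair;

	/* No hint recorded yet: fall back to the base queue pair. */
	return tgt->ha->base_qpair;
}
#endif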

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
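
/*
 * Illustrative sketch, not compiled: how an external fabric module such as
 * tcm_qla2xxx might use qlt_lport_register() for a physical port. The
 * callback body and the example_* names are hypothetical placeholders.
 */
#if 0
static int example_lport_cb(struct scsi_qla_host *vha, void *target_lport_ptr,
	u64 npiv_wwpn, u64 npiv_wwnn)
{
	/* Bind the fabric module's private lport data to this vha. */
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	return 0;
}

static int example_register_physical_lport(void *lport, u64 wwpn)
{
	/* Physical port: no NPIV WWPN/WWNN, so pass 0 for both. */
	return qlt_lport_register(lport, wwpn, 0, 0, example_lport_cb);
}
#endif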

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		    "adapter is offline\n");
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;

	return fc4_feature;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: whether the caller already holds the HW lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
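
/*
 * Worked example for the ring walk above, assuming the default
 * atio_q_length of ATIO_ENTRY_CNT_24XX (4096): with atio_ring_index at
 * 4094 and a packet spanning entry_count == 3 entries, the index advances
 * to 4095, wraps to 0 (resetting atio_ring_ptr to the ring base), then
 * moves to 1; only the final index is written back to the ATIO out-pointer
 * register.
 */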

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (IS_QLA2071(ha)) {
				/* 4 ports Baker: Enable Interrupt Handshake */
				icb->msix_atio = 0;
				icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			} else {
				icb->msix_atio = cpu_to_le16(msix->entry);
				icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			}
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}
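
/*
 * Illustrative sketch, not compiled: how the handler above would be bound
 * to the dedicated ATIO MSI-X vector (entry 2, matching the
 * ha->msix_entries[2] use in qlt_24xx_config_rings()). The real hookup is
 * driven from the MSI-X entry table in qla_isr.c; the interrupt name below
 * is an assumption.
 */
#if 0
static int example_request_atio_vector(struct qla_hw_data *ha,
	struct rsp_que *rsp)
{
	struct qla_msix_entry *msix = &ha->msix_entries[2];

	return request_irq(msix->vector, qla83xx_msix_atio_q, 0,
	    "qla2xxx (atio_q)", rsp);
}
#endif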

static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/* do not reach for ATIO queue here. This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;

	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
				     sizeof(struct qla_tgt_vp_map),
				     GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}
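
/*
 * Sizing example for the allocation above, assuming the standard 64-byte
 * IOCB entry size for struct atio_from_isp and the default atio_q_length
 * of 4096: the coherent DMA area is (4096 + 1) * 64 = 262208 bytes, one
 * entry larger than the ring itself.
 */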

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}
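
/*
 * Usage example for the parsing above (shell, not C):
 *
 *	modprobe qla2xxx qlini_mode=disabled	# pure target mode
 *	modprobe qla2xxx qlini_mode=dual	# initiator + target
 *
 * qlt_init() below returns 1 for the "disabled" case so that the core
 * driver can skip initiator-mode setup.
 */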

int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}