// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
	"when ready "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
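/*
 * Illustrative load-time usage of the parameters above (an example, not
 * taken from the driver documentation), e.g. target-only operation with
 * SLER enabled:
 *   modprobe qla2xxx qlini_mode=disabled ql2xtgt_tape_enable=1
 */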
static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};
/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
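/*
 * Sketch of how a pri_ta byte decodes with the masks above (illustration
 * only, not driver code):
 *   task_attr = pri_ta & FCP_PTA_MASK;
 *   priority  = (pri_ta & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */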
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 *   - Either context is IRQ and only IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}
/* This API intentionally takes dest as a parameter, rather than returning
 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}
static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}
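/*
 * Queued unknown ATIOs are revisited from qlt_unknown_atio_work_fn() via
 * qlt_try_to_dequeue_unknown_atios() below: each entry is either delivered
 * (once its d_id resolves), terminated (abort or tgt_stop), or rescheduled.
 */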
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}
void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}
}
/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
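/*
 * Note: sess_lock is dropped around qla24xx_sched_upd_fcport() above and
 * re-acquired afterwards, so fcport state can change in that window.
 */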
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}
void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}
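/*
 * A session backed by target-core (se_sess != NULL) is torn down through
 * the fabric ops above; otherwise the fc_port is unregistered directly.
 */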
/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}
*vha
,
820 struct qlt_plogi_ack_t
*pla
)
822 struct imm_ntfy_from_isp
*iocb
= &pla
->iocb
;
825 fc_port_t
*fcport
= pla
->fcport
;
827 BUG_ON(!pla
->ref_count
);
833 ql_dbg(ql_dbg_disc
, vha
, 0x5089,
834 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
835 " exch %#x ox_id %#x\n", iocb
->u
.isp24
.port_name
,
836 iocb
->u
.isp24
.port_id
[2], iocb
->u
.isp24
.port_id
[1],
837 iocb
->u
.isp24
.port_id
[0],
838 le16_to_cpu(iocb
->u
.isp24
.nport_handle
),
839 iocb
->u
.isp24
.exchange_address
, iocb
->ox_id
);
841 port_id
.b
.domain
= iocb
->u
.isp24
.port_id
[2];
842 port_id
.b
.area
= iocb
->u
.isp24
.port_id
[1];
843 port_id
.b
.al_pa
= iocb
->u
.isp24
.port_id
[0];
844 port_id
.b
.rsvd_1
= 0;
846 loop_id
= le16_to_cpu(iocb
->u
.isp24
.nport_handle
);
848 fcport
->loop_id
= loop_id
;
849 fcport
->d_id
= port_id
;
850 if (iocb
->u
.isp24
.status_subcode
== ELS_PLOGI
)
851 qla24xx_post_nack_work(vha
, fcport
, iocb
, SRB_NACK_PLOGI
);
853 qla24xx_post_nack_work(vha
, fcport
, iocb
, SRB_NACK_PRLI
);
855 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
856 if (fcport
->plogi_link
[QLT_PLOGI_LINK_SAME_WWN
] == pla
)
857 fcport
->plogi_link
[QLT_PLOGI_LINK_SAME_WWN
] = NULL
;
858 if (fcport
->plogi_link
[QLT_PLOGI_LINK_CONFLICT
] == pla
)
859 fcport
->plogi_link
[QLT_PLOGI_LINK_CONFLICT
] = NULL
;
862 list_del(&pla
->list
);
863 kmem_cache_free(qla_tgt_plogi_cachep
, pla
);
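/*
 * Dropping the last reference above sends the deferred NACK (PLOGI or
 * PRLI, depending on the saved IOCB's status_subcode) and frees the pla.
 */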
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the initiator
	 * to ack LOGO; initialize to 1 if LOGO is triggered by a command,
	 * otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;
void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}
/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
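/*
 * The actual teardown runs asynchronously from qlt_free_session_done() on
 * ha->wq; the free_pending flag keeps qlt_unreg_sess() from queuing the
 * work twice.
 */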
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
			wake_up_all(&tgt->waitQ);
		if (sess->vha->fcport_count == 0)
			wake_up_all(&sess->vha->fcport_waitQ);

		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
			return;
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		rc = -EBUSY;
		goto out_free_id_list;
	}

	rc = -ENOENT;
	gid = gid_list;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			rc = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return rc;
}
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
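/*
 * Phase 1 above drains session works and sessions and disables the vha;
 * phase 2 below only completes the tgt_stop -> tgt_stopped transition once
 * phase 1 has finished.
 */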
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl, h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = MAKE_HANDLE(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver.  */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * On entry, this is the firmware's response to an ABTS response
	 * that we generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
}
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			op->aborted = true;
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
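/*
 * sid_to_key() packs the 24-bit source port id into a single integer, so
 * the s_id match in each loop above is one compare rather than three.
 */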
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
    uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h = NULL;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (!h)
			h = &tgt->qphints[0];
	} else {
		h = &tgt->qphints[0];
	}

	return h;
}
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	if (ha->tgt.tgt_ops->find_cmd_by_tag) {
		struct qla_tgt_cmd *abort_cmd;

		abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
		    abts->exchange_addr_to_abort);
		if (abort_cmd && abort_cmd->qpair) {
			mcmd->qpair = abort_cmd->qpair;
			mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
			mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
			mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
		}
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}
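/*
 * queue_work_on() with the cpuid chosen above keeps the abort work on
 * the same CPU and qpair as the command being aborted whenever
 * find_cmd_by_tag() located it, so the ABTS response stays serialized
 * with the outstanding I/O it refers to.
 */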
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	be_id_t s_id;
	int rc;
	unsigned long flags;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);

	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
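/*
 * Status mode 1 with SS_RESPONSE_INFO_LEN_VALID set makes the firmware
 * transmit the 8 bytes staged in sense_data[] as FCP response
 * information, so the task management response code (see enum
 * fcp_resp_rsp_codes) travels in the FCP_RSP frame rather than as SCSI
 * sense data.
 */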
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
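/*
 * The sense bytes filled in above follow the SPC fixed-format layout:
 * byte 0 = 0x70 (current error), byte 2 = sense key, byte 7 = additional
 * sense length (0xa), and bytes 12/13 = ASC/ASCQ.
 */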
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			qlt_build_abts_resp_iocb(mcmd);
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    QLA_TGT_DATASEGS_PER_CMD_24XX,
			    QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	uint32_t cnt;
	struct req_que *req = qpair->req;

	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}
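/*
 * Worked example of the free-slot accounting above: with a ring of
 * length 2048, ring_index 2040 and a firmware out-pointer of 8, the
 * producer has wrapped, so req->cnt = 2048 - (2040 - 8) = 16 free
 * entries; a request needing more than 14 entries (req_cnt plus the two
 * guard slots) is then refused with -EAGAIN.
 */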
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
	return (cont_entry_t *)req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
	uint32_t h;
	int index;
	uint8_t found = 0;
	struct req_que *req = qpair->req;

	h = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		h++;
		if (h == req->num_outstanding_cmds)
			h = 1;

		if (h == QLA_TGT_SKIP_HANDLE)
			continue;

		if (!req->outstanding_cmds[h]) {
			found = 1;
			break;
		}
	}

	if (found) {
		req->current_outstanding_cmd = h;
	} else {
		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
		    "qla_target(%d): Ran out of empty cmd slots\n",
		    qpair->vha->vp_idx);
		h = QLA_TGT_NULL_HANDLE;
	}

	return h;
}
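/*
 * Handle allocation scans outstanding_cmds[] circularly, starting just
 * past the last handle handed out, skipping QLA_TGT_SKIP_HANDLE and any
 * slot still owned by an in-flight command; QLA_TGT_NULL_HANDLE tells
 * the caller that the table is full.
 */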
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			    prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		cur_dsd = cont_pkt64->dsd;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			append_dsd64(&cur_dsd, prm->sg);
			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	cur_dsd = &pkt24->u.status0.dsd;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		cur_dsd->address = 0;
		cur_dsd->length = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		append_dsd64(&cur_dsd, prm->sg);
		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm);
}
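/*
 * A CTIO7 carries at most QLA_TGT_DATASEGS_PER_CMD_24XX data segment
 * descriptors inline; anything beyond that spills into CONTINUE_A64_TYPE
 * IOCBs holding QLA_TGT_DATASEGS_PER_CONT_24XX descriptors each, which
 * is exactly how qlt_pci_map_calc_cnt() sized prm->req_cnt earlier.
 */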
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag,
		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		    prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		    se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
    int sending_sense)
{
	if (cmd->qpair->enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
}
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case TARGET_PROT_DOUT_PASS:
	case TARGET_PROT_DIN_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		return 1;
	default:
		break;
	}
	return 0;
}
static inline int
qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		return 1;
	default:
		return 0;
	}
}
/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/*
		 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
		 * tag has to match LBA in CDB + N
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE3_PROT:
		/* For TYPE 3 protection: 16 bit GUARD only */
		*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
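/*
 * Summary of the T10 PI types handled above: types 0, 1 and 2 validate
 * the 32-bit reference tag against the low 32 bits of the LBA (all four
 * ref-tag mask bytes enabled), while type 3 carries no LBA in the ref
 * tag, so only the 16-bit guard is checked and ref-tag checking is
 * disabled via PO_DIS_REF_TAG_VALD.
 */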
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	struct dsd64 *cur_dsd;
	uint32_t transfer_length = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	struct ctio_crc2_to_fw *pkt;
	dma_addr_t crc_ctx_dma;
	uint16_t fw_prot_opts = 0;
	struct qla_tgt_cmd *cmd = prm->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
	    "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
	    cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
	    prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
		    cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
		    prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
	    (prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
		    prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		res = 0;
		goto free;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0))
		goto free;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
		    "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		res = 0;
		goto free;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay on top of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

free:
	vha->hw->tgt.tgt_ops->free_cmd(cmd);
	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
		    "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
 * it is assumed either hardware_lock or qpair lock is held.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t *ap = &sts->actual_dif[0];
	uint8_t *ep = &sts->expected_dif[0];
	uint64_t lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	cmd->a_guard = get_unaligned_be16(ap + 0);
	cmd->a_app_tag = get_unaligned_be16(ap + 2);
	cmd->a_ref_tag = get_unaligned_be32(ap + 4);

	cmd->e_guard = get_unaligned_be16(ep + 0);
	cmd->e_app_tag = get_unaligned_be16(ep + 2);
	cmd->e_ref_tag = get_unaligned_be32(ep + 4);

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
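/*
 * All three miscompares above surface to the initiator as CHECK
 * CONDITION with sense key ABORTED COMMAND and ASC 0x10 (the T10 PI
 * additional sense code); the ASCQ distinguishes guard (0x1),
 * application tag (0x2) and reference tag (0x3) check failures.
 */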
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	int rc;

	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}
/*
 * If hardware_lock held on entry, might drop it, then reacquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (cmd->sg_mapped)
		qlt_unmap_sg(cmd->vha, cmd);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();
	if (unlikely(cmd->free_sg))
		kfree(cmd->sg);

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->jiffies_at_free = get_jiffies_64();
	target_free_tag(sess->se_sess, &cmd->se_cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		    cmd->lba, cmd->lba,
		    cmd->num_blks, &cmd->se_cmd,
		    cmd->atio.u.isp24.exchange_addr,
		    cmd->se_cmd.prot_op,
		    prot_op_str(cmd->se_cmd.prot_op));

	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		term = !(c->flags_lo &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}
/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = (void *) req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
3912 static void qlt_do_ctio_completion(struct scsi_qla_host
*vha
,
3913 struct rsp_que
*rsp
, uint32_t handle
, uint32_t status
, void *ctio
)
3915 struct qla_hw_data
*ha
= vha
->hw
;
3916 struct se_cmd
*se_cmd
;
3917 struct qla_tgt_cmd
*cmd
;
3918 struct qla_qpair
*qpair
= rsp
->qpair
;
3920 if (handle
& CTIO_INTERMEDIATE_HANDLE_MARK
) {
3921 /* That could happen only in case of an error/reset/abort */
3922 if (status
!= CTIO_SUCCESS
) {
3923 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01d,
3924 "Intermediate CTIO received"
3925 " (status %x)\n", status
);
3930 cmd
= qlt_ctio_to_cmd(vha
, rsp
, handle
, ctio
);
3934 se_cmd
= &cmd
->se_cmd
;
3935 cmd
->cmd_sent_to_fw
= 0;
3937 qlt_unmap_sg(vha
, cmd
);
3939 if (unlikely(status
!= CTIO_SUCCESS
)) {
3940 switch (status
& 0xFFFF) {
3941 case CTIO_INVALID_RX_ID
:
3942 if (printk_ratelimit())
3943 dev_info(&vha
->hw
->pdev
->dev
,
3944 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
3945 vha
->vp_idx
, cmd
->atio
.u
.isp24
.attr
,
3946 ((cmd
->ctio_flags
>> 9) & 0xf),
3950 case CTIO_LIP_RESET
:
3951 case CTIO_TARGET_RESET
:
3953 /* driver request abort via Terminate exchange */
3956 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf058,
3957 "qla_target(%d): CTIO with "
3958 "status %#x received, state %x, se_cmd %p, "
3959 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3960 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha
->vp_idx
,
3961 status
, cmd
->state
, se_cmd
);
3964 case CTIO_PORT_LOGGED_OUT
:
3965 case CTIO_PORT_UNAVAILABLE
:
3968 (status
& 0xFFFF) == CTIO_PORT_LOGGED_OUT
;
3970 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf059,
3971 "qla_target(%d): CTIO with %s status %x "
3972 "received (state %x, se_cmd %p)\n", vha
->vp_idx
,
3973 logged_out
? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3974 status
, cmd
->state
, se_cmd
);
3976 if (logged_out
&& cmd
->sess
) {
3978 * Session is already logged out, but we need
3979 * to notify initiator, who's not aware of this
3981 cmd
->sess
->send_els_logo
= 1;
3982 ql_dbg(ql_dbg_disc
, vha
, 0x20f8,
3983 "%s %d %8phC post del sess\n",
3984 __func__
, __LINE__
, cmd
->sess
->port_name
);
3986 qlt_schedule_sess_for_deletion(cmd
->sess
);
3990 case CTIO_DIF_ERROR
: {
3991 struct ctio_crc_from_fw
*crc
=
3992 (struct ctio_crc_from_fw
*)ctio
;
3993 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf073,
3994 "qla_target(%d): CTIO with DIF_ERROR status %x "
3995 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
3996 "expect_dif[0x%llx]\n",
3997 vha
->vp_idx
, status
, cmd
->state
, se_cmd
,
3998 *((u64
*)&crc
->actual_dif
[0]),
3999 *((u64
*)&crc
->expected_dif
[0]));
4001 qlt_handle_dif_error(qpair
, cmd
, ctio
);
4005 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05b,
4006 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
4007 vha
->vp_idx
, status
, cmd
->state
, se_cmd
);
4012 /* "cmd->aborted" means
4013 * cmd is already aborted/terminated, we don't
4014 * need to terminate again. The exchange is already
4015 * cleaned up/freed at FW level. Just cleanup at driver
4018 if ((cmd
->state
!= QLA_TGT_STATE_NEED_DATA
) &&
4020 cmd
->trc_flags
|= TRC_CTIO_ERR
;
4021 if (qlt_term_ctio_exchange(qpair
, ctio
, cmd
, status
))
4026 if (cmd
->state
== QLA_TGT_STATE_PROCESSED
) {
4027 cmd
->trc_flags
|= TRC_CTIO_DONE
;
4028 } else if (cmd
->state
== QLA_TGT_STATE_NEED_DATA
) {
4029 cmd
->state
= QLA_TGT_STATE_DATA_IN
;
4031 if (status
== CTIO_SUCCESS
)
4032 cmd
->write_data_transferred
= 1;
4034 ha
->tgt
.tgt_ops
->handle_data(cmd
);
4036 } else if (cmd
->aborted
) {
4037 cmd
->trc_flags
|= TRC_CTIO_ABORTED
;
4038 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01e,
4039 "Aborted command %p (tag %lld) finished\n", cmd
, se_cmd
->tag
);
4041 cmd
->trc_flags
|= TRC_CTIO_STRANGE
;
4042 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf05c,
4043 "qla_target(%d): A command in state (%d) should "
4044 "not return a CTIO complete\n", vha
->vp_idx
, cmd
->state
);
4047 if (unlikely(status
!= CTIO_SUCCESS
) &&
4049 ql_dbg(ql_dbg_tgt_mgt
, vha
, 0xf01f, "Finishing failed CTIO\n");
4053 ha
->tgt
.tgt_ops
->free_cmd(cmd
);
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}
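/*
 * This translates the FCP_CMND task attribute field (the FCP_PTA_*
 * values defined near the top of this file) into the target core's
 * TCM_*_TAG queueing attributes; unknown codes deliberately degrade to
 * ORDERED to stay on the safe side of SAM task ordering.
 */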
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	target_free_tag(sess->se_sess, &cmd->se_cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
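/*
 * Workqueue entry point for the command fast path: the command is
 * unlinked from vha->qla_cmd_list under cmd_list_lock (presumably so a
 * later abort_cmds_for_s_id() scan can no longer mark it), then handed
 * to __qlt_do_work() in process context.
 */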
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}
void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}
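/*
 * LUN-to-qpair assignment below: a lookup in tgt->lun_qpair_map picks
 * the queue pair for a LUN.  On a miss, the first qpair with
 * lun_cnt == 0 is claimed; otherwise the least-loaded qpair (smallest
 * lun_cnt) wins, and the choice is cached in the btree for subsequent
 * commands on the same LUN.
 */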
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
						    "Unable to insert lun %llx into lun_qpair_map\n",
						    cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}

			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				ql_log(ql_log_info, vha, 0xd039,
				    "Unable to insert lun %llx into lun_qpair_map\n",
				    cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}
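/*
 * Command allocation: tags come from the per-session sbitmap pool
 * (se_sess->sess_tag_pool) and index into the preallocated
 * sess_cmd_map array, so no memory allocation happens on the I/O path.
 */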
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;

	return cmd;
}
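/*
 * Fast-path dispatch policy used below: with qpairs available the work
 * runs on the CPU hinted by qlt_assign_qpair(); with MSI-X but no extra
 * qpairs, reads (rddata) stay on the interrupted CPU, presumably to
 * keep the completion cache-hot, while everything else follows the hint.
 */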
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
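/*
 * Task management IOCBs are not executed inline: qlt_issue_task_mgmt()
 * fills a qla_tgt_mgmt_cmd from the mempool and queues qlt_do_tmr_work()
 * on the qpair-hinted CPU, so the TMF itself runs in process context.
 */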
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (mcmd->tmr_func) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for unexisting "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
			"%s: se_sess %p / sess %p from"
			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
			" LOGO failed: %#x\n",
			__func__,
			fcport->se_sess,
			fcport,
			fcport->port_name, fcport->loop_id,
			fcport->d_id.b.domain, fcport->d_id.b.area,
			fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
* Schedules sessions with matching port_id/loop_id but different wwn for
* deletion. Returns existing session with matching wwn if present.
* Otherwise NULL.
*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}
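/*
 * The scan below keys commands by the 24-bit FC source ID packed as
 * domain:area:al_pa.  For illustration: s_id 01:02:03 packs to key
 * 0x010203 (domain << 16 | area << 8 | al_pa), matching sid_to_key().
 */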
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}
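/*
 * PLOGI/PRLI landing point.  In rough order: stale commands from the
 * initiator's port ID are marked aborted, colliding sessions are
 * invalidated, a plogi_ack tracking entry is linked, and then either a
 * new session is posted (no WWN match) or the existing one is updated
 * or scheduled for deletion depending on its disc_state.
 */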
static int qlt_handle_login(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, FC4_TYPE_UNKNOWN);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, FC4_TYPE_UNKNOWN);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;
	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;

	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibily of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);

		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 * handling.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;

	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}
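/*
 * Immediate-notify dispatch: every IMM_NTFY is acknowledged at the end
 * of the handler below unless a specific case takes ownership of the
 * IOCB and clears send_notify_ack (e.g. a queued reset, or the LINK
 * REINIT hold while waiting for possible PDISC/ADISC).
 */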
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 * This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit conformation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	ctio24->u.status1.residual = get_datalen_for_atio(atio);

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	/* Memory Barrier */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return 0;
}
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag, cpu;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
		    "qla_target(%d): %s: QFull CMD dropped[%d]\n",
		    vha->vp_idx, __func__,
		    vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
		    "qla_target(%d): %s: Allocation of cmd failed\n",
		    vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->q_full = 1;
	cmd->qpair = ha->base_qpair;
	cmd->se_cmd.map_cpu = cpu;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
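/*
 * Note on qlt_free_qfull_cmds() above: the q_full_list is spliced onto
 * a local list so q_full_lock is held only briefly; the IOCBs are then
 * issued under the qpair lock, and anything that could not be sent
 * (-ENOMEM) is spliced back for a later retry.
 */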
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
    uint16_t status)
{
	int rc = 0;
	struct scsi_qla_host *vha = qpair->vha;

	rc = __qlt_send_busy(qpair, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_busy(qpair, atio, qla_sam_status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
			     ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
 * qpair lock is assume to be held
 * rc = 0 : send terminate & abts respond
 * rc != 0: do not send term & abts respond
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha, 1);
			else
				ha->isp_ops->fw_dump(vha, 0);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}
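/*
 * Heuristic above: five failed attempts to send the ABTS response for
 * the same exchange within one jiffy are treated as a FW/driver
 * livelock; the firmware is dumped and an ISP abort is scheduled
 * instead of retrying forever.
 */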
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if ((entry->error_subcode1 == 0x1E) &&
		    (entry->error_subcode2 == 0)) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}
}
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}
}
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}

	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}
/* Must be called under tgt_mutex */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
					   be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			    __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	unsigned long flags;
	be_id_t s_id;
	int fn;
	void *iocb;
	u64 unpacked_lun;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted)
			goto out_term2;

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			    __func__, sess->port_name);
			goto out_term2;
		}
	}

	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
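/*
 * qphints layout used by qlt_add_target() below: slot 0 always points
 * at the base qpair; slot i + 1 mirrors ha->queue_pair_map[i], so a
 * hint can be found directly from a qpair index without another lookup.
 */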
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kcalloc(ha->max_qpairs + 1,
			       sizeof(struct qla_qpair_hint),
			       GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
		    "Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}
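
/*
 * Editorial sketch of the qpair-hint layout built above (an inference from
 * the code, not an authoritative description): tgt->qphints[0] always
 * mirrors ha->base_qpair, while tgt->qphints[1..ha->max_qpairs] shadow
 * ha->queue_pair_map[0..ha->max_qpairs-1].  The empty lun_qpair_map btree
 * initialized here is later used to remember which hint a given LUN was
 * assigned to, so commands for one LUN keep landing on one queue pair.
 */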
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}
void qlt_remove_target_resources(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->tgt.host_map, key, node)
		btree_remove32(&ha->tgt.host_map, key);

	btree_destroy32(&ha->tgt.host_map);
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}
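
/*
 * Editorial note: put_unaligned_be64() above serializes the 64-bit WWPN
 * passed in from configfs into big-endian bytes, the same layout as
 * vha->port_name[], which is what lets qlt_lport_register() below compare
 * the two with a plain memcmp(..., WWN_SIZE).  For example, WWPN
 * 0x21000024ff31e400 becomes the byte sequence 21:00:00:24:ff:31:e4:00.
 */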
/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}
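
/*
 * Editorial summary of the two helpers above (derived from the switches,
 * not from separate documentation):
 *
 *   qlini_mode      qlt_set_mode()    qlt_clear_mode()
 *   disabled        MODE_TARGET       MODE_UNKNOWN
 *   exclusive       MODE_TARGET       MODE_INITIATOR
 *   enabled         MODE_INITIATOR    MODE_INITIATOR
 *   dual            MODE_DUAL         MODE_INITIATOR
 */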
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		    "qla2x00_wait_for_hba_online() failed\n");
}
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;

	return fc4_feature;
}
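
/*
 * Editorial note: the value built above is the FC-4 Features field sent in
 * the RFF_ID name-server registration.  Per FC-GS, bit 0 advertises SCSI
 * target functionality and bit 1 advertises initiator functionality, so
 * dual mode registers both bits.
 */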
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 *
 * Returns 0 on success.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
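
/*
 * Editorial example of the ring-index arithmetic above (illustrative
 * numbers, not from the original source): with atio_q_length == 4096 and
 * atio_ring_index == 4094, a 3-entry ATIO advances the index through 4095
 * and then wraps it to 0, resetting atio_ring_ptr to the base of the ring.
 * Each consumed entry is re-stamped ATIO_PROCESSED, which is what makes the
 * producer/consumer comparison in the while condition terminate.
 */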
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (IS_QLA2071(ha)) {
				/* 4 ports Baker: Enable Interrupt Handshake */
				icb->msix_atio = 0;
				icb->firmware_options_2 |= BIT_26;
			} else {
				icb->msix_atio = cpu_to_le16(msix->entry);
				icb->firmware_options_2 &= ~BIT_26;
			}
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= BIT_26;
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}
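
/*
 * Editorial note: on ISPs that support it, the ATIO queue gets its own
 * MSI-X vector (msix_entries[2], registered with the firmware in
 * qlt_24xx_config_rings() above), and this handler drains it under
 * ha->tgt.atio_lock rather than the hardware_lock, keeping target-mode
 * ATIO processing off the normal response-queue path.
 */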
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
		struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
	response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/* do not reach for ATIO queue here. This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
				     sizeof(struct qla_tgt_vp_map),
				     GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
}
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}
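
/*
 * Editorial note: host_map is a 32-bit btree keyed by the 24-bit FC port
 * ID (d_id.b24, i.e. domain/area/al_pa packed into the low three bytes),
 * so an incoming frame's S_ID can be mapped straight to the owning vha.
 * SET_AL_PA inserts or updates that mapping; RESET_AL_PA drops it and
 * clears the cached d_id.
 */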
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}
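
/*
 * Editorial usage example (hypothetical command line, not from the
 * original source): the string parsed above is the qlini_mode module
 * parameter, e.g.
 *
 *     modprobe qla2xxx qlini_mode=disabled
 *
 * An unrecognized string makes this return false, which qlt_init() below
 * treats as a fatal module-load error.
 */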
int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}