4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014, 2015 Nexenta Systems, Inc. All rights reserved.
27 #include <sys/cpuvar.h>
28 #include <sys/types.h>
33 #include <sys/sunddi.h>
34 #include <sys/modctl.h>
35 #include <sys/sysmacros.h>
36 #include <sys/socket.h>
37 #include <sys/strsubr.h>
38 #include <sys/nvpair.h>
41 #include <sys/stmf_ioctl.h>
42 #include <sys/portif.h>
43 #include <sys/idm/idm.h>
44 #include <sys/idm/idm_conn_sm.h>
46 #include "iscsit_isns.h"
49 #define ISCSIT_VERSION BUILD_DATE "-1.18dev"
50 #define ISCSIT_NAME_VERSION "COMSTAR ISCSIT v" ISCSIT_VERSION
55 static int iscsit_drv_attach(dev_info_t
*, ddi_attach_cmd_t
);
56 static int iscsit_drv_detach(dev_info_t
*, ddi_detach_cmd_t
);
57 static int iscsit_drv_getinfo(dev_info_t
*, ddi_info_cmd_t
, void *, void **);
58 static int iscsit_drv_open(dev_t
*, int, int, cred_t
*);
59 static int iscsit_drv_close(dev_t
, int, int, cred_t
*);
60 static boolean_t
iscsit_drv_busy(void);
61 static int iscsit_drv_ioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
63 extern struct mod_ops mod_miscops
;
66 static struct cb_ops iscsit_cb_ops
= {
67 iscsit_drv_open
, /* cb_open */
68 iscsit_drv_close
, /* cb_close */
69 nodev
, /* cb_strategy */
74 iscsit_drv_ioctl
, /* cb_ioctl */
75 nodev
, /* cb_devmap */
77 nodev
, /* cb_segmap */
78 nochpoll
, /* cb_chpoll */
79 ddi_prop_op
, /* cb_prop_op */
80 NULL
, /* cb_streamtab */
84 nodev
, /* cb_awrite */
87 static struct dev_ops iscsit_dev_ops
= {
88 DEVO_REV
, /* devo_rev */
90 iscsit_drv_getinfo
, /* devo_getinfo */
91 nulldev
, /* devo_identify */
92 nulldev
, /* devo_probe */
93 iscsit_drv_attach
, /* devo_attach */
94 iscsit_drv_detach
, /* devo_detach */
95 nodev
, /* devo_reset */
96 &iscsit_cb_ops
, /* devo_cb_ops */
97 NULL
, /* devo_bus_ops */
98 NULL
, /* devo_power */
99 ddi_quiesce_not_needed
, /* quiesce */
102 static struct modldrv modldrv
= {
108 static struct modlinkage modlinkage
= {
115 iscsit_global_t iscsit_global
;
117 kmem_cache_t
*iscsit_status_pdu_cache
;
119 boolean_t iscsit_sm_logging
= B_FALSE
;
121 kmutex_t login_sm_session_mutex
;
123 static idm_status_t
iscsit_init(dev_info_t
*dip
);
124 static idm_status_t
iscsit_enable_svc(iscsit_hostinfo_t
*hostinfo
);
125 static void iscsit_disable_svc(void);
128 iscsit_check_cmdsn_and_queue(idm_pdu_t
*rx_pdu
);
131 iscsit_add_pdu_to_queue(iscsit_sess_t
*ist
, idm_pdu_t
*rx_pdu
);
134 iscsit_remove_pdu_from_queue(iscsit_sess_t
*ist
, uint32_t cmdsn
);
137 iscsit_process_pdu_in_queue(iscsit_sess_t
*ist
);
140 iscsit_rxpdu_queue_monitor_session(iscsit_sess_t
*ist
);
143 iscsit_rxpdu_queue_monitor(void *arg
);
146 iscsit_post_staged_pdu(idm_pdu_t
*rx_pdu
);
149 iscsit_post_scsi_cmd(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
);
152 iscsit_op_scsi_task_mgmt(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
155 iscsit_pdu_op_noop(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
158 iscsit_pdu_op_login_cmd(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
161 iscsit_pdu_op_text_cmd(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
164 iscsit_pdu_op_logout_cmd(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
166 int iscsit_cmd_window();
169 iscsit_sna_lt(uint32_t sn1
, uint32_t sn2
);
172 iscsit_set_cmdsn(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
);
175 iscsit_deferred_dispatch(idm_pdu_t
*rx_pdu
);
178 iscsit_deferred(void *rx_pdu_void
);
181 iscsit_conn_accept(idm_conn_t
*ic
);
184 iscsit_ffp_enabled(idm_conn_t
*ic
);
187 iscsit_ffp_disabled(idm_conn_t
*ic
, idm_ffp_disable_t disable_class
);
190 iscsit_conn_lost(idm_conn_t
*ic
);
193 iscsit_conn_destroy(idm_conn_t
*ic
);
195 static stmf_data_buf_t
*
196 iscsit_dbuf_alloc(scsi_task_t
*task
, uint32_t size
, uint32_t *pminsize
,
200 iscsit_dbuf_free(stmf_dbuf_store_t
*ds
, stmf_data_buf_t
*dbuf
);
203 iscsit_buf_xfer_cb(idm_buf_t
*idb
, idm_status_t status
);
206 iscsit_send_good_status_done(idm_pdu_t
*pdu
, idm_status_t status
);
209 iscsit_send_status_done(idm_pdu_t
*pdu
, idm_status_t status
);
212 iscsit_idm_to_stmf(idm_status_t idmrc
);
214 static iscsit_task_t
*
215 iscsit_task_alloc(iscsit_conn_t
*ict
);
218 iscsit_task_free(iscsit_task_t
*itask
);
220 static iscsit_task_t
*
221 iscsit_tm_task_alloc(iscsit_conn_t
*ict
);
224 iscsit_tm_task_free(iscsit_task_t
*itask
);
227 iscsit_task_start(iscsit_task_t
*itask
);
230 iscsit_task_done(iscsit_task_t
*itask
);
233 iscsit_status_pdu_constructor(void *pdu_void
, void *arg
, int flags
);
236 iscsit_pp_cb(struct stmf_port_provider
*pp
, int cmd
, void *arg
, uint32_t flags
);
238 static it_cfg_status_t
239 iscsit_config_merge(it_config_t
*cfg
);
242 iscsit_login_fail(idm_conn_t
*ic
);
244 static boolean_t
iscsit_cmdsn_in_window(iscsit_conn_t
*ict
, uint32_t cmdsn
);
245 static void iscsit_send_direct_scsi_resp(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
,
246 uint8_t response
, uint8_t cmd_status
);
247 static void iscsit_send_task_mgmt_resp(idm_pdu_t
*tm_resp_pdu
,
251 * MC/S: Out-of-order commands are staged on a session-wide wait
252 * queue until a system-tunable threshold is reached. A separate
253 * thread is used to scan the staging queue on all the session,
254 * If a delayed PDU does not arrive within a timeout, the target
255 * will advance to the staged PDU that is next in sequence, skipping
256 * over the missing PDU(s) to go past a hole in the sequence.
258 volatile int rxpdu_queue_threshold
= ISCSIT_RXPDU_QUEUE_THRESHOLD
;
260 static kmutex_t iscsit_rxpdu_queue_monitor_mutex
;
261 kthread_t
*iscsit_rxpdu_queue_monitor_thr_id
;
262 static kt_did_t iscsit_rxpdu_queue_monitor_thr_did
;
263 static boolean_t iscsit_rxpdu_queue_monitor_thr_running
;
264 static kcondvar_t iscsit_rxpdu_queue_monitor_cv
;
271 rw_init(&iscsit_global
.global_rwlock
, NULL
, RW_DRIVER
, NULL
);
272 mutex_init(&iscsit_global
.global_state_mutex
, NULL
,
274 iscsit_global
.global_svc_state
= ISE_DETACHED
;
276 mutex_init(&iscsit_rxpdu_queue_monitor_mutex
, NULL
,
278 mutex_init(&login_sm_session_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
279 iscsit_rxpdu_queue_monitor_thr_id
= NULL
;
280 iscsit_rxpdu_queue_monitor_thr_running
= B_FALSE
;
281 cv_init(&iscsit_rxpdu_queue_monitor_cv
, NULL
, CV_DEFAULT
, NULL
);
283 if ((rc
= mod_install(&modlinkage
)) != 0) {
284 mutex_destroy(&iscsit_global
.global_state_mutex
);
285 rw_destroy(&iscsit_global
.global_rwlock
);
293 _info(struct modinfo
*modinfop
)
295 return (mod_info(&modlinkage
, modinfop
));
303 rc
= mod_remove(&modlinkage
);
306 mutex_destroy(&iscsit_rxpdu_queue_monitor_mutex
);
307 mutex_destroy(&login_sm_session_mutex
);
308 cv_destroy(&iscsit_rxpdu_queue_monitor_cv
);
309 mutex_destroy(&iscsit_global
.global_state_mutex
);
310 rw_destroy(&iscsit_global
.global_rwlock
);
322 iscsit_drv_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
,
325 ulong_t instance
= getminor((dev_t
)arg
);
328 case DDI_INFO_DEVT2DEVINFO
:
329 *result
= iscsit_global
.global_dip
;
330 return (DDI_SUCCESS
);
332 case DDI_INFO_DEVT2INSTANCE
:
333 *result
= (void *)instance
;
334 return (DDI_SUCCESS
);
340 return (DDI_FAILURE
);
344 iscsit_drv_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
346 if (cmd
!= DDI_ATTACH
) {
347 return (DDI_FAILURE
);
350 if (ddi_get_instance(dip
) != 0) {
351 /* we only allow instance 0 to attach */
352 return (DDI_FAILURE
);
355 /* create the minor node */
356 if (ddi_create_minor_node(dip
, ISCSIT_MODNAME
, S_IFCHR
, 0,
357 DDI_PSEUDO
, 0) != DDI_SUCCESS
) {
358 cmn_err(CE_WARN
, "iscsit_drv_attach: "
359 "failed creating minor node");
360 return (DDI_FAILURE
);
363 if (iscsit_init(dip
) != IDM_STATUS_SUCCESS
) {
364 cmn_err(CE_WARN
, "iscsit_drv_attach: "
365 "failed to initialize");
366 ddi_remove_minor_node(dip
, NULL
);
367 return (DDI_FAILURE
);
370 iscsit_global
.global_svc_state
= ISE_DISABLED
;
371 iscsit_global
.global_dip
= dip
;
373 return (DDI_SUCCESS
);
378 iscsit_drv_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
380 if (cmd
!= DDI_DETACH
)
381 return (DDI_FAILURE
);
384 * drv_detach is called in a context that owns the
385 * device node for the /dev/pseudo device. If this thread blocks
386 * for any resource, other threads that need the /dev/pseudo device
387 * may end up in a deadlock with this thread.Hence, we use a
388 * separate lock just for the structures that drv_detach needs
391 mutex_enter(&iscsit_global
.global_state_mutex
);
392 if (iscsit_drv_busy()) {
393 mutex_exit(&iscsit_global
.global_state_mutex
);
397 iscsit_global
.global_dip
= NULL
;
398 ddi_remove_minor_node(dip
, NULL
);
400 ldi_ident_release(iscsit_global
.global_li
);
401 iscsit_global
.global_svc_state
= ISE_DETACHED
;
403 mutex_exit(&iscsit_global
.global_state_mutex
);
405 return (DDI_SUCCESS
);
410 iscsit_drv_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*credp
)
417 iscsit_drv_close(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
423 iscsit_drv_busy(void)
425 ASSERT(MUTEX_HELD(&iscsit_global
.global_state_mutex
));
427 switch (iscsit_global
.global_svc_state
) {
439 iscsit_drv_ioctl(dev_t drv
, int cmd
, intptr_t argp
, int flag
, cred_t
*cred
,
442 iscsit_ioc_set_config_t setcfg
;
443 iscsit_ioc_set_config32_t setcfg32
;
444 char *cfg_pnvlist
= NULL
;
445 nvlist_t
*cfg_nvlist
= NULL
;
446 it_config_t
*cfg
= NULL
;
450 if (drv_priv(cred
) != 0) {
454 mutex_enter(&iscsit_global
.global_state_mutex
);
457 * Validate ioctl requests against global service state
459 switch (iscsit_global
.global_svc_state
) {
461 if (cmd
== ISCSIT_IOC_DISABLE_SVC
) {
462 iscsit_global
.global_svc_state
= ISE_DISABLING
;
463 } else if (cmd
== ISCSIT_IOC_ENABLE_SVC
) {
464 /* Already enabled */
465 mutex_exit(&iscsit_global
.global_state_mutex
);
468 iscsit_global
.global_svc_state
= ISE_BUSY
;
472 if (cmd
== ISCSIT_IOC_ENABLE_SVC
) {
473 iscsit_global
.global_svc_state
= ISE_ENABLING
;
474 } else if (cmd
== ISCSIT_IOC_DISABLE_SVC
) {
475 /* Already disabled */
476 mutex_exit(&iscsit_global
.global_state_mutex
);
493 mutex_exit(&iscsit_global
.global_state_mutex
);
497 /* Handle ioctl request (enable/disable have already been handled) */
499 case ISCSIT_IOC_SET_CONFIG
:
500 /* Any errors must set state back to ISE_ENABLED */
501 switch (ddi_model_convert_from(flag
& FMODELS
)) {
502 case DDI_MODEL_ILP32
:
503 if (ddi_copyin((void *)argp
, &setcfg32
,
504 sizeof (iscsit_ioc_set_config32_t
), flag
) != 0) {
509 setcfg
.set_cfg_pnvlist
=
510 (char *)((uintptr_t)setcfg32
.set_cfg_pnvlist
);
511 setcfg
.set_cfg_vers
= setcfg32
.set_cfg_vers
;
512 setcfg
.set_cfg_pnvlist_len
=
513 setcfg32
.set_cfg_pnvlist_len
;
516 if (ddi_copyin((void *)argp
, &setcfg
,
517 sizeof (iscsit_ioc_set_config_t
), flag
) != 0) {
527 /* Check API version */
528 if (setcfg
.set_cfg_vers
!= ISCSIT_API_VERS0
) {
533 /* Config is in packed nvlist format so unpack it */
534 cfg_pnvlist
= kmem_alloc(setcfg
.set_cfg_pnvlist_len
,
536 ASSERT(cfg_pnvlist
!= NULL
);
538 if (ddi_copyin(setcfg
.set_cfg_pnvlist
, cfg_pnvlist
,
539 setcfg
.set_cfg_pnvlist_len
, flag
) != 0) {
544 rc
= nvlist_unpack(cfg_pnvlist
, setcfg
.set_cfg_pnvlist_len
,
545 &cfg_nvlist
, KM_SLEEP
);
550 /* Translate nvlist */
551 rc
= it_nv_to_config(cfg_nvlist
, &cfg
);
553 cmn_err(CE_WARN
, "Configuration is invalid");
558 rc
= iscsit_config_merge(cfg
);
563 it_config_free_cmn(cfg
);
565 kmem_free(cfg_pnvlist
, setcfg
.set_cfg_pnvlist_len
);
566 nvlist_free(cfg_nvlist
);
569 * Now that the reconfig is complete set our state back to
572 mutex_enter(&iscsit_global
.global_state_mutex
);
573 iscsit_global
.global_svc_state
= ISE_ENABLED
;
574 mutex_exit(&iscsit_global
.global_state_mutex
);
576 case ISCSIT_IOC_ENABLE_SVC
: {
577 iscsit_hostinfo_t hostinfo
;
579 if (ddi_copyin((void *)argp
, &hostinfo
.length
,
580 sizeof (hostinfo
.length
), flag
) != 0) {
581 mutex_enter(&iscsit_global
.global_state_mutex
);
582 iscsit_global
.global_svc_state
= ISE_DISABLED
;
583 mutex_exit(&iscsit_global
.global_state_mutex
);
587 if (hostinfo
.length
> sizeof (hostinfo
.fqhn
))
588 hostinfo
.length
= sizeof (hostinfo
.fqhn
);
590 if (ddi_copyin((void *)((caddr_t
)argp
+
591 sizeof (hostinfo
.length
)), &hostinfo
.fqhn
,
592 hostinfo
.length
, flag
) != 0) {
593 mutex_enter(&iscsit_global
.global_state_mutex
);
594 iscsit_global
.global_svc_state
= ISE_DISABLED
;
595 mutex_exit(&iscsit_global
.global_state_mutex
);
599 idmrc
= iscsit_enable_svc(&hostinfo
);
600 mutex_enter(&iscsit_global
.global_state_mutex
);
601 if (idmrc
== IDM_STATUS_SUCCESS
) {
602 iscsit_global
.global_svc_state
= ISE_ENABLED
;
605 iscsit_global
.global_svc_state
= ISE_DISABLED
;
607 mutex_exit(&iscsit_global
.global_state_mutex
);
610 case ISCSIT_IOC_DISABLE_SVC
:
611 iscsit_disable_svc();
612 mutex_enter(&iscsit_global
.global_state_mutex
);
613 iscsit_global
.global_svc_state
= ISE_DISABLED
;
614 mutex_exit(&iscsit_global
.global_state_mutex
);
619 mutex_enter(&iscsit_global
.global_state_mutex
);
620 iscsit_global
.global_svc_state
= ISE_ENABLED
;
621 mutex_exit(&iscsit_global
.global_state_mutex
);
628 iscsit_init(dev_info_t
*dip
)
632 rc
= ldi_ident_from_dip(dip
, &iscsit_global
.global_li
);
633 ASSERT(rc
== 0); /* Failure indicates invalid argument */
635 iscsit_global
.global_svc_state
= ISE_DISABLED
;
637 return (IDM_STATUS_SUCCESS
);
643 * registers all the configured targets and target portals with STMF
646 iscsit_enable_svc(iscsit_hostinfo_t
*hostinfo
)
648 stmf_port_provider_t
*pp
;
649 stmf_dbuf_store_t
*dbuf_store
;
650 boolean_t did_iscsit_isns_init
;
651 idm_status_t retval
= IDM_STATUS_SUCCESS
;
653 ASSERT(iscsit_global
.global_svc_state
== ISE_ENABLING
);
656 * Make sure that can tell if we have partially allocated
657 * in case we need to exit and tear down anything allocated.
659 iscsit_global
.global_tsih_pool
= NULL
;
660 iscsit_global
.global_dbuf_store
= NULL
;
661 iscsit_status_pdu_cache
= NULL
;
663 iscsit_global
.global_pp
= NULL
;
664 iscsit_global
.global_default_tpg
= NULL
;
665 did_iscsit_isns_init
= B_FALSE
;
666 iscsit_global
.global_dispatch_taskq
= NULL
;
668 /* Setup remaining fields in iscsit_global_t */
669 idm_refcnt_init(&iscsit_global
.global_refcnt
,
672 avl_create(&iscsit_global
.global_discovery_sessions
,
673 iscsit_sess_avl_compare
, sizeof (iscsit_sess_t
),
674 offsetof(iscsit_sess_t
, ist_tgt_ln
));
676 avl_create(&iscsit_global
.global_target_list
,
677 iscsit_tgt_avl_compare
, sizeof (iscsit_tgt_t
),
678 offsetof(iscsit_tgt_t
, target_global_ln
));
680 list_create(&iscsit_global
.global_deleted_target_list
,
681 sizeof (iscsit_tgt_t
),
682 offsetof(iscsit_tgt_t
, target_global_deleted_ln
));
684 avl_create(&iscsit_global
.global_tpg_list
,
685 iscsit_tpg_avl_compare
, sizeof (iscsit_tpg_t
),
686 offsetof(iscsit_tpg_t
, tpg_global_ln
));
688 avl_create(&iscsit_global
.global_ini_list
,
689 iscsit_ini_avl_compare
, sizeof (iscsit_ini_t
),
690 offsetof(iscsit_ini_t
, ini_global_ln
));
692 iscsit_global
.global_tsih_pool
= vmem_create("iscsit_tsih_pool",
693 (void *)1, ISCSI_MAX_TSIH
, 1, NULL
, NULL
, NULL
, 0,
694 VM_SLEEP
| VMC_IDENTIFIER
);
697 * Setup STMF dbuf store. Our buffers are bound to a specific
698 * connection so we really can't let STMF cache buffers for us.
699 * Consequently we'll just allocate one global buffer store.
701 dbuf_store
= stmf_alloc(STMF_STRUCT_DBUF_STORE
, 0, 0);
702 if (dbuf_store
== NULL
) {
703 retval
= IDM_STATUS_FAIL
;
704 goto tear_down_and_return
;
706 dbuf_store
->ds_alloc_data_buf
= iscsit_dbuf_alloc
;
707 dbuf_store
->ds_free_data_buf
= iscsit_dbuf_free
;
708 dbuf_store
->ds_port_private
= NULL
;
709 iscsit_global
.global_dbuf_store
= dbuf_store
;
711 /* Status PDU cache */
712 iscsit_status_pdu_cache
= kmem_cache_create("iscsit_status_pdu_cache",
713 sizeof (idm_pdu_t
) + sizeof (iscsi_scsi_rsp_hdr_t
), 8,
714 &iscsit_status_pdu_constructor
,
715 NULL
, NULL
, NULL
, NULL
, KM_SLEEP
);
717 /* Default TPG and portal */
718 iscsit_global
.global_default_tpg
= iscsit_tpg_createdefault();
719 if (iscsit_global
.global_default_tpg
== NULL
) {
720 retval
= IDM_STATUS_FAIL
;
721 goto tear_down_and_return
;
724 /* initialize isns client */
725 (void) iscsit_isns_init(hostinfo
);
726 did_iscsit_isns_init
= B_TRUE
;
728 /* Register port provider */
729 pp
= stmf_alloc(STMF_STRUCT_PORT_PROVIDER
, 0, 0);
731 retval
= IDM_STATUS_FAIL
;
732 goto tear_down_and_return
;
735 pp
->pp_portif_rev
= PORTIF_REV_1
;
737 pp
->pp_name
= ISCSIT_MODNAME
;
738 pp
->pp_cb
= iscsit_pp_cb
;
740 iscsit_global
.global_pp
= pp
;
743 if (stmf_register_port_provider(pp
) != STMF_SUCCESS
) {
744 retval
= IDM_STATUS_FAIL
;
745 goto tear_down_and_return
;
748 iscsit_global
.global_dispatch_taskq
= taskq_create("iscsit_dispatch",
749 1, minclsyspri
, 16, 16, TASKQ_PREPOPULATE
);
751 /* Scan staged PDUs, meaningful in MC/S situations */
752 iscsit_rxpdu_queue_monitor_start();
754 return (IDM_STATUS_SUCCESS
);
756 tear_down_and_return
:
758 if (iscsit_global
.global_dispatch_taskq
) {
759 taskq_destroy(iscsit_global
.global_dispatch_taskq
);
760 iscsit_global
.global_dispatch_taskq
= NULL
;
763 if (did_iscsit_isns_init
)
766 if (iscsit_global
.global_default_tpg
) {
767 iscsit_tpg_destroydefault(iscsit_global
.global_default_tpg
);
768 iscsit_global
.global_default_tpg
= NULL
;
771 if (iscsit_global
.global_pp
)
772 iscsit_global
.global_pp
= NULL
;
777 if (iscsit_status_pdu_cache
) {
778 kmem_cache_destroy(iscsit_status_pdu_cache
);
779 iscsit_status_pdu_cache
= NULL
;
782 if (iscsit_global
.global_dbuf_store
) {
783 stmf_free(iscsit_global
.global_dbuf_store
);
784 iscsit_global
.global_dbuf_store
= NULL
;
787 if (iscsit_global
.global_tsih_pool
) {
788 vmem_destroy(iscsit_global
.global_tsih_pool
);
789 iscsit_global
.global_tsih_pool
= NULL
;
792 avl_destroy(&iscsit_global
.global_ini_list
);
793 avl_destroy(&iscsit_global
.global_tpg_list
);
794 list_destroy(&iscsit_global
.global_deleted_target_list
);
795 avl_destroy(&iscsit_global
.global_target_list
);
796 avl_destroy(&iscsit_global
.global_discovery_sessions
);
798 idm_refcnt_destroy(&iscsit_global
.global_refcnt
);
806 * clean up all existing connections and deregister targets from STMF
809 iscsit_disable_svc(void)
813 ASSERT(iscsit_global
.global_svc_state
== ISE_DISABLING
);
815 iscsit_rxpdu_queue_monitor_stop();
817 /* tear down discovery sessions */
818 for (sess
= avl_first(&iscsit_global
.global_discovery_sessions
);
820 sess
= AVL_NEXT(&iscsit_global
.global_discovery_sessions
, sess
))
821 iscsit_sess_close(sess
);
824 * Passing NULL to iscsit_config_merge tells it to go to an empty
827 (void) iscsit_config_merge(NULL
);
830 * Wait until there are no more global references
832 idm_refcnt_wait_ref(&iscsit_global
.global_refcnt
);
833 idm_refcnt_destroy(&iscsit_global
.global_refcnt
);
836 * Default TPG must be destroyed after global_refcnt is 0.
838 iscsit_tpg_destroydefault(iscsit_global
.global_default_tpg
);
840 avl_destroy(&iscsit_global
.global_discovery_sessions
);
841 list_destroy(&iscsit_global
.global_deleted_target_list
);
842 avl_destroy(&iscsit_global
.global_target_list
);
843 avl_destroy(&iscsit_global
.global_tpg_list
);
844 avl_destroy(&iscsit_global
.global_ini_list
);
846 taskq_destroy(iscsit_global
.global_dispatch_taskq
);
850 stmf_free(iscsit_global
.global_dbuf_store
);
851 iscsit_global
.global_dbuf_store
= NULL
;
853 (void) stmf_deregister_port_provider(iscsit_global
.global_pp
);
854 stmf_free(iscsit_global
.global_pp
);
855 iscsit_global
.global_pp
= NULL
;
857 kmem_cache_destroy(iscsit_status_pdu_cache
);
858 iscsit_status_pdu_cache
= NULL
;
860 vmem_destroy(iscsit_global
.global_tsih_pool
);
861 iscsit_global
.global_tsih_pool
= NULL
;
868 * To take out a global hold, we must either own the global
869 * state mutex or we must be running inside of an ioctl that
870 * has set the global state to ISE_BUSY, ISE_DISABLING, or
871 * ISE_ENABLING. We don't track the "owner" for these flags,
872 * so just checking if they are set is enough for now.
874 ASSERT((iscsit_global
.global_svc_state
== ISE_ENABLING
) ||
875 (iscsit_global
.global_svc_state
== ISE_DISABLING
) ||
876 (iscsit_global
.global_svc_state
== ISE_BUSY
) ||
877 MUTEX_HELD(&iscsit_global
.global_state_mutex
));
879 idm_refcnt_hold(&iscsit_global
.global_refcnt
);
885 idm_refcnt_rele(&iscsit_global
.global_refcnt
);
889 iscsit_global_wait_ref()
891 idm_refcnt_wait_ref(&iscsit_global
.global_refcnt
);
900 iscsit_rx_pdu(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
)
902 iscsit_conn_t
*ict
= ic
->ic_handle
;
903 switch (IDM_PDU_OPCODE(rx_pdu
)) {
904 case ISCSI_OP_SCSI_CMD
:
905 ASSERT(0); /* Shouldn't happen */
906 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
908 case ISCSI_OP_SNACK_CMD
:
910 * We'll need to handle this when we support ERL1/2. For
911 * now we treat it as a protocol error.
913 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
914 idm_conn_event(ic
, CE_TRANSPORT_FAIL
, (uintptr_t)NULL
);
916 case ISCSI_OP_SCSI_TASK_MGT_MSG
:
917 if (iscsit_check_cmdsn_and_queue(rx_pdu
)) {
918 iscsit_set_cmdsn(ict
, rx_pdu
);
919 iscsit_op_scsi_task_mgmt(ict
, rx_pdu
);
922 case ISCSI_OP_NOOP_OUT
:
923 case ISCSI_OP_LOGIN_CMD
:
924 case ISCSI_OP_TEXT_CMD
:
925 case ISCSI_OP_LOGOUT_CMD
:
927 * If/when we switch to userland processing these PDU's
928 * will be handled by iscsitd.
930 iscsit_deferred_dispatch(rx_pdu
);
934 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
935 idm_conn_event(ic
, CE_TRANSPORT_FAIL
, (uintptr_t)NULL
);
942 iscsit_rx_pdu_error(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
, idm_status_t status
)
944 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
948 * iscsit_rx_scsi_rsp -- cause the connection to be closed if response rx'd
950 * A target sends an SCSI Response PDU, it should never receive one.
951 * This has been seen when running the Codemonicon suite of tests which
952 * does negative testing of the protocol. If such a condition occurs using
953 * a normal initiator it most likely means there's data corruption in the
954 * header and that's grounds for dropping the connection as well.
957 iscsit_rx_scsi_rsp(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
)
959 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
960 idm_conn_event(ic
, CE_TRANSPORT_FAIL
, 0);
964 iscsit_task_aborted(idm_task_t
*idt
, idm_status_t status
)
966 iscsit_task_t
*itask
= idt
->idt_private
;
969 case IDM_STATUS_SUSPENDED
:
971 case IDM_STATUS_ABORTED
:
972 mutex_enter(&itask
->it_mutex
);
973 itask
->it_aborted
= B_TRUE
;
975 * We rely on the fact that STMF tracks outstanding
976 * buffer transfers and will free all of our buffers
977 * before freeing the task so we don't need to
978 * explicitly free the buffers from iscsit/idm
980 if (itask
->it_stmf_abort
) {
981 mutex_exit(&itask
->it_mutex
);
983 * Task is no longer active
985 iscsit_task_done(itask
);
988 * STMF has already asked for this task to be aborted
990 * STMF specification is wrong... says to return
991 * STMF_ABORTED, the code actually looks for
992 * STMF_ABORT_SUCCESS.
994 stmf_task_lport_aborted(itask
->it_stmf_task
,
995 STMF_ABORT_SUCCESS
, STMF_IOF_LPORT_DONE
);
998 mutex_exit(&itask
->it_mutex
);
1000 * Tell STMF to stop processing the task.
1002 stmf_abort(STMF_QUEUE_TASK_ABORT
, itask
->it_stmf_task
,
1003 STMF_ABORTED
, NULL
);
1014 iscsit_client_notify(idm_conn_t
*ic
, idm_client_notify_t icn
,
1017 idm_status_t rc
= IDM_STATUS_SUCCESS
;
1020 * IDM client notifications will never occur at interrupt level
1021 * since they are generated from the connection state machine which
1022 * running on taskq threads.
1026 case CN_CONNECT_ACCEPT
:
1027 rc
= iscsit_conn_accept(ic
); /* No data */
1029 case CN_FFP_ENABLED
:
1030 rc
= iscsit_ffp_enabled(ic
); /* No data */
1032 case CN_FFP_DISABLED
:
1034 * Data indicates whether this was the result of an
1035 * explicit logout request.
1037 rc
= iscsit_ffp_disabled(ic
, (idm_ffp_disable_t
)data
);
1039 case CN_CONNECT_LOST
:
1040 rc
= iscsit_conn_lost(ic
);
1042 case CN_CONNECT_DESTROY
:
1043 rc
= iscsit_conn_destroy(ic
);
1047 * Force the login state machine to completion
1049 rc
= iscsit_login_fail(ic
);
1052 rc
= IDM_STATUS_REJECT
;
1060 * iscsit_update_statsn is invoked for all the PDUs which have the StatSN
1061 * field in the header. The StatSN is incremented if the IDM_PDU_ADVANCE_STATSN
1062 * flag is set in the pdu flags field. The StatSN is connection-wide and is
1063 * protected by the mutex ict_statsn_mutex. For Data-In PDUs, if the flag
1064 * IDM_TASK_PHASECOLLAPSE_REQ is set, the status (phase-collapse) is also filled
1067 iscsit_update_statsn(idm_task_t
*idm_task
, idm_pdu_t
*pdu
)
1069 iscsi_scsi_rsp_hdr_t
*rsp
= (iscsi_scsi_rsp_hdr_t
*)pdu
->isp_hdr
;
1070 iscsit_conn_t
*ict
= (iscsit_conn_t
*)pdu
->isp_ic
->ic_handle
;
1071 iscsit_task_t
*itask
= NULL
;
1072 scsi_task_t
*task
= NULL
;
1074 mutex_enter(&ict
->ict_statsn_mutex
);
1075 rsp
->statsn
= htonl(ict
->ict_statsn
);
1076 if (pdu
->isp_flags
& IDM_PDU_ADVANCE_STATSN
)
1078 mutex_exit(&ict
->ict_statsn_mutex
);
1081 * The last SCSI Data PDU passed for a command may also contain the
1082 * status if the status indicates termination with no expections, i.e.
1083 * no sense data or response involved. If the command completes with
1084 * an error, then the response and sense data will be sent in a
1085 * separate iSCSI Response PDU.
1087 if ((idm_task
) && (idm_task
->idt_flags
& IDM_TASK_PHASECOLLAPSE_REQ
)) {
1088 itask
= idm_task
->idt_private
;
1089 task
= itask
->it_stmf_task
;
1091 rsp
->cmd_status
= task
->task_scsi_status
;
1092 rsp
->flags
|= ISCSI_FLAG_DATA_STATUS
;
1093 if (task
->task_status_ctrl
& TASK_SCTRL_OVER
) {
1094 rsp
->flags
|= ISCSI_FLAG_CMD_OVERFLOW
;
1095 } else if (task
->task_status_ctrl
& TASK_SCTRL_UNDER
) {
1096 rsp
->flags
|= ISCSI_FLAG_CMD_UNDERFLOW
;
1098 rsp
->residual_count
= htonl(task
->task_resid
);
1101 * Removing the task from the session task list
1102 * just before the status is sent in the last
1105 iscsit_task_done(itask
);
1110 iscsit_build_hdr(idm_task_t
*idm_task
, idm_pdu_t
*pdu
, uint8_t opcode
)
1112 iscsit_task_t
*itask
= idm_task
->idt_private
;
1113 iscsi_data_rsp_hdr_t
*dh
= (iscsi_data_rsp_hdr_t
*)pdu
->isp_hdr
;
1116 * We acquired iscsit_sess_t.ist_sn_mutex in iscsit_xfer_scsi_data
1118 ASSERT(MUTEX_HELD(&itask
->it_ict
->ict_sess
->ist_sn_mutex
));
1120 * On incoming data, the target transfer tag and Lun is only
1121 * provided by the target if the A bit is set, Since the target
1122 * does not currently support Error Recovery Level 1, the A
1125 dh
->opcode
= opcode
;
1126 dh
->itt
= itask
->it_itt
;
1127 dh
->ttt
= ((opcode
& ISCSI_OPCODE_MASK
) == ISCSI_OP_SCSI_DATA_RSP
) ?
1128 ISCSI_RSVD_TASK_TAG
: itask
->it_ttt
;
1130 dh
->expcmdsn
= htonl(itask
->it_ict
->ict_sess
->ist_expcmdsn
);
1131 dh
->maxcmdsn
= htonl(itask
->it_ict
->ict_sess
->ist_maxcmdsn
);
1136 * data.flags and rtt.flags
1140 * statsn, residual_count and cmd_status (for phase collapse)
1148 iscsit_keepalive(idm_conn_t
*ic
)
1150 idm_pdu_t
*nop_in_pdu
;
1151 iscsi_nop_in_hdr_t
*nop_in
;
1152 iscsit_conn_t
*ict
= ic
->ic_handle
;
1155 * IDM noticed the connection has been idle for too long so it's
1156 * time to provoke some activity. Build and transmit an iSCSI
1157 * nop-in PDU -- when the initiator responds it will be counted
1158 * as "activity" and keep the connection alive.
1160 * We don't actually care about the response here at the iscsit level
1161 * so we will just throw it away without looking at it when it arrives.
1163 nop_in_pdu
= idm_pdu_alloc(sizeof (*nop_in
), 0);
1164 idm_pdu_init(nop_in_pdu
, ic
, NULL
, NULL
);
1165 nop_in
= (iscsi_nop_in_hdr_t
*)nop_in_pdu
->isp_hdr
;
1166 bzero(nop_in
, sizeof (*nop_in
));
1167 nop_in
->opcode
= ISCSI_OP_NOOP_IN
;
1168 nop_in
->flags
= ISCSI_FLAG_FINAL
;
1169 nop_in
->itt
= ISCSI_RSVD_TASK_TAG
;
1171 * When the target sends a NOP-In as a Ping, the target transfer tag
1172 * is set to a valid (not reserved) value and the initiator task tag
1173 * is set to ISCSI_RSVD_TASK_TAG (0xffffffff). In this case the StatSN
1174 * will always contain the next sequence number but the StatSN for the
1175 * connection is not advanced after this PDU is sent.
1177 nop_in_pdu
->isp_flags
|= IDM_PDU_SET_STATSN
;
1179 * This works because we don't currently allocate ttt's anywhere else
1180 * in iscsit so as long as we stay out of IDM's range we are safe.
1181 * If we need to allocate ttt's for other PDU's in the future this will
1182 * need to be improved.
1184 mutex_enter(&ict
->ict_mutex
);
1185 nop_in
->ttt
= ict
->ict_keepalive_ttt
;
1186 ict
->ict_keepalive_ttt
++;
1187 if (ict
->ict_keepalive_ttt
== ISCSI_RSVD_TASK_TAG
)
1188 ict
->ict_keepalive_ttt
= IDM_TASKIDS_MAX
;
1189 mutex_exit(&ict
->ict_mutex
);
1191 iscsit_pdu_tx(nop_in_pdu
);
1195 iscsit_conn_accept(idm_conn_t
*ic
)
1200 * We need to get a global hold here to ensure that the service
1201 * doesn't get shutdown prior to establishing a session. This
1202 * gets released in iscsit_conn_destroy().
1204 mutex_enter(&iscsit_global
.global_state_mutex
);
1205 if (iscsit_global
.global_svc_state
!= ISE_ENABLED
) {
1206 mutex_exit(&iscsit_global
.global_state_mutex
);
1207 return (IDM_STATUS_FAIL
);
1209 iscsit_global_hold();
1210 mutex_exit(&iscsit_global
.global_state_mutex
);
1213 * Allocate an associated iscsit structure to represent this
1214 * connection. We shouldn't really create a session until we
1215 * get the first login PDU.
1217 ict
= kmem_zalloc(sizeof (*ict
), KM_SLEEP
);
1220 ict
->ict_statsn
= 1;
1221 ict
->ict_keepalive_ttt
= IDM_TASKIDS_MAX
; /* Avoid IDM TT range */
1222 ic
->ic_handle
= ict
;
1223 mutex_init(&ict
->ict_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1224 mutex_init(&ict
->ict_statsn_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1225 idm_refcnt_init(&ict
->ict_refcnt
, ict
);
1228 * Initialize login state machine
1230 if (iscsit_login_sm_init(ict
) != IDM_STATUS_SUCCESS
) {
1231 iscsit_global_rele();
1233 * Cleanup the ict after idm notifies us about this failure
1235 return (IDM_STATUS_FAIL
);
1238 return (IDM_STATUS_SUCCESS
);
1242 iscsit_conn_reinstate(iscsit_conn_t
*reinstate_ict
, iscsit_conn_t
*new_ict
)
1244 idm_status_t result
;
1247 * Note in new connection state that this connection is
1248 * reinstating an existing connection.
1250 new_ict
->ict_reinstating
= B_TRUE
;
1251 new_ict
->ict_reinstate_conn
= reinstate_ict
;
1252 new_ict
->ict_statsn
= reinstate_ict
->ict_statsn
;
1255 * Now generate connection state machine event to existing connection
1256 * so that it starts the cleanup process.
1258 result
= idm_conn_reinstate_event(reinstate_ict
->ict_ic
,
1265 iscsit_conn_hold(iscsit_conn_t
*ict
)
1267 idm_refcnt_hold(&ict
->ict_refcnt
);
1271 iscsit_conn_rele(iscsit_conn_t
*ict
)
1273 idm_refcnt_rele(&ict
->ict_refcnt
);
1277 iscsit_conn_dispatch_hold(iscsit_conn_t
*ict
)
1279 idm_refcnt_hold(&ict
->ict_dispatch_refcnt
);
1283 iscsit_conn_dispatch_rele(iscsit_conn_t
*ict
)
1285 idm_refcnt_rele(&ict
->ict_dispatch_refcnt
);
1289 iscsit_login_fail(idm_conn_t
*ic
)
1291 iscsit_conn_t
*ict
= ic
->ic_handle
;
1293 /* Generate login state machine event */
1294 iscsit_login_sm_event(ict
, ILE_LOGIN_CONN_ERROR
, NULL
);
1296 return (IDM_STATUS_SUCCESS
);
1300 iscsit_ffp_enabled(idm_conn_t
*ic
)
1302 iscsit_conn_t
*ict
= ic
->ic_handle
;
1304 /* Generate session state machine event */
1305 iscsit_sess_sm_event(ict
->ict_sess
, SE_CONN_LOGGED_IN
, ict
);
1307 return (IDM_STATUS_SUCCESS
);
1311 iscsit_ffp_disabled(idm_conn_t
*ic
, idm_ffp_disable_t disable_class
)
1313 iscsit_conn_t
*ict
= ic
->ic_handle
;
1315 /* Generate session state machine event */
1316 switch (disable_class
) {
1318 iscsit_sess_sm_event(ict
->ict_sess
, SE_CONN_FFP_FAIL
, ict
);
1320 case FD_CONN_LOGOUT
:
1321 iscsit_sess_sm_event(ict
->ict_sess
, SE_CONN_FFP_DISABLE
, ict
);
1323 case FD_SESS_LOGOUT
:
1324 iscsit_sess_sm_event(ict
->ict_sess
, SE_SESSION_CLOSE
, ict
);
1330 return (IDM_STATUS_SUCCESS
);
1334 iscsit_conn_lost(idm_conn_t
*ic
)
1336 iscsit_conn_t
*ict
= ic
->ic_handle
;
1337 iscsit_sess_t
*ist
= ict
->ict_sess
;
1338 iscsit_cbuf_t
*cbuf
;
1342 mutex_enter(&ict
->ict_mutex
);
1343 ict
->ict_lost
= B_TRUE
;
1344 mutex_exit(&ict
->ict_mutex
);
1346 * scrub the staging queue for all PDUs on this connection
1349 mutex_enter(&ist
->ist_sn_mutex
);
1350 for (cbuf
= ist
->ist_rxpdu_queue
, i
= 0;
1351 ((cbuf
->cb_num_elems
> 0) && (i
< ISCSIT_RXPDU_QUEUE_LEN
));
1353 if (((rx_pdu
= cbuf
->cb_buffer
[i
]) != NULL
) &&
1354 (rx_pdu
->isp_ic
== ic
)) {
1355 /* conn is lost, drop the pdu */
1356 DTRACE_PROBE3(scrubbing__staging__queue
,
1357 iscsit_sess_t
*, ist
, idm_conn_t
*, ic
,
1358 idm_pdu_t
*, rx_pdu
);
1359 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
1360 cbuf
->cb_buffer
[i
] = NULL
;
1361 cbuf
->cb_num_elems
--;
1362 iscsit_conn_dispatch_rele(ict
);
1365 mutex_exit(&ist
->ist_sn_mutex
);
1368 * Make sure there aren't any PDU's transitioning from the receive
1369 * handler to the dispatch taskq.
1371 idm_refcnt_wait_ref(&ict
->ict_dispatch_refcnt
);
1373 return (IDM_STATUS_SUCCESS
);
1377 iscsit_conn_destroy(idm_conn_t
*ic
)
1379 iscsit_conn_t
*ict
= ic
->ic_handle
;
1381 mutex_enter(&ict
->ict_mutex
);
1382 ict
->ict_destroyed
= B_TRUE
;
1383 mutex_exit(&ict
->ict_mutex
);
1385 /* Generate session state machine event */
1386 if (ict
->ict_sess
!= NULL
) {
1388 * Session state machine will call iscsit_conn_destroy_done()
1389 * when it has removed references to this connection.
1391 iscsit_sess_sm_event(ict
->ict_sess
, SE_CONN_FAIL
, ict
);
1394 idm_refcnt_wait_ref(&ict
->ict_refcnt
);
1396 * The session state machine does not need to post
1397 * events to IDM any longer, so it is safe to set
1398 * the idm connection reference to NULL
1402 /* Reap the login state machine */
1403 iscsit_login_sm_fini(ict
);
1405 /* Clean up any text command remnants */
1406 iscsit_text_cmd_fini(ict
);
1408 mutex_destroy(&ict
->ict_mutex
);
1409 idm_refcnt_destroy(&ict
->ict_refcnt
);
1410 kmem_free(ict
, sizeof (*ict
));
1412 iscsit_global_rele();
1414 return (IDM_STATUS_SUCCESS
);
1418 iscsit_conn_logout(iscsit_conn_t
*ict
)
1421 * If the iscsi connection is active, then
1422 * logout the IDM connection by sending a
1423 * CE_LOGOUT_SESSION_SUCCESS, else, no action
1424 * needs to be taken because the connection
1425 * is already in the teardown process.
1427 mutex_enter(&ict
->ict_mutex
);
1428 if (ict
->ict_lost
== B_FALSE
&& ict
->ict_destroyed
== B_FALSE
) {
1429 idm_conn_event(ict
->ict_ic
, CE_LOGOUT_SESSION_SUCCESS
,
1432 mutex_exit(&ict
->ict_mutex
);
1436 * STMF-related functions
1438 * iSCSI to STMF mapping
1441 * Connection == bound to local port but not itself a local port
1443 * Target portal (group?) == local port (really but we're not going to do this)
1444 * iscsit needs to map connections to local ports (whatever we decide
1450 static stmf_data_buf_t
*
1451 iscsit_dbuf_alloc(scsi_task_t
*task
, uint32_t size
, uint32_t *pminsize
,
1454 iscsit_task_t
*itask
= task
->task_port_private
;
1455 idm_buf_t
*idm_buffer
;
1457 stmf_data_buf_t
*result
;
1461 * If the requested size is larger than MaxBurstLength and the
1462 * given pminsize is also larger than MaxBurstLength, then the
1463 * allocation fails (dbuf = NULL) and pminsize is modified to
1464 * be equal to MaxBurstLength. stmf/sbd then should re-invoke
1465 * this function with the corrected values for transfer.
1468 if (size
<= itask
->it_ict
->ict_op
.op_max_burst_length
) {
1470 } else if (*pminsize
<= itask
->it_ict
->ict_op
.op_max_burst_length
) {
1471 bsize
= itask
->it_ict
->ict_op
.op_max_burst_length
;
1473 *pminsize
= itask
->it_ict
->ict_op
.op_max_burst_length
;
1478 idm_buffer
= idm_buf_alloc(itask
->it_ict
->ict_ic
, NULL
, bsize
);
1479 if (idm_buffer
!= NULL
) {
1480 result
= stmf_alloc(STMF_STRUCT_DATA_BUF
,
1481 sizeof (iscsit_buf_t
), 0);
1482 if (result
!= NULL
) {
1483 /* Fill in stmf_data_buf_t */
1484 ibuf
= result
->db_port_private
;
1485 ibuf
->ibuf_idm_buf
= idm_buffer
;
1486 ibuf
->ibuf_stmf_buf
= result
;
1487 ibuf
->ibuf_is_immed
= B_FALSE
;
1488 result
->db_flags
= DB_DONT_CACHE
;
1489 result
->db_buf_size
= bsize
;
1490 result
->db_data_size
= bsize
;
1491 result
->db_sglist_length
= 1;
1492 result
->db_sglist
[0].seg_addr
= idm_buffer
->idb_buf
;
1493 result
->db_sglist
[0].seg_length
=
1494 idm_buffer
->idb_buflen
;
1498 /* Couldn't get the stmf_data_buf_t so free the buffer */
1499 idm_buf_free(idm_buffer
);
1507 iscsit_dbuf_free(stmf_dbuf_store_t
*ds
, stmf_data_buf_t
*dbuf
)
1509 iscsit_buf_t
*ibuf
= dbuf
->db_port_private
;
1511 if (ibuf
->ibuf_is_immed
) {
1513 * The iscsit_buf_t structure itself will be freed with its
1514 * associated task. Here we just need to free the PDU that
1515 * held the immediate data.
1517 idm_pdu_complete(ibuf
->ibuf_immed_data_pdu
, IDM_STATUS_SUCCESS
);
1518 ibuf
->ibuf_immed_data_pdu
= 0;
1520 idm_buf_free(ibuf
->ibuf_idm_buf
);
1527 iscsit_xfer_scsi_data(scsi_task_t
*task
, stmf_data_buf_t
*dbuf
,
1530 iscsit_task_t
*iscsit_task
= task
->task_port_private
;
1531 iscsit_sess_t
*ict_sess
= iscsit_task
->it_ict
->ict_sess
;
1532 iscsit_buf_t
*ibuf
= dbuf
->db_port_private
;
1536 * If we are aborting then we can ignore this request
1538 if (iscsit_task
->it_stmf_abort
) {
1539 return (STMF_SUCCESS
);
1543 * If it's not immediate data then start the transfer
1545 ASSERT(ibuf
->ibuf_is_immed
== B_FALSE
);
1546 if (dbuf
->db_flags
& DB_DIRECTION_TO_RPORT
) {
1548 * The DB_SEND_STATUS_GOOD flag in the STMF data buffer allows
1549 * the port provider to phase-collapse, i.e. send the status
1550 * along with the final data PDU for the command. The port
1551 * provider passes this request to the transport layer by
1552 * setting a flag IDM_TASK_PHASECOLLAPSE_REQ in the task.
1554 if (dbuf
->db_flags
& DB_SEND_STATUS_GOOD
)
1555 iscsit_task
->it_idm_task
->idt_flags
|=
1556 IDM_TASK_PHASECOLLAPSE_REQ
;
1558 * IDM will call iscsit_build_hdr so lock now to serialize
1559 * access to the SN values. We need to lock here to enforce
1562 mutex_enter(&ict_sess
->ist_sn_mutex
);
1563 idm_rc
= idm_buf_tx_to_ini(iscsit_task
->it_idm_task
,
1564 ibuf
->ibuf_idm_buf
, dbuf
->db_relative_offset
,
1565 dbuf
->db_data_size
, &iscsit_buf_xfer_cb
, dbuf
);
1566 mutex_exit(&ict_sess
->ist_sn_mutex
);
1568 return (iscsit_idm_to_stmf(idm_rc
));
1569 } else if (dbuf
->db_flags
& DB_DIRECTION_FROM_RPORT
) {
1570 /* Grab the SN lock (see comment above) */
1571 mutex_enter(&ict_sess
->ist_sn_mutex
);
1572 idm_rc
= idm_buf_rx_from_ini(iscsit_task
->it_idm_task
,
1573 ibuf
->ibuf_idm_buf
, dbuf
->db_relative_offset
,
1574 dbuf
->db_data_size
, &iscsit_buf_xfer_cb
, dbuf
);
1575 mutex_exit(&ict_sess
->ist_sn_mutex
);
1577 return (iscsit_idm_to_stmf(idm_rc
));
1580 /* What are we supposed to do if there is no direction? */
1581 return (STMF_INVALID_ARG
);
1585 iscsit_buf_xfer_cb(idm_buf_t
*idb
, idm_status_t status
)
1587 iscsit_task_t
*itask
= idb
->idb_task_binding
->idt_private
;
1588 stmf_data_buf_t
*dbuf
= idb
->idb_cb_arg
;
1590 dbuf
->db_xfer_status
= iscsit_idm_to_stmf(status
);
1593 * If the task has been aborted then we don't need to call STMF
1595 if (itask
->it_stmf_abort
) {
1600 * For ISCSI over TCP (not iSER), the last SCSI Data PDU passed
1601 * for a successful command contains the status as requested by
1602 * by COMSTAR (via the DB_SEND_STATUS_GOOD flag). But the iSER
1603 * transport does not support phase-collapse. So pretend we are
1604 * COMSTAR and send the status in a separate PDU now.
1606 if (idb
->idb_task_binding
->idt_flags
& IDM_TASK_PHASECOLLAPSE_SUCCESS
) {
1608 * Mark task complete and notify COMSTAR
1609 * that the status has been sent.
1611 itask
->it_idm_task
->idt_state
= TASK_COMPLETE
;
1612 stmf_send_status_done(itask
->it_stmf_task
,
1613 iscsit_idm_to_stmf(status
), STMF_IOF_LPORT_DONE
);
1614 } else if ((dbuf
->db_flags
& DB_SEND_STATUS_GOOD
) &&
1615 status
== IDM_STATUS_SUCCESS
) {
1618 * The iscsi target port provider - for iSER, emulates the
1619 * DB_SEND_STATUS_GOOD optimization if requested by STMF;
1620 * it sends the status in a separate PDU after the data
1621 * transfer. In this case the port provider should first
1622 * call stmf_data_xfer_done() to mark the transfer complete
1623 * and then send the status. Although STMF will free the
1624 * buffer at the time the task is freed, even if the transfer
1625 * is not marked complete, this behavior makes statistics
1626 * gathering and task state tracking more difficult than it
1629 stmf_data_xfer_done(itask
->it_stmf_task
, dbuf
, 0);
1630 if (iscsit_send_scsi_status(itask
->it_stmf_task
, 0)
1632 stmf_send_status_done(itask
->it_stmf_task
,
1633 STMF_FAILURE
, STMF_IOF_LPORT_DONE
);
1636 stmf_data_xfer_done(itask
->it_stmf_task
, dbuf
, 0);
1637 /* don't touch dbuf after stmf_data_xfer_done */
1644 iscsit_send_scsi_status(scsi_task_t
*task
, uint32_t ioflags
)
1646 iscsit_task_t
*itask
= task
->task_port_private
;
1647 iscsi_scsi_rsp_hdr_t
*rsp
;
1652 * If this task is aborted then we don't need to respond.
1654 if (itask
->it_stmf_abort
) {
1655 return (STMF_SUCCESS
);
1659 * If this is a task management status, handle it elsewhere.
1661 if (task
->task_mgmt_function
!= TM_NONE
) {
1663 * Don't wait for the PDU completion to tell STMF
1664 * the task is done -- it doesn't really matter and
1665 * it makes life complicated if STMF later asks us to
1666 * abort the request and we don't know whether the
1667 * status has been sent or not.
1669 itask
->it_tm_responded
= B_TRUE
;
1670 iscsit_send_task_mgmt_resp(itask
->it_tm_pdu
,
1671 (task
->task_completion_status
== STMF_SUCCESS
) ?
1672 SCSI_TCP_TM_RESP_COMPLETE
: SCSI_TCP_TM_RESP_FUNC_NOT_SUPP
);
1673 stmf_send_status_done(task
, STMF_SUCCESS
,
1674 STMF_IOF_LPORT_DONE
);
1675 return (STMF_SUCCESS
);
1679 * Remove the task from the session task list
1681 iscsit_task_done(itask
);
1686 mutex_enter(&itask
->it_idm_task
->idt_mutex
);
1687 if ((itask
->it_idm_task
->idt_state
== TASK_ACTIVE
) &&
1688 (task
->task_completion_status
== STMF_SUCCESS
) &&
1689 (task
->task_sense_length
== 0) &&
1690 (task
->task_resid
== 0)) {
1691 itask
->it_idm_task
->idt_state
= TASK_COMPLETE
;
1692 /* PDU callback releases task hold */
1693 idm_task_hold(itask
->it_idm_task
);
1694 mutex_exit(&itask
->it_idm_task
->idt_mutex
);
1696 * Fast path. Cached status PDU's are already
1697 * initialized. We just need to fill in
1698 * connection and task information. StatSN is
1699 * incremented by 1 for every status sent a
1702 pdu
= kmem_cache_alloc(iscsit_status_pdu_cache
, KM_SLEEP
);
1703 pdu
->isp_ic
= itask
->it_ict
->ict_ic
;
1704 pdu
->isp_private
= itask
;
1705 pdu
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
1707 rsp
= (iscsi_scsi_rsp_hdr_t
*)pdu
->isp_hdr
;
1708 rsp
->itt
= itask
->it_itt
;
1710 * ExpDataSN is the number of R2T and Data-In (read)
1711 * PDUs the target has sent for the SCSI command.
1713 * Since there is no support for bidirectional transfer
1714 * yet, either idt_exp_datasn or idt_exp_rttsn, but not
1715 * both is valid at any time
1717 rsp
->expdatasn
= (itask
->it_idm_task
->idt_exp_datasn
!= 0) ?
1718 htonl(itask
->it_idm_task
->idt_exp_datasn
):
1719 htonl(itask
->it_idm_task
->idt_exp_rttsn
);
1720 rsp
->cmd_status
= task
->task_scsi_status
;
1722 return (STMF_SUCCESS
);
1724 if (itask
->it_idm_task
->idt_state
!= TASK_ACTIVE
) {
1725 mutex_exit(&itask
->it_idm_task
->idt_mutex
);
1726 return (STMF_FAILURE
);
1728 itask
->it_idm_task
->idt_state
= TASK_COMPLETE
;
1729 /* PDU callback releases task hold */
1730 idm_task_hold(itask
->it_idm_task
);
1731 mutex_exit(&itask
->it_idm_task
->idt_mutex
);
1733 resp_datalen
= (task
->task_sense_length
== 0) ? 0 :
1734 (task
->task_sense_length
+ sizeof (uint16_t));
1736 pdu
= idm_pdu_alloc(sizeof (iscsi_hdr_t
), resp_datalen
);
1737 idm_pdu_init(pdu
, itask
->it_ict
->ict_ic
, itask
,
1738 iscsit_send_status_done
);
1739 pdu
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
1741 rsp
= (iscsi_scsi_rsp_hdr_t
*)pdu
->isp_hdr
;
1742 bzero(rsp
, sizeof (*rsp
));
1743 rsp
->opcode
= ISCSI_OP_SCSI_RSP
;
1745 rsp
->flags
= ISCSI_FLAG_FINAL
;
1746 if (task
->task_status_ctrl
& TASK_SCTRL_OVER
) {
1747 rsp
->flags
|= ISCSI_FLAG_CMD_OVERFLOW
;
1748 } else if (task
->task_status_ctrl
& TASK_SCTRL_UNDER
) {
1749 rsp
->flags
|= ISCSI_FLAG_CMD_UNDERFLOW
;
1752 rsp
->bi_residual_count
= 0;
1753 rsp
->residual_count
= htonl(task
->task_resid
);
1754 rsp
->itt
= itask
->it_itt
;
1755 rsp
->response
= ISCSI_STATUS_CMD_COMPLETED
;
1756 rsp
->expdatasn
= (itask
->it_idm_task
->idt_exp_datasn
!= 0) ?
1757 htonl(itask
->it_idm_task
->idt_exp_datasn
):
1758 htonl(itask
->it_idm_task
->idt_exp_rttsn
);
1759 rsp
->cmd_status
= task
->task_scsi_status
;
1760 if (task
->task_sense_length
!= 0) {
1762 * Add a byte to provide the sense length in
1765 *(uint16_t *)((void *)pdu
->isp_data
) =
1766 htons(task
->task_sense_length
);
1767 bcopy(task
->task_sense_data
,
1768 (uint8_t *)pdu
->isp_data
+
1770 task
->task_sense_length
);
1771 hton24(rsp
->dlength
, resp_datalen
);
1774 DTRACE_PROBE5(iscsi__scsi__response
,
1775 iscsit_conn_t
*, itask
->it_ict
,
1776 uint8_t, rsp
->response
,
1777 uint8_t, rsp
->cmd_status
,
1779 scsi_task_t
*, task
);
1783 return (STMF_SUCCESS
);
1789 iscsit_send_good_status_done(idm_pdu_t
*pdu
, idm_status_t status
)
1791 iscsit_task_t
*itask
;
1794 itask
= pdu
->isp_private
;
1795 aborted
= itask
->it_stmf_abort
;
1798 * After releasing the hold the task may be freed at any time so
1801 idm_task_rele(itask
->it_idm_task
);
1803 stmf_send_status_done(itask
->it_stmf_task
,
1804 iscsit_idm_to_stmf(pdu
->isp_status
), STMF_IOF_LPORT_DONE
);
1806 kmem_cache_free(iscsit_status_pdu_cache
, pdu
);
1811 iscsit_send_status_done(idm_pdu_t
*pdu
, idm_status_t status
)
1813 iscsit_task_t
*itask
;
1816 itask
= pdu
->isp_private
;
1817 aborted
= itask
->it_stmf_abort
;
1820 * After releasing the hold the task may be freed at any time so
1823 idm_task_rele(itask
->it_idm_task
);
1825 stmf_send_status_done(itask
->it_stmf_task
,
1826 iscsit_idm_to_stmf(pdu
->isp_status
), STMF_IOF_LPORT_DONE
);
1833 iscsit_lport_task_free(scsi_task_t
*task
)
1835 iscsit_task_t
*itask
= task
->task_port_private
;
1837 /* We only call idm_task_start for regular tasks, not task management */
1838 if (task
->task_mgmt_function
== TM_NONE
) {
1839 idm_task_done(itask
->it_idm_task
);
1840 iscsit_task_free(itask
);
1843 iscsit_tm_task_free(itask
);
1849 iscsit_abort(stmf_local_port_t
*lport
, int abort_cmd
, void *arg
, uint32_t flags
)
1851 scsi_task_t
*st
= (scsi_task_t
*)arg
;
1852 iscsit_task_t
*iscsit_task
;
1856 * If this is a task management request then there's really not much to
1859 if (st
->task_mgmt_function
!= TM_NONE
) {
1860 return (STMF_ABORT_SUCCESS
);
1864 * Regular task, start cleaning up
1866 iscsit_task
= st
->task_port_private
;
1867 idt
= iscsit_task
->it_idm_task
;
1868 mutex_enter(&iscsit_task
->it_mutex
);
1869 iscsit_task
->it_stmf_abort
= B_TRUE
;
1870 if (iscsit_task
->it_aborted
) {
1871 mutex_exit(&iscsit_task
->it_mutex
);
1873 * Task is no longer active
1875 iscsit_task_done(iscsit_task
);
1878 * STMF specification is wrong... says to return
1879 * STMF_ABORTED, the code actually looks for
1880 * STMF_ABORT_SUCCESS.
1882 return (STMF_ABORT_SUCCESS
);
1884 mutex_exit(&iscsit_task
->it_mutex
);
1886 * Call IDM to abort the task. Due to a variety of
1887 * circumstances the task may already be in the process of
1889 * We'll let IDM worry about rationalizing all that except
1890 * for one particular instance. If the state of the task
1891 * is TASK_COMPLETE, we need to indicate to the framework
1892 * that we are in fact done. This typically happens with
1893 * framework-initiated task management type requests
1894 * (e.g. abort task).
1896 if (idt
->idt_state
== TASK_COMPLETE
) {
1897 idm_refcnt_wait_ref(&idt
->idt_refcnt
);
1898 return (STMF_ABORT_SUCCESS
);
1900 idm_task_abort(idt
->idt_ic
, idt
, AT_TASK_MGMT_ABORT
);
1901 return (STMF_SUCCESS
);
1910 iscsit_ctl(stmf_local_port_t
*lport
, int cmd
, void *arg
)
1912 iscsit_tgt_t
*iscsit_tgt
;
1914 ASSERT((cmd
== STMF_CMD_LPORT_ONLINE
) ||
1915 (cmd
== STMF_ACK_LPORT_ONLINE_COMPLETE
) ||
1916 (cmd
== STMF_CMD_LPORT_OFFLINE
) ||
1917 (cmd
== STMF_ACK_LPORT_OFFLINE_COMPLETE
));
1919 iscsit_tgt
= (iscsit_tgt_t
*)lport
->lport_port_private
;
1922 case STMF_CMD_LPORT_ONLINE
:
1923 iscsit_tgt_sm_event(iscsit_tgt
, TE_STMF_ONLINE_REQ
);
1925 case STMF_CMD_LPORT_OFFLINE
:
1926 iscsit_tgt_sm_event(iscsit_tgt
, TE_STMF_OFFLINE_REQ
);
1928 case STMF_ACK_LPORT_ONLINE_COMPLETE
:
1929 iscsit_tgt_sm_event(iscsit_tgt
, TE_STMF_ONLINE_COMPLETE_ACK
);
1931 case STMF_ACK_LPORT_OFFLINE_COMPLETE
:
1932 iscsit_tgt_sm_event(iscsit_tgt
, TE_STMF_OFFLINE_COMPLETE_ACK
);
1940 static stmf_status_t
1941 iscsit_idm_to_stmf(idm_status_t idmrc
)
1944 case IDM_STATUS_SUCCESS
:
1945 return (STMF_SUCCESS
);
1947 return (STMF_FAILURE
);
1953 iscsit_op_scsi_cmd(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
)
1955 iscsit_conn_t
*ict
= ic
->ic_handle
;
1957 if (iscsit_check_cmdsn_and_queue(rx_pdu
)) {
1958 iscsit_post_scsi_cmd(ic
, rx_pdu
);
1960 iscsit_process_pdu_in_queue(ict
->ict_sess
);
1968 iscsit_post_scsi_cmd(idm_conn_t
*ic
, idm_pdu_t
*rx_pdu
)
1971 iscsit_task_t
*itask
;
1974 iscsi_scsi_cmd_hdr_t
*iscsi_scsi
=
1975 (iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
;
1976 iscsi_addl_hdr_t
*ahs_hdr
;
1977 uint16_t addl_cdb_len
= 0;
1979 ict
= ic
->ic_handle
;
1981 itask
= iscsit_task_alloc(ict
);
1982 if (itask
== NULL
) {
1983 /* Finish processing request */
1984 iscsit_set_cmdsn(ict
, rx_pdu
);
1986 iscsit_send_direct_scsi_resp(ict
, rx_pdu
,
1987 ISCSI_STATUS_CMD_COMPLETED
, STATUS_BUSY
);
1988 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
1993 * Note CmdSN and ITT in task. IDM will have already validated this
1994 * request against the connection state so we don't need to check
1995 * that (the connection may have changed state in the meantime but
1996 * we will catch that when we try to send a response)
1998 itask
->it_cmdsn
= ntohl(iscsi_scsi
->cmdsn
);
1999 itask
->it_itt
= iscsi_scsi
->itt
;
2002 * Check for extended CDB AHS
2004 if (iscsi_scsi
->hlength
> 0) {
2005 ahs_hdr
= (iscsi_addl_hdr_t
*)iscsi_scsi
;
2006 addl_cdb_len
= ((ahs_hdr
->ahs_hlen_hi
<< 8) |
2007 ahs_hdr
->ahs_hlen_lo
) - 1; /* Adjust for reserved byte */
2008 if (((addl_cdb_len
+ 4) / sizeof (uint32_t)) >
2009 iscsi_scsi
->hlength
) {
2010 /* Mangled header info, drop it */
2011 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2016 ict
= rx_pdu
->isp_ic
->ic_handle
; /* IDM client private */
2019 * Add task to session list. This function will also check to
2020 * ensure that the task does not already exist.
2022 if (iscsit_task_start(itask
) != IDM_STATUS_SUCCESS
) {
2024 * Task exists, free all resources and reject. Don't
2025 * update expcmdsn in this case because RFC 3720 says
2026 * "The CmdSN of the rejected command PDU (if it is a
2027 * non-immediate command) MUST NOT be considered received
2028 * by the target (i.e., a command sequence gap must be
2029 * assumed for the CmdSN), even though the CmdSN of the
2030 * rejected command PDU may be reliably ascertained. Upon
2031 * receiving the Reject, the initiator MUST plug the CmdSN
2032 * gap in order to continue to use the session. The gap
2033 * may be plugged either by transmitting a command PDU
2034 * with the same CmdSN, or by aborting the task (see section
2035 * 6.9 on how an abort may plug a CmdSN gap)." (Section 6.3)
2037 iscsit_task_free(itask
);
2038 iscsit_send_reject(ict
, rx_pdu
, ISCSI_REJECT_TASK_IN_PROGRESS
);
2039 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2043 /* Update sequence numbers */
2044 iscsit_set_cmdsn(ict
, rx_pdu
);
2047 * Allocate STMF task
2049 itask
->it_stmf_task
= stmf_task_alloc(
2050 itask
->it_ict
->ict_sess
->ist_lport
,
2051 itask
->it_ict
->ict_sess
->ist_stmf_sess
, iscsi_scsi
->lun
,
2052 16 + addl_cdb_len
, 0);
2053 if (itask
->it_stmf_task
== NULL
) {
2055 * Either stmf really couldn't get memory for a task or,
2056 * more likely, the LU is currently in reset. Either way
2057 * we have no choice but to fail the request.
2059 iscsit_task_done(itask
);
2060 iscsit_task_free(itask
);
2061 iscsit_send_direct_scsi_resp(ict
, rx_pdu
,
2062 ISCSI_STATUS_CMD_COMPLETED
, STATUS_BUSY
);
2063 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2067 task
= itask
->it_stmf_task
;
2068 task
->task_port_private
= itask
;
2070 bcopy(iscsi_scsi
->lun
, task
->task_lun_no
, sizeof (task
->task_lun_no
));
2073 * iSCSI and Comstar use the same values. Should we rely on this
2074 * or translate them bit-wise?
2078 (((iscsi_scsi
->flags
& ISCSI_FLAG_CMD_READ
) ? TF_READ_DATA
: 0) |
2079 ((iscsi_scsi
->flags
& ISCSI_FLAG_CMD_WRITE
) ? TF_WRITE_DATA
: 0) |
2080 ((rx_pdu
->isp_datalen
== 0) ? 0 : TF_INITIAL_BURST
));
2082 switch (iscsi_scsi
->flags
& ISCSI_FLAG_CMD_ATTR_MASK
) {
2083 case ISCSI_ATTR_UNTAGGED
:
2085 case ISCSI_ATTR_SIMPLE
:
2086 task
->task_additional_flags
|= TF_ATTR_SIMPLE_QUEUE
;
2088 case ISCSI_ATTR_ORDERED
:
2089 task
->task_additional_flags
|= TF_ATTR_ORDERED_QUEUE
;
2091 case ISCSI_ATTR_HEAD_OF_QUEUE
:
2092 task
->task_additional_flags
|= TF_ATTR_HEAD_OF_QUEUE
;
2094 case ISCSI_ATTR_ACA
:
2095 task
->task_additional_flags
|= TF_ATTR_ACA
;
2098 /* Protocol error but just take it, treat as untagged */
2103 task
->task_additional_flags
= 0;
2104 task
->task_priority
= 0;
2105 task
->task_mgmt_function
= TM_NONE
;
2108 * This "task_max_nbufs" doesn't map well to BIDI. We probably need
2109 * parameter for each direction. "MaxOutstandingR2T" may very well
2110 * be set to one which could prevent us from doing simultaneous
2111 * transfers in each direction.
2113 task
->task_max_nbufs
= (iscsi_scsi
->flags
& ISCSI_FLAG_CMD_WRITE
) ?
2114 ict
->ict_op
.op_max_outstanding_r2t
: STMF_BUFS_MAX
;
2115 task
->task_cmd_seq_no
= ntohl(iscsi_scsi
->itt
);
2116 task
->task_expected_xfer_length
= ntohl(iscsi_scsi
->data_length
);
2119 bcopy(iscsi_scsi
->scb
, task
->task_cdb
, 16);
2120 if (addl_cdb_len
> 0) {
2121 bcopy(ahs_hdr
->ahs_extscb
, task
->task_cdb
+ 16, addl_cdb_len
);
2124 DTRACE_ISCSI_3(scsi__command
, idm_conn_t
*, ic
,
2125 iscsi_scsi_cmd_hdr_t
*, (iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
,
2126 scsi_task_t
*, task
);
2129 * Copy the transport header into the task handle from the PDU
2130 * handle. The transport header describes this task's remote tagged
2133 if (rx_pdu
->isp_transport_hdrlen
!= 0) {
2134 bcopy(rx_pdu
->isp_transport_hdr
,
2135 itask
->it_idm_task
->idt_transport_hdr
,
2136 rx_pdu
->isp_transport_hdrlen
);
2140 * Tell IDM about our new active task
2142 idm_task_start(itask
->it_idm_task
, (uintptr_t)itask
->it_itt
);
2145 * If we have any immediate data then setup the immediate buffer
2146 * context that comes with the task
2148 if (rx_pdu
->isp_datalen
) {
2149 ibuf
= itask
->it_immed_data
;
2150 ibuf
->ibuf_immed_data_pdu
= rx_pdu
;
2151 ibuf
->ibuf_stmf_buf
->db_data_size
= rx_pdu
->isp_datalen
;
2152 ibuf
->ibuf_stmf_buf
->db_buf_size
= rx_pdu
->isp_datalen
;
2153 ibuf
->ibuf_stmf_buf
->db_relative_offset
= 0;
2154 ibuf
->ibuf_stmf_buf
->db_sglist
[0].seg_length
=
2155 rx_pdu
->isp_datalen
;
2156 ibuf
->ibuf_stmf_buf
->db_sglist
[0].seg_addr
= rx_pdu
->isp_data
;
2158 DTRACE_ISCSI_8(xfer__start
, idm_conn_t
*, ic
,
2159 uintptr_t, ibuf
->ibuf_stmf_buf
->db_sglist
[0].seg_addr
,
2160 uint32_t, ibuf
->ibuf_stmf_buf
->db_relative_offset
,
2161 uint64_t, 0, uint32_t, 0, uint32_t, 0, /* no raddr */
2162 uint32_t, rx_pdu
->isp_datalen
, int, XFER_BUF_TX_TO_INI
);
2165 * For immediate data transfer, there is no callback from
2166 * stmf to indicate that the initial burst of data is
2167 * transferred successfully. In some cases, the task can
2168 * get freed before execution returns from stmf_post_task.
2169 * Although this xfer-start/done probe accurately tracks
2170 * the size of the transfer, it does only provide a best
2171 * effort on the timing of the transfer.
2173 DTRACE_ISCSI_8(xfer__done
, idm_conn_t
*, ic
,
2174 uintptr_t, ibuf
->ibuf_stmf_buf
->db_sglist
[0].seg_addr
,
2175 uint32_t, ibuf
->ibuf_stmf_buf
->db_relative_offset
,
2176 uint64_t, 0, uint32_t, 0, uint32_t, 0, /* no raddr */
2177 uint32_t, rx_pdu
->isp_datalen
, int, XFER_BUF_TX_TO_INI
);
2178 stmf_post_task(task
, ibuf
->ibuf_stmf_buf
);
2181 stmf_post_task(task
, NULL
);
2182 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2187 iscsit_deferred_dispatch(idm_pdu_t
*rx_pdu
)
2189 iscsit_conn_t
*ict
= rx_pdu
->isp_ic
->ic_handle
;
2192 * If this isn't a login packet, we need a session. Otherwise
2193 * this is a protocol error (perhaps one IDM should've caught?).
2195 if (IDM_PDU_OPCODE(rx_pdu
) != ISCSI_OP_LOGIN_CMD
&&
2196 ict
->ict_sess
== NULL
) {
2197 DTRACE_PROBE2(iscsi__idm__deferred__no__session
,
2198 iscsit_conn_t
*, ict
, idm_pdu_t
*, rx_pdu
);
2199 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
2204 * If the connection has been lost then ignore new PDU's
2206 mutex_enter(&ict
->ict_mutex
);
2207 if (ict
->ict_lost
) {
2208 mutex_exit(&ict
->ict_mutex
);
2209 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
2214 * Grab a hold on the connection to prevent it from going away
2215 * between now and when the taskq function is called.
2217 iscsit_conn_dispatch_hold(ict
);
2218 mutex_exit(&ict
->ict_mutex
);
2220 taskq_dispatch_ent(iscsit_global
.global_dispatch_taskq
,
2221 iscsit_deferred
, rx_pdu
, 0, &rx_pdu
->isp_tqent
);
2225 iscsit_deferred(void *rx_pdu_void
)
2227 idm_pdu_t
*rx_pdu
= rx_pdu_void
;
2228 idm_conn_t
*ic
= rx_pdu
->isp_ic
;
2229 iscsit_conn_t
*ict
= ic
->ic_handle
;
2232 * NOP and Task Management Commands can be marked for immediate
2233 * delivery. Commands marked as 'Immediate' are to be considered
2234 * for execution as soon as they arrive on the target. So these
2235 * should not be checked for sequence order and put in a queue.
2236 * The CmdSN is not advanced for Immediate Commands.
2238 switch (IDM_PDU_OPCODE(rx_pdu
)) {
2239 case ISCSI_OP_NOOP_OUT
:
2240 if (iscsit_check_cmdsn_and_queue(rx_pdu
)) {
2241 iscsit_set_cmdsn(ict
, rx_pdu
);
2242 iscsit_pdu_op_noop(ict
, rx_pdu
);
2245 case ISCSI_OP_LOGIN_CMD
:
2246 iscsit_pdu_op_login_cmd(ict
, rx_pdu
);
2247 iscsit_conn_dispatch_rele(ict
);
2249 case ISCSI_OP_TEXT_CMD
:
2250 if (iscsit_check_cmdsn_and_queue(rx_pdu
)) {
2251 iscsit_set_cmdsn(ict
, rx_pdu
);
2252 iscsit_pdu_op_text_cmd(ict
, rx_pdu
);
2255 case ISCSI_OP_LOGOUT_CMD
:
2256 if (iscsit_check_cmdsn_and_queue(rx_pdu
)) {
2257 iscsit_set_cmdsn(ict
, rx_pdu
);
2258 iscsit_pdu_op_logout_cmd(ict
, rx_pdu
);
2262 /* Protocol error. IDM should have caught this */
2263 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
2268 * Check if there are other PDUs in the session staging queue
2269 * waiting to be posted to SCSI layer.
2271 iscsit_process_pdu_in_queue(ict
->ict_sess
);
2273 iscsit_conn_dispatch_rele(ict
);
2277 iscsit_send_direct_scsi_resp(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
,
2278 uint8_t response
, uint8_t cmd_status
)
2282 iscsi_scsi_rsp_hdr_t
*resp
;
2283 iscsi_scsi_cmd_hdr_t
*req
=
2284 (iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
;
2288 rsp_pdu
= idm_pdu_alloc(sizeof (iscsi_scsi_rsp_hdr_t
), 0);
2289 idm_pdu_init(rsp_pdu
, ic
, NULL
, NULL
);
2291 * StatSN is incremented by 1 for every response sent on
2292 * a connection except for responses sent as a result of
2295 rsp_pdu
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
2297 resp
= (iscsi_scsi_rsp_hdr_t
*)rsp_pdu
->isp_hdr
;
2299 resp
->opcode
= ISCSI_OP_SCSI_RSP
;
2300 resp
->flags
= ISCSI_FLAG_FINAL
;
2301 resp
->response
= response
;
2302 resp
->cmd_status
= cmd_status
;
2303 resp
->itt
= req
->itt
;
2304 if ((response
== ISCSI_STATUS_CMD_COMPLETED
) &&
2305 (req
->data_length
!= 0) &&
2306 ((req
->flags
& ISCSI_FLAG_CMD_READ
) ||
2307 (req
->flags
& ISCSI_FLAG_CMD_WRITE
))) {
2308 resp
->flags
|= ISCSI_FLAG_CMD_UNDERFLOW
;
2309 resp
->residual_count
= req
->data_length
;
2312 DTRACE_PROBE4(iscsi__scsi__direct__response
,
2313 iscsit_conn_t
*, ict
,
2314 uint8_t, resp
->response
,
2315 uint8_t, resp
->cmd_status
,
2316 idm_pdu_t
*, rsp_pdu
);
2318 iscsit_pdu_tx(rsp_pdu
);
2322 iscsit_send_task_mgmt_resp(idm_pdu_t
*tm_resp_pdu
, uint8_t tm_status
)
2324 iscsi_scsi_task_mgt_rsp_hdr_t
*tm_resp
;
2327 * The target must take note of the last-sent StatSN.
2328 * The StatSN is to be incremented after sending a
2329 * task management response. Digest recovery can only
2330 * work if StatSN is incremented.
2332 tm_resp_pdu
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
2333 tm_resp
= (iscsi_scsi_task_mgt_rsp_hdr_t
*)tm_resp_pdu
->isp_hdr
;
2334 tm_resp
->response
= tm_status
;
2336 DTRACE_PROBE3(iscsi__scsi__tm__response
,
2337 iscsit_conn_t
*, tm_resp_pdu
->isp_ic
->ic_handle
,
2338 uint8_t, tm_resp
->response
,
2339 idm_pdu_t
*, tm_resp_pdu
);
2340 iscsit_pdu_tx(tm_resp_pdu
);
2344 iscsit_op_scsi_task_mgmt(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
)
2346 idm_pdu_t
*tm_resp_pdu
;
2347 iscsit_task_t
*itask
;
2348 iscsit_task_t
*tm_itask
;
2350 iscsi_scsi_task_mgt_hdr_t
*iscsi_tm
=
2351 (iscsi_scsi_task_mgt_hdr_t
*)rx_pdu
->isp_hdr
;
2352 iscsi_scsi_task_mgt_rsp_hdr_t
*iscsi_tm_rsp
=
2353 (iscsi_scsi_task_mgt_rsp_hdr_t
*)rx_pdu
->isp_hdr
;
2354 uint32_t rtt
, cmdsn
, refcmdsn
;
2358 * Setup response PDU (response field will get filled in later)
2360 tm_resp_pdu
= idm_pdu_alloc(sizeof (iscsi_scsi_task_mgt_rsp_hdr_t
), 0);
2361 if (tm_resp_pdu
== NULL
) {
2362 /* Can't respond, just drop it */
2363 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2366 idm_pdu_init(tm_resp_pdu
, ict
->ict_ic
, NULL
, NULL
);
2367 iscsi_tm_rsp
= (iscsi_scsi_task_mgt_rsp_hdr_t
*)tm_resp_pdu
->isp_hdr
;
2368 bzero(iscsi_tm_rsp
, sizeof (iscsi_scsi_task_mgt_rsp_hdr_t
));
2369 iscsi_tm_rsp
->opcode
= ISCSI_OP_SCSI_TASK_MGT_RSP
;
2370 iscsi_tm_rsp
->flags
= ISCSI_FLAG_FINAL
;
2371 iscsi_tm_rsp
->itt
= rx_pdu
->isp_hdr
->itt
;
2374 * Figure out what we're being asked to do.
2376 DTRACE_PROBE4(iscsi__scsi__tm__request
,
2377 iscsit_conn_t
*, ict
,
2378 uint8_t, (iscsi_tm
->function
& ISCSI_FLAG_TASK_MGMT_FUNCTION_MASK
),
2379 uint32_t, iscsi_tm
->rtt
,
2380 idm_pdu_t
*, rx_pdu
);
2381 switch (iscsi_tm
->function
& ISCSI_FLAG_TASK_MGMT_FUNCTION_MASK
) {
2382 case ISCSI_TM_FUNC_ABORT_TASK
:
2384 * STMF doesn't currently support the "abort task" task
2385 * management command although it does support aborting
2386 * an individual task. We'll get STMF to abort the task
2387 * for us but handle the details of the task management
2388 * command ourselves.
2390 * Find the task associated with the referenced task tag.
2392 rtt
= iscsi_tm
->rtt
;
2393 itask
= (iscsit_task_t
*)idm_task_find_by_handle(ict
->ict_ic
,
2396 if (itask
== NULL
) {
2397 cmdsn
= ntohl(iscsi_tm
->cmdsn
);
2398 refcmdsn
= ntohl(iscsi_tm
->refcmdsn
);
2401 * Task was not found. But the SCSI command could be
2402 * on the rxpdu wait queue. If RefCmdSN is within
2403 * the CmdSN window and less than CmdSN of the TM
2404 * function, return "Function Complete". Otherwise,
2405 * return "Task Does Not Exist".
2408 if (iscsit_cmdsn_in_window(ict
, refcmdsn
) &&
2409 iscsit_sna_lt(refcmdsn
, cmdsn
)) {
2410 mutex_enter(&ict
->ict_sess
->ist_sn_mutex
);
2411 if (iscsit_remove_pdu_from_queue(
2412 ict
->ict_sess
, refcmdsn
)) {
2413 iscsit_conn_dispatch_rele(ict
);
2415 mutex_exit(&ict
->ict_sess
->ist_sn_mutex
);
2416 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2417 SCSI_TCP_TM_RESP_COMPLETE
);
2419 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2420 SCSI_TCP_TM_RESP_NO_TASK
);
2425 * Tell STMF to abort the task. This will do no harm
2426 * if the task is already complete.
2428 stmf_abort(STMF_QUEUE_TASK_ABORT
, itask
->it_stmf_task
,
2429 STMF_ABORTED
, NULL
);
2432 * Make sure the task hasn't already completed
2434 mutex_enter(&itask
->it_idm_task
->idt_mutex
);
2435 if ((itask
->it_idm_task
->idt_state
== TASK_COMPLETE
) ||
2436 (itask
->it_idm_task
->idt_state
== TASK_IDLE
)) {
2438 * Task is complete, return "Task Does Not
2441 mutex_exit(&itask
->it_idm_task
->idt_mutex
);
2442 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2443 SCSI_TCP_TM_RESP_NO_TASK
);
2446 * STMF is now aborting the task, return
2447 * "Function Complete"
2449 mutex_exit(&itask
->it_idm_task
->idt_mutex
);
2450 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2451 SCSI_TCP_TM_RESP_COMPLETE
);
2453 idm_task_rele(itask
->it_idm_task
);
2455 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2458 case ISCSI_TM_FUNC_ABORT_TASK_SET
:
2459 tm_func
= TM_ABORT_TASK_SET
;
2462 case ISCSI_TM_FUNC_CLEAR_ACA
:
2463 tm_func
= TM_CLEAR_ACA
;
2466 case ISCSI_TM_FUNC_CLEAR_TASK_SET
:
2467 tm_func
= TM_CLEAR_TASK_SET
;
2470 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET
:
2471 tm_func
= TM_LUN_RESET
;
2474 case ISCSI_TM_FUNC_TARGET_WARM_RESET
:
2475 tm_func
= TM_TARGET_WARM_RESET
;
2478 case ISCSI_TM_FUNC_TARGET_COLD_RESET
:
2479 tm_func
= TM_TARGET_COLD_RESET
;
2482 case ISCSI_TM_FUNC_TASK_REASSIGN
:
2484 * We do not currently support allegiance reassignment. When
2485 * we start supporting ERL1+, we will need to.
2487 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2488 SCSI_TCP_TM_RESP_NO_ALLG_REASSN
);
2489 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2493 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2494 SCSI_TCP_TM_RESP_REJECTED
);
2495 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2499 tm_itask
= iscsit_tm_task_alloc(ict
);
2500 if (tm_itask
== NULL
) {
2501 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2502 SCSI_TCP_TM_RESP_REJECTED
);
2503 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2508 task
= stmf_task_alloc(ict
->ict_sess
->ist_lport
,
2509 ict
->ict_sess
->ist_stmf_sess
, iscsi_tm
->lun
,
2510 0, STMF_TASK_EXT_NONE
);
2513 * If this happens, either the LU is in reset, couldn't
2514 * get memory, or some other condition in which we simply
2515 * can't complete this request. It would be nice to return
2516 * an error code like "busy" but the closest we have is
2519 iscsit_send_task_mgmt_resp(tm_resp_pdu
,
2520 SCSI_TCP_TM_RESP_REJECTED
);
2521 iscsit_tm_task_free(tm_itask
);
2522 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2526 tm_itask
->it_tm_pdu
= tm_resp_pdu
;
2527 tm_itask
->it_stmf_task
= task
;
2528 task
->task_port_private
= tm_itask
;
2529 task
->task_mgmt_function
= tm_func
;
2530 task
->task_additional_flags
= TASK_AF_NO_EXPECTED_XFER_LENGTH
;
2531 task
->task_priority
= 0;
2532 task
->task_max_nbufs
= STMF_BUFS_MAX
;
2533 task
->task_cmd_seq_no
= iscsi_tm
->itt
;
2534 task
->task_expected_xfer_length
= 0;
2536 stmf_post_task(task
, NULL
);
2537 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2541 iscsit_pdu_op_noop(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
)
2543 iscsi_nop_out_hdr_t
*out
= (iscsi_nop_out_hdr_t
*)rx_pdu
->isp_hdr
;
2544 iscsi_nop_in_hdr_t
*in
;
2548 /* Ignore the response from initiator */
2549 if ((out
->itt
== ISCSI_RSVD_TASK_TAG
) ||
2550 (out
->ttt
!= ISCSI_RSVD_TASK_TAG
)) {
2551 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2555 /* Allocate a PDU to respond */
2556 resp_datalen
= ntoh24(out
->dlength
);
2557 resp
= idm_pdu_alloc(sizeof (iscsi_hdr_t
), resp_datalen
);
2558 idm_pdu_init(resp
, ict
->ict_ic
, NULL
, NULL
);
2559 if (resp_datalen
> 0) {
2560 bcopy(rx_pdu
->isp_data
, resp
->isp_data
, resp_datalen
);
2564 * When sending a NOP-In as a response to a NOP-Out from the initiator,
2565 * the target must respond with the same initiator task tag that was
2566 * provided in the NOP-Out request, the target transfer tag must be
2567 * ISCSI_RSVD_TASK_TAG (0xffffffff) and StatSN will contain the next
2568 * status sequence number. The StatSN for the connection is advanced
2569 * after this PDU is sent.
2571 in
= (iscsi_nop_in_hdr_t
*)resp
->isp_hdr
;
2572 bzero(in
, sizeof (*in
));
2573 in
->opcode
= ISCSI_OP_NOOP_IN
;
2574 in
->flags
= ISCSI_FLAG_FINAL
;
2575 bcopy(out
->lun
, in
->lun
, 8);
2577 in
->ttt
= ISCSI_RSVD_TASK_TAG
;
2578 hton24(in
->dlength
, resp_datalen
);
2579 resp
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
2580 /* Any other field in resp to be set? */
2581 iscsit_pdu_tx(resp
);
2582 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2586 iscsit_pdu_op_login_cmd(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
)
2590 * Submit PDU to login state machine. State machine will free the
2593 iscsit_login_sm_event(ict
, ILE_LOGIN_RCV
, rx_pdu
);
2597 iscsit_pdu_op_logout_cmd(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
)
2599 iscsi_logout_hdr_t
*logout_req
=
2600 (iscsi_logout_hdr_t
*)rx_pdu
->isp_hdr
;
2601 iscsi_logout_rsp_hdr_t
*logout_rsp
;
2604 /* Allocate a PDU to respond */
2605 resp
= idm_pdu_alloc(sizeof (iscsi_hdr_t
), 0);
2606 idm_pdu_init(resp
, ict
->ict_ic
, NULL
, NULL
);
2608 * The StatSN is to be sent to the initiator,
2609 * it is not required to increment the number
2610 * as the connection is terminating.
2612 resp
->isp_flags
|= IDM_PDU_SET_STATSN
;
2614 * Logout results in the immediate termination of all tasks except
2615 * if the logout reason is ISCSI_LOGOUT_REASON_RECOVERY. The
2616 * connection state machine will drive this task cleanup automatically
2617 * so we don't need to handle that here.
2619 logout_rsp
= (iscsi_logout_rsp_hdr_t
*)resp
->isp_hdr
;
2620 bzero(logout_rsp
, sizeof (*logout_rsp
));
2621 logout_rsp
->opcode
= ISCSI_OP_LOGOUT_RSP
;
2622 logout_rsp
->flags
= ISCSI_FLAG_FINAL
;
2623 logout_rsp
->itt
= logout_req
->itt
;
2624 if ((logout_req
->flags
& ISCSI_FLAG_LOGOUT_REASON_MASK
) >
2625 ISCSI_LOGOUT_REASON_RECOVERY
) {
2626 logout_rsp
->response
= ISCSI_LOGOUT_RECOVERY_UNSUPPORTED
;
2628 logout_rsp
->response
= ISCSI_LOGOUT_SUCCESS
;
2631 iscsit_pdu_tx(resp
);
2632 idm_pdu_complete(rx_pdu
, IDM_STATUS_SUCCESS
);
2636 * Calculate the number of outstanding commands we can process
2642 * Instead of using a pre-defined constant for the command window,
2643 * it should be made confiurable and dynamic. With MC/S, sequence
2644 * numbers will be used up at a much faster rate than with SC/S.
2646 return (ISCSIT_MAX_WINDOW
);
2650 * Set local registers based on incoming PDU
2653 iscsit_set_cmdsn(iscsit_conn_t
*ict
, idm_pdu_t
*rx_pdu
)
2656 iscsi_scsi_cmd_hdr_t
*req
;
2658 ist
= ict
->ict_sess
;
2660 req
= (iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
;
2661 if (req
->opcode
& ISCSI_OP_IMMEDIATE
) {
2662 /* no cmdsn increment for immediate PDUs */
2666 /* Ensure that the ExpCmdSN advances in an orderly manner */
2667 mutex_enter(&ist
->ist_sn_mutex
);
2668 ist
->ist_expcmdsn
= ntohl(req
->cmdsn
) + 1;
2669 ist
->ist_maxcmdsn
= ntohl(req
->cmdsn
) + iscsit_cmd_window();
2670 mutex_exit(&ist
->ist_sn_mutex
);
2674 * Wrapper funtion, calls iscsi_calc_rspsn and idm_pdu_tx
2677 iscsit_pdu_tx(idm_pdu_t
*pdu
)
2679 iscsit_conn_t
*ict
= pdu
->isp_ic
->ic_handle
;
2680 iscsi_scsi_rsp_hdr_t
*rsp
= (iscsi_scsi_rsp_hdr_t
*)pdu
->isp_hdr
;
2681 iscsit_sess_t
*ist
= ict
->ict_sess
;
2684 * The command sequence numbers are session-wide and must stay
2685 * consistent across the transfer, so protect the cmdsn with a
2686 * mutex lock on the session. The status sequence number will
2687 * be updated just before the transport layer transmits the PDU.
2690 mutex_enter(&ict
->ict_sess
->ist_sn_mutex
);
2691 /* Set ExpCmdSN and MaxCmdSN */
2692 rsp
->maxcmdsn
= htonl(ist
->ist_maxcmdsn
);
2693 rsp
->expcmdsn
= htonl(ist
->ist_expcmdsn
);
2695 mutex_exit(&ict
->ict_sess
->ist_sn_mutex
);
2699 * Internal functions
2703 iscsit_send_async_event(iscsit_conn_t
*ict
, uint8_t event
)
2706 iscsi_async_evt_hdr_t
*async_abt
;
2709 * Get a PDU to build the abort request.
2711 abt
= idm_pdu_alloc(sizeof (iscsi_hdr_t
), 0);
2713 idm_conn_event(ict
->ict_ic
, CE_TRANSPORT_FAIL
, (uintptr_t)NULL
);
2718 * A asynchronous message is sent by the target to request a logout.
2719 * The StatSN for the connection is advanced after the PDU is sent
2720 * to allow for initiator and target state synchronization.
2722 idm_pdu_init(abt
, ict
->ict_ic
, NULL
, NULL
);
2723 abt
->isp_datalen
= 0;
2724 abt
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
2726 async_abt
= (iscsi_async_evt_hdr_t
*)abt
->isp_hdr
;
2727 bzero(async_abt
, sizeof (*async_abt
));
2728 async_abt
->opcode
= ISCSI_OP_ASYNC_EVENT
;
2729 async_abt
->async_event
= event
;
2730 async_abt
->flags
= ISCSI_FLAG_FINAL
;
2731 async_abt
->rsvd4
[0] = 0xff;
2732 async_abt
->rsvd4
[1] = 0xff;
2733 async_abt
->rsvd4
[2] = 0xff;
2734 async_abt
->rsvd4
[3] = 0xff;
2737 case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT
:
2738 async_abt
->param3
= htons(IDM_LOGOUT_SECONDS
);
2740 case ISCSI_ASYNC_EVENT_SCSI_EVENT
:
2741 case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION
:
2742 case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS
:
2743 case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION
:
2752 iscsit_send_reject(iscsit_conn_t
*ict
, idm_pdu_t
*rejected_pdu
, uint8_t reason
)
2754 idm_pdu_t
*reject_pdu
;
2755 iscsi_reject_rsp_hdr_t
*reject
;
2758 * Get a PDU to build the abort request.
2760 reject_pdu
= idm_pdu_alloc(sizeof (iscsi_hdr_t
),
2761 rejected_pdu
->isp_hdrlen
);
2762 if (reject_pdu
== NULL
) {
2763 idm_conn_event(ict
->ict_ic
, CE_TRANSPORT_FAIL
, (uintptr_t)NULL
);
2766 idm_pdu_init(reject_pdu
, ict
->ict_ic
, NULL
, NULL
);
2767 /* StatSN is advanced after a Reject PDU */
2768 reject_pdu
->isp_flags
|= IDM_PDU_SET_STATSN
| IDM_PDU_ADVANCE_STATSN
;
2769 reject_pdu
->isp_datalen
= rejected_pdu
->isp_hdrlen
;
2770 bcopy(rejected_pdu
->isp_hdr
, reject_pdu
->isp_data
,
2771 rejected_pdu
->isp_hdrlen
);
2773 reject
= (iscsi_reject_rsp_hdr_t
*)reject_pdu
->isp_hdr
;
2774 bzero(reject
, sizeof (*reject
));
2775 reject
->opcode
= ISCSI_OP_REJECT_MSG
;
2776 reject
->reason
= reason
;
2777 reject
->flags
= ISCSI_FLAG_FINAL
;
2778 hton24(reject
->dlength
, rejected_pdu
->isp_hdrlen
);
2779 reject
->must_be_ff
[0] = 0xff;
2780 reject
->must_be_ff
[1] = 0xff;
2781 reject
->must_be_ff
[2] = 0xff;
2782 reject
->must_be_ff
[3] = 0xff;
2784 iscsit_pdu_tx(reject_pdu
);
2788 static iscsit_task_t
*
2789 iscsit_task_alloc(iscsit_conn_t
*ict
)
2791 iscsit_task_t
*itask
;
2792 iscsit_buf_t
*immed_ibuf
;
2795 * Possible items to pre-alloc if we cache iscsit_task_t's:
2797 * Status PDU w/ sense buffer
2798 * stmf_data_buf_t for immediate data
2800 itask
= kmem_alloc(sizeof (iscsit_task_t
) + sizeof (iscsit_buf_t
) +
2801 sizeof (stmf_data_buf_t
), KM_NOSLEEP
);
2802 if (itask
!= NULL
) {
2803 mutex_init(&itask
->it_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2804 itask
->it_aborted
= itask
->it_stmf_abort
=
2805 itask
->it_tm_task
= 0;
2807 immed_ibuf
= (iscsit_buf_t
*)(itask
+ 1);
2808 bzero(immed_ibuf
, sizeof (*immed_ibuf
));
2809 immed_ibuf
->ibuf_is_immed
= B_TRUE
;
2810 immed_ibuf
->ibuf_stmf_buf
= (stmf_data_buf_t
*)(immed_ibuf
+ 1);
2812 bzero(immed_ibuf
->ibuf_stmf_buf
, sizeof (stmf_data_buf_t
));
2813 immed_ibuf
->ibuf_stmf_buf
->db_port_private
= immed_ibuf
;
2814 immed_ibuf
->ibuf_stmf_buf
->db_sglist_length
= 1;
2815 immed_ibuf
->ibuf_stmf_buf
->db_flags
= DB_DIRECTION_FROM_RPORT
|
2817 itask
->it_immed_data
= immed_ibuf
;
2818 itask
->it_idm_task
= idm_task_alloc(ict
->ict_ic
);
2819 if (itask
->it_idm_task
!= NULL
) {
2820 itask
->it_idm_task
->idt_private
= itask
;
2821 itask
->it_ict
= ict
;
2822 itask
->it_ttt
= itask
->it_idm_task
->idt_tt
;
2825 kmem_free(itask
, sizeof (iscsit_task_t
) +
2826 sizeof (iscsit_buf_t
) + sizeof (stmf_data_buf_t
));
2834 iscsit_task_free(iscsit_task_t
*itask
)
2836 idm_task_free(itask
->it_idm_task
);
2837 mutex_destroy(&itask
->it_mutex
);
2838 kmem_free(itask
, sizeof (iscsit_task_t
) +
2839 sizeof (iscsit_buf_t
) + sizeof (stmf_data_buf_t
));
2842 static iscsit_task_t
*
2843 iscsit_tm_task_alloc(iscsit_conn_t
*ict
)
2845 iscsit_task_t
*itask
;
2847 itask
= kmem_zalloc(sizeof (iscsit_task_t
), KM_NOSLEEP
);
2848 if (itask
!= NULL
) {
2849 idm_conn_hold(ict
->ict_ic
);
2850 mutex_init(&itask
->it_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2851 itask
->it_aborted
= itask
->it_stmf_abort
=
2852 itask
->it_tm_responded
= 0;
2853 itask
->it_tm_pdu
= NULL
;
2854 itask
->it_tm_task
= 1;
2855 itask
->it_ict
= ict
;
2862 iscsit_tm_task_free(iscsit_task_t
*itask
)
2865 * If we responded then the call to idm_pdu_complete will free the
2866 * PDU. Otherwise we got aborted before the TM function could
2867 * complete and we need to free the PDU explicitly.
2869 if (itask
->it_tm_pdu
!= NULL
&& !itask
->it_tm_responded
)
2870 idm_pdu_free(itask
->it_tm_pdu
);
2871 idm_conn_rele(itask
->it_ict
->ict_ic
);
2872 mutex_destroy(&itask
->it_mutex
);
2873 kmem_free(itask
, sizeof (iscsit_task_t
));
2877 iscsit_task_start(iscsit_task_t
*itask
)
2879 iscsit_sess_t
*ist
= itask
->it_ict
->ict_sess
;
2883 * Sanity check the ITT and ensure that this task does not already
2884 * exist. If not then add the task to the session task list.
2886 mutex_enter(&ist
->ist_mutex
);
2887 mutex_enter(&itask
->it_mutex
);
2888 itask
->it_active
= 1;
2889 if (avl_find(&ist
->ist_task_list
, itask
, &where
) == NULL
) {
2890 /* New task, add to AVL */
2891 avl_insert(&ist
->ist_task_list
, itask
, where
);
2892 mutex_exit(&itask
->it_mutex
);
2893 mutex_exit(&ist
->ist_mutex
);
2894 return (IDM_STATUS_SUCCESS
);
2896 mutex_exit(&itask
->it_mutex
);
2897 mutex_exit(&ist
->ist_mutex
);
2899 return (IDM_STATUS_REJECT
);
2903 iscsit_task_done(iscsit_task_t
*itask
)
2905 iscsit_sess_t
*ist
= itask
->it_ict
->ict_sess
;
2907 mutex_enter(&ist
->ist_mutex
);
2908 mutex_enter(&itask
->it_mutex
);
2909 if (itask
->it_active
) {
2910 avl_remove(&ist
->ist_task_list
, itask
);
2911 itask
->it_active
= 0;
2913 mutex_exit(&itask
->it_mutex
);
2914 mutex_exit(&ist
->ist_mutex
);
2918 * iscsit status PDU cache
2923 iscsit_status_pdu_constructor(void *pdu_void
, void *arg
, int flags
)
2925 idm_pdu_t
*pdu
= pdu_void
;
2926 iscsi_scsi_rsp_hdr_t
*rsp
;
2928 bzero(pdu
, sizeof (idm_pdu_t
));
2929 pdu
->isp_callback
= iscsit_send_good_status_done
;
2930 pdu
->isp_magic
= IDM_PDU_MAGIC
;
2931 pdu
->isp_hdr
= (iscsi_hdr_t
*)(pdu
+ 1); /* Ptr arithmetic */
2932 pdu
->isp_hdrlen
= sizeof (iscsi_hdr_t
);
2934 /* Setup status response */
2935 rsp
= (iscsi_scsi_rsp_hdr_t
*)pdu
->isp_hdr
;
2936 bzero(rsp
, sizeof (*rsp
));
2937 rsp
->opcode
= ISCSI_OP_SCSI_RSP
;
2938 rsp
->flags
= ISCSI_FLAG_FINAL
;
2939 rsp
->response
= ISCSI_STATUS_CMD_COMPLETED
;
2945 * iscsit private data handler
2950 iscsit_pp_cb(struct stmf_port_provider
*pp
, int cmd
, void *arg
, uint32_t flags
)
2954 iscsit_service_enabled_t old_state
;
2956 if ((cmd
!= STMF_PROVIDER_DATA_UPDATED
) || (arg
== NULL
)) {
2960 nvl
= (nvlist_t
*)arg
;
2962 /* Translate nvlist */
2963 if (it_nv_to_config(nvl
, &cfg
) != 0) {
2964 cmn_err(CE_WARN
, "Configuration is invalid");
2968 /* Check that no iSCSI ioctl is currently running */
2969 mutex_enter(&iscsit_global
.global_state_mutex
);
2970 old_state
= iscsit_global
.global_svc_state
;
2971 switch (iscsit_global
.global_svc_state
) {
2974 iscsit_global
.global_svc_state
= ISE_BUSY
;
2978 * It is OK for the iscsit_pp_cb to be called from inside of
2979 * an iSCSI ioctl only if we are currently executing inside
2980 * of stmf_register_port_provider.
2982 ASSERT((flags
& STMF_PCB_PREG_COMPLETE
) != 0);
2985 cmn_err(CE_WARN
, "iscsit_pp_cb called when global_svc_state"
2986 " is not ENABLED(0x%x) -- ignoring",
2987 iscsit_global
.global_svc_state
);
2988 mutex_exit(&iscsit_global
.global_state_mutex
);
2989 it_config_free_cmn(cfg
);
2992 mutex_exit(&iscsit_global
.global_state_mutex
);
2995 (void) iscsit_config_merge(cfg
);
2997 it_config_free_cmn(cfg
);
2999 /* Restore old iSCSI driver global state */
3000 mutex_enter(&iscsit_global
.global_state_mutex
);
3001 ASSERT(iscsit_global
.global_svc_state
== ISE_BUSY
||
3002 iscsit_global
.global_svc_state
== ISE_ENABLING
);
3003 iscsit_global
.global_svc_state
= old_state
;
3004 mutex_exit(&iscsit_global
.global_state_mutex
);
3008 static it_cfg_status_t
3009 iscsit_config_merge(it_config_t
*in_cfg
)
3011 it_cfg_status_t status
;
3013 it_config_t tmp_cfg
;
3014 list_t tpg_del_list
;
3019 /* Make empty config */
3020 bzero(&tmp_cfg
, sizeof (tmp_cfg
));
3024 list_create(&tpg_del_list
, sizeof (iscsit_tpg_t
),
3025 offsetof(iscsit_tpg_t
, tpg_delete_ln
));
3028 * Update targets, initiator contexts, target portal groups,
3031 ISCSIT_GLOBAL_LOCK(RW_WRITER
);
3032 if (((status
= iscsit_config_merge_tpg(cfg
, &tpg_del_list
))
3034 ((status
= iscsit_config_merge_tgt(cfg
)) != 0) ||
3035 ((status
= iscsit_config_merge_ini(cfg
)) != 0) ||
3036 ((status
= isnst_config_merge(cfg
)) != 0)) {
3037 ISCSIT_GLOBAL_UNLOCK();
3041 /* Update other global config parameters */
3042 if (iscsit_global
.global_props
) {
3043 nvlist_free(iscsit_global
.global_props
);
3044 iscsit_global
.global_props
= NULL
;
3047 (void) nvlist_dup(cfg
->config_global_properties
,
3048 &iscsit_global
.global_props
, KM_SLEEP
);
3050 ISCSIT_GLOBAL_UNLOCK();
3052 iscsit_config_destroy_tpgs(&tpg_del_list
);
3054 list_destroy(&tpg_del_list
);
3056 return (ITCFG_SUCCESS
);
3062 * Compare serial numbers using serial number arithmetic as defined in
3065 * NOTE: This code is duplicated in the isns server. It ought to be common.
3069 iscsit_sna_lt(uint32_t sn1
, uint32_t sn2
)
3071 return ((sn1
!= sn2
) &&
3072 (((sn1
< sn2
) && ((sn2
- sn1
) < ISCSIT_SNA32_CHECK
)) ||
3073 ((sn1
> sn2
) && ((sn1
- sn2
) > ISCSIT_SNA32_CHECK
))));
3077 iscsit_sna_lte(uint32_t sn1
, uint32_t sn2
)
3079 return ((sn1
== sn2
) ||
3080 (((sn1
< sn2
) && ((sn2
- sn1
) < ISCSIT_SNA32_CHECK
)) ||
3081 ((sn1
> sn2
) && ((sn1
- sn2
) > ISCSIT_SNA32_CHECK
))));
3086 iscsit_cmdsn_in_window(iscsit_conn_t
*ict
, uint32_t cmdsn
)
3088 iscsit_sess_t
*ist
= ict
->ict_sess
;
3091 ist
= ict
->ict_sess
;
3093 mutex_enter(&ist
->ist_sn_mutex
);
3096 * If cmdsn is less than ist_expcmdsn - iscsit_cmd_window() or
3097 * greater than ist_expcmdsn, it's not in the window.
3100 if (iscsit_sna_lt(cmdsn
, (ist
->ist_expcmdsn
- iscsit_cmd_window())) ||
3101 !iscsit_sna_lte(cmdsn
, ist
->ist_expcmdsn
)) {
3105 mutex_exit(&ist
->ist_sn_mutex
);
3111 * iscsit_check_cmdsn_and_queue
3113 * Independent of the order in which the iSCSI target receives non-immediate
3114 * command PDU across the entire session and any multiple connections within
3115 * the session, the target must deliver the commands to the SCSI layer in
3116 * CmdSN order. So out-of-order non-immediate commands are queued up on a
3117 * session-wide wait queue. Duplicate commands are ignored.
3121 iscsit_check_cmdsn_and_queue(idm_pdu_t
*rx_pdu
)
3123 idm_conn_t
*ic
= rx_pdu
->isp_ic
;
3124 iscsit_conn_t
*ict
= ic
->ic_handle
;
3125 iscsit_sess_t
*ist
= ict
->ict_sess
;
3126 iscsi_scsi_cmd_hdr_t
*hdr
= (iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
;
3128 mutex_enter(&ist
->ist_sn_mutex
);
3129 if (hdr
->opcode
& ISCSI_OP_IMMEDIATE
) {
3130 /* do not queue, handle it immediately */
3131 DTRACE_PROBE2(immediate__cmd
, iscsit_sess_t
*, ist
,
3132 idm_pdu_t
*, rx_pdu
);
3133 mutex_exit(&ist
->ist_sn_mutex
);
3134 return (ISCSIT_CMDSN_EQ_EXPCMDSN
);
3136 if (iscsit_sna_lt(ist
->ist_expcmdsn
, ntohl(hdr
->cmdsn
))) {
3138 * Out-of-order commands (cmdSN higher than ExpCmdSN)
3139 * are staged on a fixed-size circular buffer until
3140 * the missing command is delivered to the SCSI layer.
3141 * Irrespective of the order of insertion into the
3142 * staging queue, the commands are processed out of the
3143 * queue in cmdSN order only.
3145 rx_pdu
->isp_queue_time
= gethrtime();
3146 iscsit_add_pdu_to_queue(ist
, rx_pdu
);
3147 mutex_exit(&ist
->ist_sn_mutex
);
3148 return (ISCSIT_CMDSN_GT_EXPCMDSN
);
3149 } else if (iscsit_sna_lt(ntohl(hdr
->cmdsn
), ist
->ist_expcmdsn
)) {
3150 DTRACE_PROBE3(cmdsn__lt__expcmdsn
, iscsit_sess_t
*, ist
,
3151 iscsit_conn_t
*, ict
, idm_pdu_t
*, rx_pdu
);
3152 mutex_exit(&ist
->ist_sn_mutex
);
3153 return (ISCSIT_CMDSN_LT_EXPCMDSN
);
3155 mutex_exit(&ist
->ist_sn_mutex
);
3156 return (ISCSIT_CMDSN_EQ_EXPCMDSN
);
3161 * iscsit_add_pdu_to_queue() adds PDUs into the array indexed by
3162 * their cmdsn value. The length of the array is kept above the
3163 * maximum window size. The window keeps the cmdsn within a range
3164 * such that there are no collisons. e.g. the assumption is that
3165 * the windowing checks make it impossible to receive PDUs that
3166 * index into the same location in the array.
3169 iscsit_add_pdu_to_queue(iscsit_sess_t
*ist
, idm_pdu_t
*rx_pdu
)
3171 iscsit_cbuf_t
*cbuf
= ist
->ist_rxpdu_queue
;
3172 iscsit_conn_t
*ict
= rx_pdu
->isp_ic
->ic_handle
;
3174 ((iscsi_scsi_cmd_hdr_t
*)rx_pdu
->isp_hdr
)->cmdsn
;
3177 ASSERT(MUTEX_HELD(&ist
->ist_sn_mutex
));
3179 * If the connection is being torn down, then
3180 * don't add the PDU to the staging queue
3182 mutex_enter(&ict
->ict_mutex
);
3183 if (ict
->ict_lost
) {
3184 mutex_exit(&ict
->ict_mutex
);
3185 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
3188 iscsit_conn_dispatch_hold(ict
);
3189 mutex_exit(&ict
->ict_mutex
);
3191 index
= ntohl(cmdsn
) % ISCSIT_RXPDU_QUEUE_LEN
;
3193 * In the normal case, assuming that the Initiator is not
3194 * buggy and that we don't have packet duplication occuring,
3195 * the entry in the array will be NULL. However, we may have
3196 * received a duplicate PDU with cmdsn > expsn , and in that
3197 * case we just ignore this PDU -- the previously received one
3198 * remains queued for processing. We need to be careful not
3199 * to leak this one however.
3201 if (cbuf
->cb_buffer
[index
] != NULL
) {
3202 idm_pdu_complete(rx_pdu
, IDM_STATUS_FAIL
);
3204 cbuf
->cb_buffer
[index
] = rx_pdu
;
3205 cbuf
->cb_num_elems
++;
3210 iscsit_remove_pdu_from_queue(iscsit_sess_t
*ist
, uint32_t cmdsn
)
3212 iscsit_cbuf_t
*cbuf
= ist
->ist_rxpdu_queue
;
3213 idm_pdu_t
*pdu
= NULL
;
3216 ASSERT(MUTEX_HELD(&ist
->ist_sn_mutex
));
3217 index
= cmdsn
% ISCSIT_RXPDU_QUEUE_LEN
;
3218 if ((pdu
= cbuf
->cb_buffer
[index
]) != NULL
) {
3220 ntohl(((iscsi_scsi_cmd_hdr_t
*)pdu
->isp_hdr
)->cmdsn
));
3221 cbuf
->cb_buffer
[index
] = NULL
;
3222 cbuf
->cb_num_elems
--;
3229 * iscsit_process_pdu_in_queue() finds the next pdu in sequence
3230 * and posts it to the SCSI layer
3233 iscsit_process_pdu_in_queue(iscsit_sess_t
*ist
)
3235 iscsit_cbuf_t
*cbuf
= ist
->ist_rxpdu_queue
;
3236 idm_pdu_t
*pdu
= NULL
;
3240 mutex_enter(&ist
->ist_sn_mutex
);
3241 if (cbuf
->cb_num_elems
== 0) {
3242 mutex_exit(&ist
->ist_sn_mutex
);
3245 expcmdsn
= ist
->ist_expcmdsn
;
3246 if ((pdu
= iscsit_remove_pdu_from_queue(ist
, expcmdsn
))
3248 mutex_exit(&ist
->ist_sn_mutex
);
3251 mutex_exit(&ist
->ist_sn_mutex
);
3252 iscsit_post_staged_pdu(pdu
);
3257 iscsit_post_staged_pdu(idm_pdu_t
*rx_pdu
)
3259 iscsit_conn_t
*ict
= rx_pdu
->isp_ic
->ic_handle
;
3261 /* Post the PDU to the SCSI layer */
3262 switch (IDM_PDU_OPCODE(rx_pdu
)) {
3263 case ISCSI_OP_NOOP_OUT
:
3264 iscsit_set_cmdsn(ict
, rx_pdu
);
3265 iscsit_pdu_op_noop(ict
, rx_pdu
);
3267 case ISCSI_OP_TEXT_CMD
:
3268 iscsit_set_cmdsn(ict
, rx_pdu
);
3269 iscsit_pdu_op_text_cmd(ict
, rx_pdu
);
3271 case ISCSI_OP_SCSI_TASK_MGT_MSG
:
3272 iscsit_set_cmdsn(ict
, rx_pdu
);
3273 iscsit_op_scsi_task_mgmt(ict
, rx_pdu
);
3275 case ISCSI_OP_SCSI_CMD
:
3276 /* cmdSN will be incremented after creating itask */
3277 iscsit_post_scsi_cmd(rx_pdu
->isp_ic
, rx_pdu
);
3279 case ISCSI_OP_LOGOUT_CMD
:
3280 iscsit_set_cmdsn(ict
, rx_pdu
);
3281 iscsit_pdu_op_logout_cmd(ict
, rx_pdu
);
3284 /* No other PDUs should be placed on the queue */
3287 iscsit_conn_dispatch_rele(ict
); /* release hold on the conn */
3292 iscsit_rxpdu_queue_monitor_start(void)
3294 mutex_enter(&iscsit_rxpdu_queue_monitor_mutex
);
3295 if (iscsit_rxpdu_queue_monitor_thr_running
) {
3296 mutex_exit(&iscsit_rxpdu_queue_monitor_mutex
);
3299 iscsit_rxpdu_queue_monitor_thr_id
=
3300 thread_create(NULL
, 0, iscsit_rxpdu_queue_monitor
, NULL
,
3301 0, &p0
, TS_RUN
, minclsyspri
);
3302 while (!iscsit_rxpdu_queue_monitor_thr_running
) {
3303 cv_wait(&iscsit_rxpdu_queue_monitor_cv
,
3304 &iscsit_rxpdu_queue_monitor_mutex
);
3306 mutex_exit(&iscsit_rxpdu_queue_monitor_mutex
);
3312 iscsit_rxpdu_queue_monitor_stop(void)
3314 mutex_enter(&iscsit_rxpdu_queue_monitor_mutex
);
3315 if (iscsit_rxpdu_queue_monitor_thr_running
) {
3316 iscsit_rxpdu_queue_monitor_thr_running
= B_FALSE
;
3317 cv_signal(&iscsit_rxpdu_queue_monitor_cv
);
3318 mutex_exit(&iscsit_rxpdu_queue_monitor_mutex
);
3320 thread_join(iscsit_rxpdu_queue_monitor_thr_did
);
3323 mutex_exit(&iscsit_rxpdu_queue_monitor_mutex
);
3327 * A separate thread is used to scan the staging queue on all the
3328 * sessions, If a delayed PDU does not arrive within a timeout, the
3329 * target will advance to the staged PDU that is next in sequence
3330 * and exceeded the threshold wait time. It is up to the initiator
3331 * to note that the target has not acknowledged a particular cmdsn
3332 * and take appropriate action.
3336 iscsit_rxpdu_queue_monitor(void *arg
)
3341 mutex_enter(&iscsit_rxpdu_queue_monitor_mutex
);
3342 iscsit_rxpdu_queue_monitor_thr_did
= curthread
->t_did
;
3343 iscsit_rxpdu_queue_monitor_thr_running
= B_TRUE
;
3344 cv_signal(&iscsit_rxpdu_queue_monitor_cv
);
3346 while (iscsit_rxpdu_queue_monitor_thr_running
) {
3347 ISCSIT_GLOBAL_LOCK(RW_READER
);
3348 for (tgt
= avl_first(&iscsit_global
.global_target_list
);
3350 tgt
= AVL_NEXT(&iscsit_global
.global_target_list
, tgt
)) {
3351 mutex_enter(&tgt
->target_mutex
);
3352 for (ist
= avl_first(&tgt
->target_sess_list
);
3354 ist
= AVL_NEXT(&tgt
->target_sess_list
, ist
)) {
3356 iscsit_rxpdu_queue_monitor_session(ist
);
3358 mutex_exit(&tgt
->target_mutex
);
3360 ISCSIT_GLOBAL_UNLOCK();
3361 if (iscsit_rxpdu_queue_monitor_thr_running
== B_FALSE
) {
3364 (void) cv_reltimedwait(&iscsit_rxpdu_queue_monitor_cv
,
3365 &iscsit_rxpdu_queue_monitor_mutex
,
3366 ISCSIT_RXPDU_QUEUE_MONITOR_INTERVAL
* drv_usectohz(1000000),
3369 mutex_exit(&iscsit_rxpdu_queue_monitor_mutex
);
3374 iscsit_rxpdu_queue_monitor_session(iscsit_sess_t
*ist
)
3376 iscsit_cbuf_t
*cbuf
= ist
->ist_rxpdu_queue
;
3377 idm_pdu_t
*next_pdu
= NULL
;
3378 uint32_t index
, next_cmdsn
, i
;
3381 * Assume that all PDUs in the staging queue have a cmdsn >= expcmdsn.
3382 * Starting with the expcmdsn, iterate over the staged PDUs to find
3383 * the next PDU with a wait time greater than the threshold. If found
3384 * advance the staged PDU to the SCSI layer, skipping over the missing
3385 * PDU(s) to get past the hole in the command sequence. It is up to
3386 * the initiator to note that the target has not acknowledged a cmdsn
3387 * and take appropriate action.
3389 * Since the PDU(s) arrive in any random order, it is possible that
3390 * that the actual wait time for a particular PDU is much longer than
3391 * the defined threshold. e.g. Consider a case where commands are sent
3392 * over 4 different connections, and cmdsn = 1004 arrives first, then
3393 * 1003, and 1002 and 1001 are lost due to a connection failure.
3394 * So now 1003 is waiting for 1002 to be delivered, and although the
3395 * wait time of 1004 > wait time of 1003, only 1003 will be considered
3396 * by the monitor thread. 1004 will be automatically processed by
3397 * iscsit_process_pdu_in_queue() once the scan is complete and the
3398 * expcmdsn becomes current.
3400 mutex_enter(&ist
->ist_sn_mutex
);
3401 cbuf
= ist
->ist_rxpdu_queue
;
3402 if (cbuf
->cb_num_elems
== 0) {
3403 mutex_exit(&ist
->ist_sn_mutex
);
3406 for (next_pdu
= NULL
, i
= 0; ; i
++) {
3407 next_cmdsn
= ist
->ist_expcmdsn
+ i
; /* start at expcmdsn */
3408 index
= next_cmdsn
% ISCSIT_RXPDU_QUEUE_LEN
;
3409 if ((next_pdu
= cbuf
->cb_buffer
[index
]) != NULL
) {
3411 * If the PDU wait time has not exceeded threshold
3412 * stop scanning the staging queue until the timer
3415 if ((gethrtime() - next_pdu
->isp_queue_time
)
3416 < (rxpdu_queue_threshold
* NANOSEC
)) {
3417 mutex_exit(&ist
->ist_sn_mutex
);
3421 * Remove the next PDU from the queue and post it
3422 * to the SCSI layer, skipping over the missing
3423 * PDU. Stop scanning the staging queue until
3424 * the monitor timer fires again
3426 (void) iscsit_remove_pdu_from_queue(ist
, next_cmdsn
);
3427 mutex_exit(&ist
->ist_sn_mutex
);
3428 DTRACE_PROBE3(advanced__to__blocked__cmdsn
,
3429 iscsit_sess_t
*, ist
, idm_pdu_t
*, next_pdu
,
3430 uint32_t, next_cmdsn
);
3431 iscsit_post_staged_pdu(next_pdu
);
3432 /* Deliver any subsequent PDUs immediately */
3433 iscsit_process_pdu_in_queue(ist
);
3437 * Skipping over i PDUs, e.g. a case where commands 1001 and
3438 * 1002 are lost in the network, skip over both and post 1003
3439 * expcmdsn then becomes 1004 at the end of the scan.
3441 DTRACE_PROBE2(skipping__over__cmdsn
, iscsit_sess_t
*, ist
,
3442 uint32_t, next_cmdsn
);
3445 * following the assumption, staged cmdsn >= expcmdsn, this statement