// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);
/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
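
/*
 * Example (illustrative sketch only; my_fabric_login() is hypothetical):
 * a fabric driver performs this lookup at session login, and the acl_kref
 * taken here is dropped by target core at session teardown, per the
 * comment above.
 *
 *	static int my_fabric_login(struct se_portal_group *se_tpg,
 *				   unsigned char *initiatorname)
 *	{
 *		struct se_node_acl *acl;
 *
 *		acl = core_tpg_get_initiator_node_acl(se_tpg, initiatorname);
 *		if (!acl)
 *			return -EACCES;	// no matching NodeACL
 *		// ... bind acl to a new se_session; the reference taken
 *		// above is released via transport_deregister_session()
 *		// or transport_free_session().
 *		return 0;
 *	}
 */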
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now..
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}
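
/*
 * Illustrative sketch (hypothetical fabric, not part of this file): the
 * demo-mode decisions above are delegated to the fabric driver through
 * target_core_fabric_ops callbacks, typically backed by per-TPG attributes:
 *
 *	static int my_tpg_check_demo_mode(struct se_portal_group *se_tpg)
 *	{
 *		return 1;	// accept unknown initiators, build dynamic ACLs
 *	}
 *
 *	static int my_tpg_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
 *	{
 *		return 0;	// map demo-mode LUNs read-write
 *	}
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.tpg_check_demo_mode			= my_tpg_check_demo_mode,
 *		.tpg_check_demo_mode_write_protect	= my_tpg_check_demo_mode_write_protect,
 *		// ... remaining mandatory callbacks elided
 *	};
 */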
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
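
/*
 * Example (illustrative sketch; my_fabric_session_setup() is hypothetical):
 * fabric drivers call this helper from their login path so that unknown
 * initiators still get a dynamic ACL when demo mode is enabled.
 *
 *	static int my_fabric_session_setup(struct se_portal_group *se_tpg,
 *					   unsigned char *initiatorname)
 *	{
 *		struct se_node_acl *acl;
 *
 *		acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *		if (!acl)
 *			return -EACCES;	// no explicit ACL and demo mode off
 *		// acl is either the explicit NodeACL, or a freshly built
 *		// dynamic_node_acl with demo-mode MappedLUNs attached.
 *		return 0;
 *	}
 */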
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}
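
/*
 * Note on the restart label above: nacl_sess_lock is dropped before
 * calling into the fabric driver's ->close_session(), since that callback
 * may block. Because other sessions can be added or removed while the
 * lock is released, the list walk restarts from the head after every
 * callback rather than continuing from a possibly stale iterator.
 */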
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
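
/*
 * Usage sketch (hedged; the my_ store handler below is hypothetical):
 * this is typically driven from a fabric driver's configfs attribute
 * store path. Note the call forces session reinstatement via
 * target_shutdown_sessions() whenever the value actually changes.
 *
 *	static ssize_t my_nacl_cmdsn_depth_store(struct config_item *item,
 *						 const char *page, size_t count)
 *	{
 *		struct se_node_acl *acl = ...;	// from config_item, fabric specific
 *		u32 depth;
 *		int ret;
 *
 *		ret = kstrtou32(page, 0, &depth);
 *		if (ret)
 *			return ret;
 *
 *		ret = core_tpg_set_initiator_node_queue_depth(acl, depth);
 *		return ret ? ret : count;
 *	}
 */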
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
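
/*
 * Example (illustrative): writing the literal string "NULL" clears the
 * tag, anything else becomes the new tag:
 *
 *	core_tpg_set_initiator_node_tag(tpg, acl, "rack-12");	// returns 7
 *	core_tpg_set_initiator_node_tag(tpg, acl, "NULL");	// clears, returns 0
 */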
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}
static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	if (se_tpg->rtpi_manual) {
		ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
		if (ret) {
			pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy",
				se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				se_tpg->tpg_rtpi);
			return -EINVAL;
		}
	} else {
		ret = xa_alloc(&tpg_xa, &val, se_tpg,
			       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
		if (!ret)
			se_tpg->tpg_rtpi = val;
	}

	return ret;
}
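
/*
 * Background note: the RTPI is the SPC-4 RELATIVE TARGET PORT IDENTIFIER,
 * a 16-bit value that must be unique per target port, which is why the
 * automatic path allocates from XA_LIMIT(1, USHRT_MAX). With rtpi_manual,
 * a user-chosen identifier is claimed with xa_insert(), which fails with
 * -EBUSY when that slot is already taken; the error is remapped to
 * -EINVAL for the caller.
 */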
static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
	if (se_tpg->tpg_rtpi && se_tpg->enabled)
		xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}
int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int ret;

	ret = target_tpg_register_rtpi(se_tpg);
	if (ret)
		return ret;

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (ret) {
		target_tpg_deregister_rtpi(se_tpg);
		return ret;
	}

	se_tpg->enabled = true;

	return 0;
}
int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int ret;

	target_tpg_deregister_rtpi(se_tpg);

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (!ret)
		se_tpg->enabled = false;

	return ret;
}
/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
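
/*
 * Typical caller (hedged sketch): a fabric driver's fabric_make_tpg()
 * configfs hook registers the new TPG with target core. The my_tpg
 * container type below is hypothetical.
 *
 *	static struct se_portal_group *my_fabric_make_tpg(struct se_wwn *wwn,
 *							  const char *name)
 *	{
 *		struct my_tpg *tpg;
 *		int ret;
 *
 *		tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *		if (ret < 0) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 */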
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	target_tpg_deregister_rtpi(se_tpg);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out:
	return ret;
}
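
/*
 * Usage sketch (hedged): core_tpg_alloc_lun() and core_tpg_add_lun() are
 * normally paired from target core's configfs LUN creation path; a
 * simplified caller looks like:
 *
 *	struct se_lun *lun;
 *	int ret;
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *
 *	ret = core_tpg_add_lun(tpg, lun, lun_access_ro, dev);
 *	if (ret < 0)
 *		kfree(lun);	// not yet visible in tpg_lun_hlist
 *	return ret;
 */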
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}