// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;
/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
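
/*
 * Illustrative sketch, not part of the original file: how a fabric driver's
 * login path might use the kref-holding lookup above. The helper name and
 * message are hypothetical. The acl_kref taken by
 * core_tpg_get_initiator_node_acl() is dropped later via the fabric's
 * transport_deregister_session() / transport_free_session() calls, as the
 * comment above describes.
 */
static struct se_node_acl * __maybe_unused
example_fabric_login_lookup(struct se_portal_group *se_tpg,
			    unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(se_tpg, initiatorname);
	if (!acl)
		pr_debug("example fabric: no ACL (yet) for %s\n",
			 initiatorname);
	return acl;
}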

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
/*	core_tpg_add_node_to_devs():
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
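
/*
 * Illustrative sketch, not part of the original file: a fabric driver that
 * does not support demo mode could use target_tpg_has_node_acl() to reject
 * a login early when no explicit ACL has been configured. The helper name
 * and error code choice are hypothetical.
 */
static int __maybe_unused
example_fabric_check_login(struct se_portal_group *se_tpg,
			   const char *initiatorname)
{
	if (!target_tpg_has_node_acl(se_tpg, initiatorname))
		return -EACCES;	/* no explicit ACL configured */
	return 0;
}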

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
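
/*
 * Illustrative sketch, not part of the original file: a hypothetical fabric
 * session-establishment helper. core_tpg_check_initiator_node_acl() returns
 * either an existing explicit ACL or, when TFO->tpg_check_demo_mode() allows
 * it, a freshly allocated dynamic ACL; a NULL return means the login must
 * be failed by the fabric driver.
 */
static struct se_node_acl * __maybe_unused
example_fabric_session_acl(struct se_portal_group *se_tpg,
			   unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
	if (!acl)
		pr_err("example fabric: login from %s rejected\n",
		       initiatorname);
	return acl;
}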

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (atomic_read(&sess->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * The user has requested to change the queue depth for an Initiator
	 * Node. Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
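
/*
 * Illustrative sketch, not part of the original file: a hypothetical configfs
 * ->store() handler that parses a queue depth from userspace and applies it.
 * Note that a changed value forces session reinstatement via
 * target_shutdown_sessions() above.
 */
static ssize_t __maybe_unused
example_fabric_queue_depth_store(struct se_node_acl *acl,
				 const char *page, size_t count)
{
	u32 depth;
	int ret;

	ret = kstrtou32(page, 0, &depth);
	if (ret < 0)
		return ret;

	ret = core_tpg_set_initiator_node_queue_depth(acl, depth);
	return ret ? ret : count;
}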

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
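
/*
 * Illustrative sketch, not part of the original file: setting and clearing
 * an ACL tag from a hypothetical management hook. Writing the literal string
 * "NULL" clears the tag, as implemented above; the "group-a" tag value is
 * an assumption for this example.
 */
static int __maybe_unused
example_fabric_set_tag(struct se_portal_group *se_tpg,
		       struct se_node_acl *acl, bool clear)
{
	int ret = core_tpg_set_initiator_node_tag(se_tpg, acl,
						  clear ? "NULL" : "group-a");

	return ret < 0 ? ret : 0;
}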

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
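
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * fabric driver's ->fabric_make_tpg() configfs callback might pair with
 * core_tpg_register(). struct example_tpg, the tag parsing, and the iSCSI
 * proto_id choice are assumptions made for this example only.
 */
struct example_tpg {
	struct se_portal_group se_tpg;
	u16 tpgt;
};

static struct se_portal_group * __maybe_unused
example_fabric_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct example_tpg *tpg;
	int ret;

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	/* Simplified: real fabrics parse a "tpgt_N" directory name. */
	ret = kstrtou16(name, 10, &tpg->tpgt);
	if (!ret)
		ret = core_tpg_register(wwn, &tpg->se_tpg,
					SCSI_PROTOCOL_ISCSI);
	if (ret) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	return &tpg->se_tpg;
}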

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
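
/*
 * Illustrative sketch, not part of the original file: the matching
 * ->fabric_drop_tpg() for the example above, releasing what
 * core_tpg_register() set up.
 */
static void __maybe_unused
example_fabric_drop_tpg(struct se_portal_group *se_tpg)
{
	struct example_tpg *tpg = container_of(se_tpg, struct example_tpg,
					       se_tpg);

	core_tpg_deregister(se_tpg);
	kfree(tpg);
}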

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}
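
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * core_tpg_alloc_lun() and core_tpg_add_lun() with error unwinding, roughly
 * the pattern target-core follows when a LUN is created via configfs. The
 * helper name is hypothetical.
 */
static struct se_lun * __maybe_unused
example_tpg_export_lun(struct se_portal_group *tpg, struct se_device *dev,
		       u64 unpacked_lun, bool read_only)
{
	struct se_lun *lun;
	int ret;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	ret = core_tpg_add_lun(tpg, lun, read_only, dev);
	if (ret < 0) {
		kfree(lun);
		return ERR_PTR(ret);
	}
	return lun;
}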

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);

		if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
			hlist_del_rcu(&lun->link);
	}
	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}