// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        /*
         * Obtain se_node_acl->acl_kref using fabric driver provided
         * initiatorname[] during node acl endpoint lookup driven by
         * new se_session login.
         *
         * The reference is held until se_session shutdown -> release
         * occurs via fabric driver invoked transport_deregister_session()
         * or transport_free_session() code.
         */
        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (!kref_get_unless_zero(&acl->acl_kref))
                        acl = NULL;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

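/*
 * Example (sketch, placeholder names; not a claim about any specific fabric
 * driver): a login path pairs this lookup with the release that happens at
 * session teardown, roughly:
 *
 *	acl = core_tpg_get_initiator_node_acl(se_tpg, name);
 *	if (!acl)
 *		<fail the login, or fall back to demo mode via
 *		 core_tpg_check_initiator_node_acl()>;
 *	...
 *	transport_deregister_session(sess);	// drops acl->acl_kref
 */
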
void core_allocate_nexus_loss_ua(
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        if (!nacl)
                return;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                core_scsi3_ua_allocate(deve, 0x29,
                        ASCQ_29H_NEXUS_LOSS_OCCURRED);
        rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

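/*
 * Usage note (sketch): fabric drivers call this when an I_T nexus is lost
 * outside of normal session shutdown, so the next command on each of the
 * node's mapped LUNs reports a UNIT ATTENTION (ASC 0x29, nexus loss).
 */
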
/*	core_tpg_add_node_to_devs():
 *
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg,
        struct se_lun *lun_orig)
{
        bool lun_access_ro = true;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun_orig && lun != lun_orig)
                        continue;

                dev = rcu_dereference_check(lun->lun_se_dev,
                                lockdep_is_held(&tpg->tpg_lun_mutex));
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access_ro = false;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access_ro = true;
                        else
                                lun_access_ro = false;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->fabric_name,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        lun_access_ro ? "READ-ONLY" : "READ-WRITE");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                                 lun_access_ro, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic ACL.
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

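/*
 * Usage note (sketch): a demo-mode login calls this with lun_orig == NULL,
 * mapping every LUN in the TPG into the new dynamic ACL; when a single new
 * LUN is exported later it can be passed as lun_orig so only that LUN is
 * mapped for already-existing dynamic ACLs.
 */
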
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
                            struct se_node_acl *acl, u32 queue_depth)
{
        acl->queue_depth = queue_depth;

        if (!acl->queue_depth) {
                pr_warn("Queue depth for %s Initiator Node: %s is 0, "
                        "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
                        acl->initiatorname);
                acl->queue_depth = 1;
        }
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        u32 queue_depth;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);

        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                queue_depth = 1;
        target_set_nacl_queue_depth(tpg, acl, queue_depth);

        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        mutex_unlock(&tpg->acl_node_mutex);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->fabric_name,
                acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                             const char *initiatorname)
{
        struct se_node_acl *acl;
        bool found = false;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname)) {
                        found = true;
                        break;
                }
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

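/*
 * Example (sketch, placeholder policy): a fabric driver can use this as a
 * point-in-time hint before applying its own login policy, e.g.:
 *
 *	if (!target_tpg_has_node_acl(se_tpg, name))
 *		<reject, or fall back to demo mode>;
 *
 * Unlike core_tpg_get_initiator_node_acl(), no acl_kref reference is taken,
 * so the ACL may be gone again by the time the caller acts on the answer.
 */
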
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        /*
         * When allocating a dynamically generated node_acl, go ahead
         * and take the extra kref now before returning to the fabric
         * driver caller.
         *
         * Note this reference will be released at session shutdown
         * time within transport_free_session() code.
         */
        kref_get(&acl->acl_kref);
        acl->dynamic_node_acl = 1;

        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg, NULL);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

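/*
 * Example (sketch, placeholder names): the typical login-time call chain,
 * roughly:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, name);
 *	if (!se_nacl)
 *		<fail the login: no explicit ACL and demo mode disabled>;
 *	<bind se_nacl to the new se_session>
 */
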
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->fabric_name,
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        mutex_unlock(&tpg->acl_node_mutex);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->fabric_name,
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
        mutex_unlock(&tpg->acl_node_mutex);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

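/*
 * Usage note: this is the explicit-ACL counterpart of the demo-mode path in
 * core_tpg_check_initiator_node_acl(), reached when userspace creates an ACL
 * node via configfs; an existing dynamic ACL for the same initiator is
 * promoted in place rather than allocating a second se_node_acl.
 */
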
static void target_shutdown_sessions(struct se_node_acl *acl)
{
        struct se_session *sess;
        unsigned long flags;

restart:
        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
                if (sess->sess_tearing_down)
                        continue;

                list_del_init(&sess->sess_acl_list);
                spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

                if (acl->se_tpg->se_tpg_tfo->close_session)
                        acl->se_tpg->se_tpg_tfo->close_session(sess);
                goto restart;
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

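/*
 * Note on target_shutdown_sessions(): ->close_session() may sleep and may
 * remove entries from acl_sess_list, so the spinlock is dropped before the
 * callback runs and the list walk is restarted from the top afterwards.
 */
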
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del_init(&acl->acl_list);
        mutex_unlock(&tpg->acl_node_mutex);

        target_shutdown_sessions(acl);

        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

        kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_node_acl *acl,
        u32 queue_depth)
{
        struct se_portal_group *tpg = acl->se_tpg;

        /*
         * Allow the setting of se_node_acl queue_depth to be idempotent,
         * and not force a session shutdown event if the value is not
         * changing.
         */
        if (acl->queue_depth == queue_depth)
                return 0;
        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * target_set_nacl_queue_depth() to set the new queue depth.
         */
        target_set_nacl_queue_depth(tpg, acl, queue_depth);

        /*
         * Shutdown all pending sessions to force session reinstatement.
         */
        target_shutdown_sessions(acl);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
                acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

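/*
 * Example (sketch, path illustrative): this is typically driven from a
 * fabric ACL attribute in configfs, e.g. for iscsi-target:
 *
 *	echo 64 > /sys/kernel/config/target/iscsi/$IQN/tpgt_1/acls/\
 *		$INITIATOR/cmdsn_depth
 *
 * Note the side effect above: active sessions for the ACL are shut down to
 * force session reinstatement at the new queue depth.
 */
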
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

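/*
 * Example (sketch, path illustrative): the tag is exposed through a nodeacl
 * configfs attribute, and writing the literal string "NULL" clears it:
 *
 *	echo backup-hosts > .../acls/$INITIATOR/tag
 *	echo NULL > .../acls/$INITIATOR/tag	# resets acl_tag to ""
 */
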
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_shutdown_comp);
}

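/*
 * Lifecycle note: lun->lun_ref is initialized in core_tpg_add_lun() with
 * this release callback. When the ref is killed during LUN shutdown (see
 * transport_clear_lun_ref() in core_tpg_remove_lun() below), the final put
 * runs this callback, completing lun_shutdown_comp for the waiting remover.
 */
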
/* Does not change se_wwn->priv. */
int core_tpg_register(
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        int ret;

        if (!se_tpg)
                return -EINVAL;
        /*
         * For the typical case where core_tpg_register() is called by a
         * fabric driver from target_core_fabric_ops->fabric_make_tpg()
         * configfs context, use the original tf_ops pointer already saved
         * by target-core in target_fabric_make_wwn().
         *
         * Otherwise, for special cases like iscsi-target discovery TPGs
         * the caller is responsible for setting ->se_tpg_tfo ahead of
         * calling core_tpg_register().
         */
        if (se_wwn)
                se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

        if (!se_tpg->se_tpg_tfo) {
                pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
                return -EINVAL;
        }

        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);
        mutex_init(&se_tpg->acl_node_mutex);

        if (se_tpg->proto_id >= 0) {
                se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
                if (IS_ERR(se_tpg->tpg_virt_lun0))
                        return PTR_ERR(se_tpg->tpg_virt_lun0);

                ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
                                       true, g_lun0_dev);
                if (ret < 0) {
                        kfree(se_tpg->tpg_virt_lun0);
                        return ret;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
                 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
                 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
                 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

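/*
 * Example (sketch, hypothetical my_tpg type and error handling): a minimal
 * fabric_make_tpg() implementation would end with something like:
 *
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 */
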
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
        LIST_HEAD(node_list);

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
                 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        mutex_lock(&se_tpg->acl_node_mutex);
        list_splice_init(&se_tpg->acl_node_list, &node_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del_init(&nacl->acl_list);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);
        }

        if (se_tpg->proto_id >= 0) {
                core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
                kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
        }

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u64 unpacked_lun)
{
        struct se_lun *lun;

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_deve_list);
        INIT_LIST_HEAD(&lun->lun_dev_link);
        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
        spin_lock_init(&lun->lun_deve_lock);
        mutex_init(&lun->lun_tg_pt_md_mutex);
        INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
        spin_lock_init(&lun->lun_tg_pt_gp_lock);
        lun->lun_tpg = tpg;

        return lun;
}

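/*
 * Usage note: callers pair core_tpg_alloc_lun() with core_tpg_add_lun() to
 * publish the LUN (see the virtual LUN 0 setup in core_tpg_register()); on
 * add failure the se_lun is released with plain kfree(), since it has not
 * yet been published for RCU readers.
 */
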
int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        bool lun_access_ro,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                goto out;

        ret = core_alloc_rtpi(lun, dev);
        if (ret)
                goto out_kill_ref;

        if (!(dev->transport->transport_flags &
              TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        mutex_lock(&tpg->tpg_lun_mutex);

        spin_lock(&dev->se_port_lock);
        lun->lun_index = dev->dev_index;
        rcu_assign_pointer(lun->lun_se_dev, dev);
        dev->export_count++;
        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->dev_flags & DF_READ_ONLY)
                lun->lun_access_ro = true;
        else
                lun->lun_access_ro = lun_access_ro;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;

out_kill_ref:
        percpu_ref_exit(&lun->lun_ref);
out:
        return ret;
}

void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        lun->lun_shutdown = true;

        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
         * be released. Also, se_lun->lun_ref is now used by PR and ALUA
         * logic when referencing a remote target port during ALL_TGT_PT=1
         * and generating UNIT_ATTENTIONs for ALUA access state transition.
         */
        transport_clear_lun_ref(lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        if (lun->lun_se_dev) {
                target_detach_tg_pt_gp(lun);

                spin_lock(&dev->se_port_lock);
                list_del(&lun->lun_dev_link);
                dev->export_count--;
                rcu_assign_pointer(lun->lun_se_dev, NULL);
                spin_unlock(&dev->se_port_lock);
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);

        lun->lun_shutdown = false;
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}