/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *	Look up an ACL by initiator name, taking an acl_kref reference
 *	on behalf of a new se_session.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
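/*
 * Queue a NEXUS_LOSS_OCCURRED unit attention on every se_dev_entry
 * mapped by this node ACL, so the initiator learns on its next command
 * that the I_T nexus was lost.
 */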
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for @acl from the active TPG LUNs,
 *	or from @lun_orig only when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}
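/*
 * Apply a queue depth to the node ACL, clamping a zero value from the
 * fabric up to 1 so command submission can never be starved outright.
 */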
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
		struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}
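/*
 * Allocate and initialize a new se_node_acl, sized so that any
 * fabric-private data (tfo->node_acl_size) lives directly behind the
 * generic structure in the same allocation.
 */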
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
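/*
 * Publish a fully initialized ACL on the TPG's acl_node_list.
 */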
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}
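/*
 * Report whether an ACL already exists for @initiatorname on @tpg.
 */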
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
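/*
 * Resolve the ACL for an incoming session: return an existing entry if
 * one is found, otherwise generate a dynamic ACL when the fabric allows
 * demo mode, or NULL when it does not.
 */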
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
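/*
 * Spin until all SPC-3 persistent reservation references to this node
 * ACL have been dropped.
 */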
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
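/*
 * Create an explicit ACL from configfs. An existing dynamic ACL for the
 * same initiator is converted in place; an existing explicit ACL makes
 * the request fail with -EEXIST.
 */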
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
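/*
 * Ask the fabric to close every session on this ACL that is not already
 * tearing down. The spinlock is dropped around ->close_session(), which
 * may sleep, so the list walk restarts from the top afterwards.
 */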
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}
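/*
 * Remove an ACL from its TPG: unlink it, shut down its sessions, then
 * wait for the final acl_kref put before releasing its resources.
 */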
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Called from configfs to change an ACL's queue depth, shutting
 *	down its sessions to force reinstatement at the new depth.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
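/*
 * percpu_ref release callback, invoked once the last reference to
 * se_lun->lun_ref is dropped; completes lun_ref_comp for the waiter in
 * transport_clear_lun_ref() during LUN shutdown.
 */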
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}
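/*
 * Register a TPG with target-core. A fabric driver typically calls this
 * from its configfs ->fabric_make_tpg() callback; a minimal sketch for a
 * hypothetical "foo" fabric (foo_tpg, foo_make_tpg and the embedded
 * se_tpg member are illustrative, not part of this API):
 *
 *	static struct se_portal_group *foo_make_tpg(struct se_wwn *wwn,
 *						    const char *name)
 *	{
 *		struct foo_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *		int ret;
 *
 *		if (!tpg)
 *			return ERR_PTR(-ENOMEM);
 *		ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *		if (ret) {
 *			kfree(tpg);
 *			return ERR_PTR(ret);
 *		}
 *		return &tpg->se_tpg;
 *	}
 *
 * @proto_id is one of the SCSI_PROTOCOL_* values from scsi_proto.h, or
 * -1 for protocol-less TPGs, in which case no virtual LUN 0 is set up.
 */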
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
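/*
 * Tear down a TPG: unlink it from the global tpg_list, wait out any
 * outstanding PR references, release remaining demo-mode ACLs, and free
 * the virtual LUN 0 for protocol TPGs.
 */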
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
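/*
 * Allocate and initialize an se_lun for this TPG. The LUN does not
 * become visible until core_tpg_add_lun() links it into tpg_lun_hlist.
 */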
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}
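/*
 * Activate a LUN: initialize the percpu I/O reference counter, allocate
 * a relative target port identifier, attach the default ALUA target
 * port group, and publish the LUN on the TPG's tpg_lun_hlist.
 */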
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}
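/*
 * Reverse of core_tpg_add_lun(): unmap the LUN from all node ACLs, wait
 * for outstanding I/O references to drain, then unlink it from the
 * device and the TPG.
 */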
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}