/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
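/* tpg_lock guards tpg_list, on which every registered se_portal_group is tracked. */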
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		/* Drop the list lock around the call that tears down the mapping. */
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
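/*
 * Usage sketch (illustrative only): array_zalloc() hands back an array of n
 * individually kzalloc'd, zeroed objects; array_free() releases the objects
 * and the pointer array itself, mirroring how the device list below is built:
 *
 *	struct se_dev_entry **entries;
 *
 *	entries = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
 *			sizeof(struct se_dev_entry), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	...
 *	array_free(entries, TRANSPORT_MAX_LUNS_PER_TPG);
 */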
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
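/*
 * ACLs created by core_tpg_check_initiator_node_acl() are marked
 * dynamic_node_acl = 1.  If an explicit NodeACL is later configured,
 * core_tpg_add_initiator_node_acl() below clears that flag and reuses the
 * existing se_node_acl instead of allocating a new one.
 */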
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for the last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
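/*
 * Teardown ordering above: the ACL is first unlinked from the TPG, any active
 * sessions are shut down, the final nacl reference and outstanding PR
 * references are waited on, and only then are the mapped LUN entries and the
 * device list freed.
 */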
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
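/*
 * Illustrative only: passing the literal string "NULL" clears an existing
 * tag, otherwise the tag is copied and its length returned, e.g.
 *
 *	core_tpg_set_initiator_node_tag(tpg, acl, "NULL");    -> 0, tag cleared
 *	core_tpg_set_initiator_node_tag(tpg, acl, "group1");  -> 6, tag "group1"
 */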
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_sep_lock);
		init_completion(&lun->lun_ref_comp);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
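/*
 * Hypothetical fabric-module call sequence (the wrapper struct and names
 * below are invented for illustration; only the core_tpg_*() calls are real):
 *
 *	struct my_tpg {
 *		struct se_portal_group se_tpg;
 *	} *my_tpg;
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *	...
 *	core_tpg_deregister(&my_tpg->se_tpg);
 */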
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
	if (ret < 0)
		return ret;

	ret = core_dev_export(dev, tpg, lun);
	if (ret < 0) {
		percpu_ref_cancel_init(&lun->lun_ref);
		return ret;
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
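/*
 * The lun_ref initialized above is torn down via transport_clear_lun_ref()
 * from core_tpg_post_dellun() below; once the last reference is dropped,
 * core_tpg_lun_ref_release() completes lun->lun_ref_comp.
 */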
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_ref(lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}