/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	__core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held by the caller.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}
static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
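
/*
 * Usage note (editorial sketch, not part of the original source): the
 * per-node device_list and per-TPG tpg_lun_list below are allocated with the
 * two helpers above as a table of individually kzalloc'd elements, roughly:
 *
 *	struct se_dev_entry **table;
 *
 *	table = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
 *			sizeof(struct se_dev_entry), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	array_free(table, TRANSPORT_MAX_LUNS_PER_TPG);
 *
 * "table" is a hypothetical local name. array_zalloc() releases any partially
 * allocated slots itself on failure, so callers only ever see a fully
 * populated table or NULL.
 */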
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
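
/*
 * Illustrative sketch (editorial addition, not from the original source): a
 * fabric module typically resolves the initiator's ACL from its login path
 * before registering a session, roughly:
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!se_nacl)
 *		return -EPERM;
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 *
 * se_tpg, initiatorname, se_sess and fabric_sess_ptr are assumed fabric-side
 * variables; error handling is abbreviated. A NULL return here means no
 * explicit ACL exists and tpg_check_demo_mode() refused a dynamic one.
 */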
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() is
			 * returning a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
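
/*
 * Illustrative sketch (editorial addition): explicit NodeACLs normally reach
 * this function from a fabric module's ->fabric_make_nodeacl() configfs
 * callback, roughly:
 *
 *	struct my_nacl *nacl = kzalloc(sizeof(*nacl), GFP_KERNEL);
 *	struct se_node_acl *se_nacl;
 *	...
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, &nacl->se_node_acl,
 *			name, queue_depth);
 *	if (IS_ERR(se_nacl))
 *		return ERR_CAST(se_nacl);
 *
 * struct my_nacl and its embedded se_node_acl member are hypothetical
 * fabric-side names. Note that several error paths above hand the passed-in
 * ACL back to the fabric via ->tpg_release_fabric_acl(), so the caller must
 * not free it again in those cases.
 */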
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
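
/*
 * Usage note (editorial sketch): this is normally driven from a fabric's
 * per-NodeACL configfs attribute store method, e.g. roughly:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, new_depth, 1);
 *
 * With force=0 the call refuses to touch an ACL that has an active session
 * and returns -EEXIST; with force=1 it shuts the session down so it is
 * reinstated with the new queue depth. new_depth is an assumed caller-side
 * variable.
 */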
/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
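
/*
 * Illustrative sketch (editorial addition): a fabric module registers its TPG
 * from the ->fabric_make_tpg() configfs callback, roughly:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *			tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * my_fabric_ops, wwn and tpg are assumed fabric-side objects. Discovery TPGs
 * pass TRANSPORT_TPG_TYPE_DISCOVERY instead, which skips the virtual LUN 0
 * setup performed above for TRANSPORT_TPG_TYPE_NORMAL.
 */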
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
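
/*
 * Usage note (editorial sketch): LUN creation pairs the two halves above,
 * conceptually:
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, lun_ptr);
 *
 * core_tpg_pre_addlun() validates and claims the slot under tpg_lun_lock;
 * core_tpg_post_addlun() exports the backing device and flips lun_status to
 * TRANSPORT_LUN_STATUS_ACTIVE. The variable names above are assumed
 * caller-side names.
 */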
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}