/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
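
/*
 * tpg_lock protects tpg_list, the global list of every registered
 * struct se_portal_group; see core_tpg_register() and
 * core_tpg_deregister() below.
 */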
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
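
/*
 * Note: the loop above drops nacl->device_list_lock around
 * core_update_device_list_for_node(), presumably because that helper
 * takes further locks of its own; the iteration stays valid across the
 * unlock because device_list is a fixed-size array indexed by i.
 */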
/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
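
/*
 * Callers already holding tpg->acl_node_lock use the __ prefixed lookup
 * above directly; core_tpg_get_initiator_node_acl() is the self-locking
 * wrapper for everyone else.
 */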
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
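
/*
 * A sketch of the fabric contract assumed above: the fabric module's
 * target_core_fabric_ops provides tpg_check_demo_mode_write_protect(),
 * for example (hypothetical fabric callback):
 *
 *	static int my_tpg_check_demo_mode_write_protect(
 *		struct se_portal_group *se_tpg)
 *	{
 *		return 1;
 *	}
 *
 * Returning non-zero keeps demo-mode LUNs read-only for TYPE_DISK
 * backends; returning 0 lets core_tpg_add_node_to_devs() grant
 * READ-WRITE access whenever the backend device is not itself
 * read-only (DF_READ_ONLY).
 */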
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}
static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
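
/*
 * array_zalloc()/array_free() manage a two-level allocation: a vector
 * of n pointers, each pointing at its own zeroed element of 'size'
 * bytes.  Allocating per element avoids one large kzalloc() for all
 * TRANSPORT_MAX_LUNS_PER_TPG entries, and since kfree(NULL) is a no-op,
 * array_free() is safe on a partially populated vector.
 */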
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
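
/*
 * Expected usage (an assumption based on the demo-mode logic above):
 * fabric drivers call core_tpg_check_initiator_node_acl() from their
 * session login path, e.g.:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!se_nacl)
 *		goto reject_login;
 *
 * where reject_login is a hypothetical fabric-private label.  A NULL
 * return means no explicit ACL exists and the TPG does not allow demo
 * mode.
 */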
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
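
/*
 * Note on the teardown above: sessions still linked to the ACL are
 * moved to a local list under nacl_sess_lock, then shut down with no
 * locks held.  The first target_put_session() balances the
 * target_get_session() taken while walking acl_sess_list; the second
 * put after a non-zero shutdown_session() return appears to drop the
 * session's login reference on behalf of the fabric (an assumption
 * about the shutdown_session() contract).
 */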
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
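
/*
 * The dynamic_node_acl flag is cleared while the depth change is in
 * flight and restored on every exit path above, so a demo-mode ACL is
 * never silently promoted to an explicit ACL by a queue depth change.
 */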
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
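
/*
 * Example call (a sketch using hypothetical fabric-private names): a
 * fabric module would typically register a TPG from its configfs
 * ->fabric_make_tpg() callback:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */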
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
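
/*
 * LUN setup is two-phase: core_tpg_pre_addlun() validates unpacked_lun
 * and claims the slot under tpg_lun_lock, then core_tpg_post_addlun()
 * exports the backend device and only afterwards marks the LUN
 * TRANSPORT_LUN_STATUS_ACTIVE, so a half-exported LUN is never visible
 * as active.
 */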
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
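
/*
 * Deletion mirrors addition: core_tpg_pre_dellun() verifies the LUN is
 * ACTIVE, and core_tpg_post_dellun() quiesces outstanding references
 * via core_tpg_shutdown_lun(), unexports the backend device, and
 * finally returns the slot to TRANSPORT_LUN_STATUS_FREE.
 */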