/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_irq(&tpg->acl_node_lock); must be held when calling,
 *	matching the callers later in this file.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
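#if 0
/*
 * Lock-contract sketch (illustrative, not part of the original file):
 * the list walk above is only safe under acl_node_lock, and the
 * returned pointer is only stable while that lock is held.
 */
static bool example_acl_exists(struct se_portal_group *tpg,
			       const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	found = (acl != NULL);
	spin_unlock_irq(&tpg->acl_node_lock);

	return found;
}
#endif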
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i;
	u32 lun_access;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
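#if 0
/*
 * Usage sketch (an assumption, not from this file): a fabric module
 * typically calls core_tpg_check_initiator_node_acl() from its login
 * path to resolve an explicit ACL, or to create a dynamic one when
 * TFO->tpg_check_demo_mode() allows it.  NULL means the initiator may
 * not log in.  The function name and error code here are hypothetical.
 */
static int example_fabric_login(struct se_portal_group *se_tpg,
				unsigned char *initiator_name)
{
	struct se_node_acl *se_nacl;

	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
	if (!se_nacl)
		return -EACCES;

	return 0;
}
#endif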
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
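#if 0
/*
 * Caller sketch (hypothetical, illustrative only): an explicit ACL is
 * registered from a fabric-allocated se_node_acl, typically inside the
 * fabric's ->fabric_make_nodeacl() as the comment above describes.
 * Note the ownership subtlety: on the dynamic->explicit conversion path
 * the passed-in ACL is released via TFO->tpg_release_fabric_acl() and
 * the existing ACL is returned instead.  The queue depth of 32 is an
 * arbitrary example value.
 */
static struct se_node_acl *example_make_nodeacl(struct se_portal_group *se_tpg,
						struct se_node_acl *se_nacl_new,
						const char *name)
{
	struct se_node_acl *se_nacl;

	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
						  name, 32);
	if (IS_ERR(se_nacl))
		return se_nacl;	/* -EEXIST, -ENOMEM or -EINVAL */

	return se_nacl;
}
#endif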
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational.  To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to apply the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
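#if 0
/*
 * Usage sketch (illustrative, hypothetical helper name): a configfs
 * attribute store handler applying a new depth.  With force=0 the call
 * fails with -EEXIST while a session is active; force=1 shuts the
 * session down and relies on reinstatement to pick up the new depth.
 */
static int example_store_queue_depth(struct se_portal_group *se_tpg,
				     unsigned char *initiatorname,
				     u32 new_depth, bool force)
{
	return core_tpg_set_initiator_node_queue_depth(se_tpg, initiatorname,
						       new_depth,
						       force ? 1 : 0);
}
#endif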
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
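#if 0
/*
 * Registration sketch (hypothetical fabric module, illustrative only):
 * core_tpg_register() is called while a TPG is created through configfs.
 * The final argument selects whether virtual LUN 0 is set up
 * (TRANSPORT_TPG_TYPE_NORMAL) or skipped (TRANSPORT_TPG_TYPE_DISCOVERY).
 */
static int example_make_tpg(struct target_core_fabric_ops *tfo,
			    struct se_wwn *wwn,
			    struct se_portal_group *se_tpg,
			    void *fabric_tpg_ptr)
{
	return core_tpg_register(tfo, wwn, se_tpg, fabric_tpg_ptr,
				 TRANSPORT_TPG_TYPE_NORMAL);
}
#endif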
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG - 1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG - 1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
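#if 0
/*
 * Pairing sketch (illustrative): core_tpg_pre_addlun() validates
 * unpacked_lun and claims the slot, then core_tpg_post_addlun() exports
 * the device and flips the LUN to ACTIVE.  This mirrors how the two
 * halves are driven from core_dev_add_lun() in target_core_device.c.
 */
static struct se_lun *example_add_lun(struct se_portal_group *tpg,
				      struct se_device *dev,
				      u32 unpacked_lun, u32 lun_access)
{
	struct se_lun *lun;

	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
		return ERR_PTR(-EINVAL);

	return lun;
}
#endif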
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG - 1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG - 1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}