/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

/* tpg_lock protects tpg_list, the global list of registered se_portal_groups */
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun))
				break;
		}

		if (!acl) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}
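
/*
 * The two lookup helpers above differ in locking and in whether dynamic
 * (demo-mode generated) ACLs match.  A minimal caller sketch for the
 * lock-held variant, mirroring the callers later in this file
 * (illustrative only, not a fixed contract):
 *
 *	struct se_node_acl *acl;
 *
 *	spin_lock_irq(&tpg->acl_node_lock);
 *	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *	if (acl && acl->dynamic_node_acl)
 *		acl->dynamic_node_acl = 0;
 *	spin_unlock_irq(&tpg->acl_node_lock);
 *
 * Note the header comment above mentions spin_lock_bh(), but every
 * caller in this file holds acl_node_lock via spin_lock_irq().  Also,
 * core_tpg_get_initiator_node_acl() skips dynamic ACLs, so the two
 * helpers are not interchangeable beyond locking.
 */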
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
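
/*
 * A fabric module typically calls core_tpg_check_initiator_node_acl()
 * from its login path to resolve, or demo-mode allocate, the NodeACL
 * for an incoming initiator.  A minimal sketch, using hypothetical
 * fabric-side names (tpg, initiator_name, se_sess):
 *
 *	struct se_node_acl *se_nacl;
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg,
 *				(unsigned char *)initiator_name);
 *	if (!se_nacl)
 *		return -EPERM;
 *	se_sess->se_node_acl = se_nacl;
 *
 * A NULL return means no explicit ACL exists and either demo mode is
 * disabled for the TPG or the dynamic ACL allocation failed.
 */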
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
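
/*
 * core_tpg_add_initiator_node_acl() is normally driven from configfs,
 * i.e. a fabric's fabric_make_nodeacl() callback.  A minimal sketch,
 * using hypothetical fabric names (my_nacl, MY_NEXUS_DEPTH):
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&my_nacl->se_node_acl, name, MY_NEXUS_DEPTH);
 *	if (IS_ERR(se_nacl)) {
 *		kfree(my_nacl);
 *		return se_nacl;
 *	}
 *
 * On -EEXIST an explicit ACL was already present.  When a dynamic ACL
 * for the same initiator exists, it is converted in place and the
 * passed se_nacl is released via tpg_release_fabric_acl().
 */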
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
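
/*
 * This export is typically wired to a configfs attribute store handler
 * in a fabric module.  A minimal sketch, assuming the standard configfs
 * store arguments (page, count) and hypothetical se_tpg/acl pointers:
 *
 *	u32 depth;
 *	int ret;
 *
 *	ret = kstrtou32(page, 0, &depth);
 *	if (ret < 0)
 *		return ret;
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, depth, 1);
 *	return (ret < 0) ? ret : count;
 *
 * Passing force=1 allows an active session to be shut down and
 * reinstated so the new depth takes effect.
 */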
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
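
/*
 * A fabric module calls core_tpg_register() once per TPG, usually from
 * its configfs fabric_make_tpg() callback, and must balance it with
 * core_tpg_deregister() below.  A minimal sketch, using hypothetical
 * names (my_fabric_ops, my_tpg):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *			my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * TRANSPORT_TPG_TYPE_NORMAL also sets up the virtual LUN 0 above;
 * discovery TPGs pass TRANSPORT_TPG_TYPE_DISCOVERY and skip it.
 */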
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
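
/*
 * core_tpg_pre_addlun() and core_tpg_post_addlun() are used as a pair,
 * with the backend device export between them.  A condensed sketch of
 * roughly the pattern used by the LUN-add path in target_core_device.c
 * (illustrative only):
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */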
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
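
/*
 * The teardown mirror of the addlun pair above: callers (e.g. the
 * core_dev_del_lun() path) locate the active LUN with
 * core_tpg_pre_dellun() and then tear it down with
 * core_tpg_post_dellun().  A condensed sketch (illustrative only):
 *
 *	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	core_tpg_post_dellun(tpg, lun);
 */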