/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

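/* All registered portal groups are linked on tpg_list, protected by tpg_lock. */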
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
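 * Revoke all mapped LUN access for @nacl on @tpg before the node ACL
 * is released.
 */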
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
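 * Locked wrapper around the lookup above; takes and releases
 * tpg->acl_node_lock itself.
 */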
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}

/* core_tpg_add_node_to_devs():
 *
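 * Create demo-mode MappedLUNs for @acl on every active LUN in @tpg,
 * read-write or read-only depending on tpg_check_demo_mode_write_protect().
 */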
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now..
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
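 * Sanity check the ACL's queue depth; a depth of zero is reset to a
 * minimum of 1 so the initiator can still queue commands.
 */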
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

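/*
 * The per-node device list and per-TPG LUN table are sized by
 * TRANSPORT_MAX_LUNS_PER_TPG and are allocated as arrays of individually
 * kzalloc'ed elements; array_free() releases each element and then the
 * pointer array itself.
 */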
static void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/* core_create_device_list_for_node():
 *
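 * Allocate and initialize the per-LUN struct se_dev_entry table hanging
 * off a node ACL.
 */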
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/* core_tpg_check_initiator_node_acl()
 *
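 * Look up an existing ACL for @initiatorname; if none exists and the
 * fabric allows demo mode, create a dynamic node ACL on the fly.
 */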
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

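/* Spin until all persistent reservation references on @nacl have dropped. */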
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/* core_tpg_add_initiator_node_acl():
 *
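 * Create an explicit node ACL for @initiatorname, upgrading an existing
 * dynamic (demo mode) ACL in place when one is found.
 */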
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/* core_tpg_del_initiator_node_acl():
 *
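 * Remove @acl from @tpg: shut down any active sessions still referencing
 * it, wait for all outstanding references to drop, then release its
 * device list.
 */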
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

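		/*
		 * Drop the extra reference taken via target_get_session()
		 * above; when ->shutdown_session() reports that it started
		 * session shutdown, drop the session's login reference too.
		 */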
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/* core_tpg_set_initiator_node_queue_depth():
 *
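 * Change the queue depth of an existing node ACL. If a session is active
 * for the initiator, force=1 is required and the session is forcefully
 * reinstated.
 */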
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

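/* Export the global virtual LUN 0 device read-only as this TPG's LUN 0. */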
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

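/*
 * Initialize and register a new portal group: allocate tpg_lun_list, set
 * up virtual LUN 0 for TRANSPORT_TPG_TYPE_NORMAL TPGs, and add the group
 * to the global tpg_list. Fabric modules typically call this from their
 * configfs TPG creation path; a minimal sketch (names assumed):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *				tpg, TRANSPORT_TPG_TYPE_NORMAL);
 */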
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

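/*
 * LUN setup is two-phase: core_tpg_pre_addlun() validates the requested
 * index and returns the matching slot from tpg_lun_list, and
 * core_tpg_post_addlun() exports the backing device and marks the LUN
 * active. The *_dellun() pair mirrors this for teardown. A typical
 * caller (sketch, assuming a fabric configfs path):
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_post_addlun(tpg, lun, lun_access, dev);
 */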
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}