/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* __core_tpg_get_initiator_node_acl():
 *
 * mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Look up an initiator node ACL by name, taking a reference on success.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        /*
         * Obtain se_node_acl->acl_kref using fabric driver provided
         * initiatorname[] during node acl endpoint lookup driven by
         * new se_session login.
         *
         * The reference is held until se_session shutdown -> release
         * occurs via fabric driver invoked transport_deregister_session()
         * or transport_free_session() code.
         */
        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (!kref_get_unless_zero(&acl->acl_kref))
                        acl = NULL;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

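/*
 * Example usage of core_tpg_get_initiator_node_acl() (a sketch, not taken
 * from any specific fabric driver): a typical caller pairs the login-time
 * lookup with the acl_kref drop that happens at session teardown:
 *
 *      acl = core_tpg_get_initiator_node_acl(se_tpg, name);
 *      if (!acl)
 *              return -EPERM;
 *      ...
 *      transport_deregister_session(se_sess);  // releases acl->acl_kref
 */
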
void core_allocate_nexus_loss_ua(
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        if (!nacl)
                return;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                core_scsi3_ua_allocate(deve, 0x29,
                        ASCQ_29H_NEXUS_LOSS_OCCURRED);
        rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

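/*
 * The ASC 0x29 queued by core_allocate_nexus_loss_ua() pairs with
 * ASCQ_29H_NEXUS_LOSS_OCCURRED, i.e. the SPC "NEXUS LOSS OCCURRED"
 * additional sense code, so each LUN mapped through the node ACL surfaces
 * the lost nexus to the initiator as a Unit Attention on a later command.
 */
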
/* core_tpg_add_node_to_devs():
 *
 * Set up demo-mode MappedLUNs for a node ACL from the active TPG LUNs.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg,
        struct se_lun *lun_orig)
{
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun_orig && lun != lun_orig)
                        continue;

                dev = rcu_dereference_check(lun->lun_se_dev,
                                lockdep_is_held(&tpg->tpg_lun_mutex));
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only non-disk devices (e.g. optical drives)
                         * to issue R/W in default RO demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic LUN ACL now..
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

/* core_set_queue_depth_for_node():
 *
 * Sanity-check the per-ACL queue depth, falling back to 1 if unset.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0, "
                        "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);
        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                acl->queue_depth = 1;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_set_queue_depth_for_node(tpg, acl) < 0)
                goto out_free_acl;

        return acl;

out_free_acl:
        kfree(acl);
        return NULL;
}

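/*
 * Note: allocating max(sizeof(*acl), tfo->node_acl_size) above lets a fabric
 * driver embed struct se_node_acl as the leading member of a larger
 * per-initiator structure, e.g. (hypothetical fabric type, for illustration
 * only):
 *
 *      struct my_fabric_nacl {
 *              struct se_node_acl se_node_acl; // must be the first member
 *              ...fabric-private per-initiator state...
 *      };
 *
 * with ->node_acl_size set to sizeof(struct my_fabric_nacl), since the
 * kzalloc() pointer is returned directly as a struct se_node_acl *.
 */
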
static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        mutex_unlock(&tpg->acl_node_mutex);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(),
                acl->initiatorname);
}

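/*
 * Unlike core_tpg_get_initiator_node_acl(), target_tpg_has_node_acl() below
 * takes no reference on a matching ACL; it only reports whether one existed
 * at the time of the call, so the answer may already be stale by the time
 * the caller acts on it.
 */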
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                             const char *initiatorname)
{
        struct se_node_acl *acl;
        bool found = false;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname)) {
                        found = true;
                        break;
                }
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        /*
         * When allocating a dynamically generated node_acl, go ahead
         * and take the extra kref now before returning to the fabric
         * driver caller.
         *
         * Note this reference will be released at session shutdown
         * time within transport_free_session() code.
         */
        kref_get(&acl->acl_kref);
        acl->dynamic_node_acl = 1;

        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg, NULL);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

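/*
 * In short, core_tpg_check_initiator_node_acl() is the login-time entry
 * point: reuse an explicit ACL when one exists, otherwise fall back to a
 * dynamically generated demo-mode ACL (when the fabric permits demo mode),
 * optionally mapping all active TPG LUNs into it before publishing it on
 * the TPG's ACL list.
 */
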
/*
 * Busy-wait until all outstanding persistent reservation references to
 * this node ACL have been dropped.
 */
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        mutex_unlock(&tpg->acl_node_mutex);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
        mutex_unlock(&tpg->acl_node_mutex);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        mutex_unlock(&tpg->acl_node_mutex);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
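        /*
         * Each session moved onto the local sess_list above now holds an
         * extra reference from target_get_session().  The loop below drops
         * that reference after asking the fabric driver to shut the session
         * down; when ->shutdown_session() returns nonzero, this context is
         * also responsible for the session's remaining reference, hence the
         * second target_put_session().
         */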
        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                target_put_session(sess);
                if (!rc)
                        continue;
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        kfree(acl);
}

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for an existing initiator node ACL, optionally
 * forcing reinstatement of any active session.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational. To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        mutex_lock(&tpg->acl_node_mutex);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        mutex_unlock(&tpg->acl_node_mutex);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to add the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                mutex_lock(&tpg->acl_node_mutex);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                mutex_unlock(&tpg->acl_node_mutex);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        mutex_lock(&tpg->acl_node_mutex);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        mutex_unlock(&tpg->acl_node_mutex);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

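/*
 * Sketch of the intended call path (hedged; exact attribute names vary by
 * fabric): a configfs attribute store handler, such as iscsi-target's
 * per-ACL cmdsn_depth write, ends up here; with force=1 an active session
 * is closed so the initiator re-logins at the new queue depth.
 */
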
/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

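/*
 * Release callback for se_lun->lun_ref: once the percpu reference has been
 * killed and the last I/O reference dropped, complete lun_shutdown_comp so
 * the LUN removal path (see core_tpg_remove_lun() below) can proceed.
 */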
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_shutdown_comp);
}

int core_tpg_register(
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        int ret;

        if (!se_tpg)
                return -EINVAL;
        /*
         * For the typical case where core_tpg_register() is called by a
         * fabric driver from target_core_fabric_ops->fabric_make_tpg()
         * configfs context, use the original tf_ops pointer already saved
         * by target-core in target_fabric_make_wwn().
         *
         * Otherwise, for special cases like iscsi-target discovery TPGs
         * the caller is responsible for setting ->se_tpg_tfo ahead of
         * calling core_tpg_register().
         */
        if (se_wwn)
                se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

        if (!se_tpg->se_tpg_tfo) {
                pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
                return -EINVAL;
        }

        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);
        mutex_init(&se_tpg->acl_node_mutex);

        if (se_tpg->proto_id >= 0) {
                se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
                if (IS_ERR(se_tpg->tpg_virt_lun0))
                        return PTR_ERR(se_tpg->tpg_virt_lun0);

                ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
                                TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
                if (ret < 0) {
                        kfree(se_tpg->tpg_virt_lun0);
                        return ret;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

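/*
 * Note: for TPGs with a real SCSI protocol identifier (proto_id >= 0),
 * registration above wires up a read-only virtual LUN 0 backed by the
 * global g_lun0_dev, so an initiator with no explicitly mapped LUNs still
 * has a well-defined LUN 0 to address (e.g. for REPORT LUNS and INQUIRY)
 * during discovery.
 */
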
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
        LIST_HEAD(node_list);

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        mutex_lock(&se_tpg->acl_node_mutex);
        list_splice_init(&se_tpg->acl_node_list, &node_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);
        }

        if (se_tpg->proto_id >= 0) {
                core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
                kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
        }

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u64 unpacked_lun)
{
        struct se_lun *lun;

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        lun->lun_link_magic = SE_LUN_LINK_MAGIC;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_ref_comp);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_deve_list);
        INIT_LIST_HEAD(&lun->lun_dev_link);
        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
        spin_lock_init(&lun->lun_deve_lock);
        mutex_init(&lun->lun_tg_pt_md_mutex);
        INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
        spin_lock_init(&lun->lun_tg_pt_gp_lock);
        lun->lun_tpg = tpg;

        return lun;
}

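/*
 * A note on the alloc/add split (a sketch of the usual configfs flow): the
 * fabric mkdir of a LUN directory typically allocates the se_lun via
 * core_tpg_alloc_lun() above, while the later symlink to a backend device
 * ends up in core_tpg_add_lun() below, which publishes the LUN on the TPG
 * hlist and takes the device export reference.
 */
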
int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                goto out;

        ret = core_alloc_rtpi(lun, dev);
        if (ret)
                goto out_kill_ref;

        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        mutex_lock(&tpg->tpg_lun_mutex);

        spin_lock(&dev->se_port_lock);
        lun->lun_index = dev->dev_index;
        rcu_assign_pointer(lun->lun_se_dev, dev);
        dev->export_count++;
        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->dev_flags & DF_READ_ONLY)
                lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        else
                lun->lun_access = lun_access;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;

out_kill_ref:
        percpu_ref_exit(&lun->lun_ref);
out:
        return ret;
}

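/*
 * The percpu_ref initialized in core_tpg_add_lun() is torn down again in
 * core_tpg_remove_lun() below via percpu_ref_exit(), after
 * transport_clear_lun_ref() has drained all outstanding I/O references.
 */
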
void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
         * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
         * logic when referencing a remote target port during ALL_TGT_PT=1
         * and generating UNIT_ATTENTIONs for ALUA access state transition.
         */
        transport_clear_lun_ref(lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        if (lun->lun_se_dev) {
                target_detach_tg_pt_gp(lun);

                spin_lock(&dev->se_port_lock);
                list_del(&lun->lun_dev_link);
                dev->export_count--;
                rcu_assign_pointer(lun->lun_se_dev, NULL);
                spin_unlock(&dev->se_port_lock);
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}