/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

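/*
 * Resolve se_cmd->se_lun from the session's node ACL for @unpacked_lun and
 * take a percpu lun_ref; falls back to the TPG's write-protected virtual
 * LUN 0 so REPORT LUNS et al still work when no MappedLUN=0 exists for
 * this initiator port.
 */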
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

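/*
 * LUN lookup for task management requests: resolves the LUN, records the
 * backing device in se_tmr->tmr_dev, and queues the TMR on the device's
 * dev_tmr_list. Returns -ENODEV if no LUN mapping exists.
 */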
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

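/*
 * Drop every remaining MappedLUN entry for @nacl, called when the node
 * ACL itself is being released.
 */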
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

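/*
 * Flip the read-only flag on an existing MappedLUN entry under
 * lun_entry_mutex; silently does nothing if no entry exists.
 */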
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);

	complete(&deve->pr_comp);
}

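/*
 * Post a REPORTED LUNS DATA HAS CHANGED unit attention (ASC 0x3f) to every
 * MappedLUN entry for this node ACL, optionally skipping the entry that
 * triggered the change.
 */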
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

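/*
 * Install a MappedLUN entry for (@nacl, @mapped_lun). When a dynamic
 * (demo-mode) entry already exists for the same LUN, it is replaced via
 * RCU: the old deve is unhashed, outstanding pr_kref holders are drained,
 * and the old entry is freed with kfree_rcu().
 */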
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*	core_clear_lun_from_tpg():
 *
 *	Remove all MappedLUN entries referencing @lun from every node ACL
 *	in @tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

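/*
 * Allocate a unique, nonzero 16-bit RELATIVE TARGET PORT IDENTIFIER for
 * @lun under dev->se_port_lock, retrying on counter wrap or collision
 * with an existing port.
 */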
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

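/*
 * Example: with 512-byte blocks and PAGE_SIZE=4096 the alignment below is
 * 8 sectors, so a max_sectors of 1023 is rounded down to 1016.
 */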
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

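/*
 * Export @dev as @lun on @tpg, then map the new LUN into any dynamically
 * generated (demo-mode) node ACLs so existing initiators see it.
 */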
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *	Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

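/*
 * Attach an initialized se_lun_acl to @lun and enable the MappedLUN
 * entry, forcing read-only access if the LUN itself is read-only, then
 * enable any matching APTPL persistent reservation pre-registrations.
 */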
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

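/*
 * First-stage device creation: allocate a backend se_device and set up
 * all generic target-core state (lists, locks, default attributes and the
 * embedded xcopy_lun) ahead of target_configure_device().
 */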
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q, int block_size)
{
	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
								block_size;
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;	/* 4096 / 512 == 8 */
	case 2048:
		return lb << 2;	/* 2048 / 512 == 4 */
	case 1024:
		return lb << 1;	/* 1024 / 512 == 2 */
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

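/*
 * Second-stage device setup: invoke the backend's configure_device(),
 * derive generic attributes from the hardware values, create the TMR
 * workqueue, preload INQUIRY data for virtual backends, and flag the
 * device DF_CONFIGURED.
 */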
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		return ret;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);

	return ret;
}

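/*
 * Release a device created by target_alloc_device(), unwinding
 * target_configure_device() state first when DF_CONFIGURED is set.
 */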
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

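/*
 * Create the internal rd_mcp HBA and nullio ramdisk device backing the
 * per-TPG virtual LUN 0 used by transport_lookup_cmd_lun() above.
 */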
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);