// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
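/*
 * The two helper macros below generate per-backend *_cit setup functions.
 * TB_CIT_SETUP() wires a config_item_type inside struct target_backend to a
 * fixed attribute array, while TB_CIT_SETUP_DRV() takes the attribute array
 * from the backend driver's own target_backend_ops instead.
 */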
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_core_setup_##_name##_cit(struct target_backend *tb) \
{ \
	struct config_item_type *cit = &tb->tb_##_name##_cit; \
 \
	cit->ct_item_ops = _item_ops; \
	cit->ct_group_ops = _group_ops; \
	cit->ct_attrs = _attrs; \
	cit->ct_owner = tb->ops->owner; \
	pr_debug("Setup generic %s\n", __stringify(_name)); \
}
#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
static void target_core_setup_##_name##_cit(struct target_backend *tb) \
{ \
	struct config_item_type *cit = &tb->tb_##_name##_cit; \
 \
	cit->ct_item_ops = _item_ops; \
	cit->ct_group_ops = _group_ops; \
	cit->ct_attrs = tb->ops->tb_##_name##_attrs; \
	cit->ct_owner = tb->ops->owner; \
	pr_debug("Setup generic %s\n", __stringify(_name)); \
}
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];
static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}
static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;

	mutex_lock(&g_tf_lock);
	if (!list_empty(&g_tf_list)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot be changed: target drivers registered");
		return -EINVAL;
	}

	if (count > (DB_ROOT_LEN - 1)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		return -EINVAL;
	}

	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes) {
		mutex_unlock(&g_tf_lock);
		return -EINVAL;
	}
	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return -EINVAL;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		return -EINVAL;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, read_bytes);

	mutex_unlock(&g_tf_lock);

	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

	return read_bytes;
}

CONFIGFS_ATTR(target_core_item_, dbroot);
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		const char *cmp_name = tf->tf_ops->fabric_alias;
		if (!cmp_name)
			cmp_name = tf->tf_ops->fabric_name;
		if (!strcmp(cmp_name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}
/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for
		 * mkdir(2) system calls with known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}
/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
		" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};
/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};
static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
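/*
 * target_depend_item()/target_undepend_item() let target code take and drop a
 * configfs dependency on an item, which prevents rmdir(2) of that item while
 * it is in use (for example while a backend device is still referenced).
 */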
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (tfo->fabric_alias) {
		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
			pr_err("Passed alias: %s exceeds "
			       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
			return -EINVAL;
		}
	}
	if (!tfo->fabric_name) {
		pr_err("Missing tfo->fabric_name\n");
		return -EINVAL;
	}
	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds "
		       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	if (!tfo->check_stop_free) {
		pr_err("Missing tfo->check_stop_free()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
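/*
 * A fabric driver normally calls target_register_template() from its
 * module_init() and target_unregister_template() from module_exit().  A
 * minimal sketch (illustrative only, not taken from a real driver):
 *
 *	static const struct target_core_fabric_ops demo_ops = {
 *		.module		= THIS_MODULE,
 *		.fabric_name	= "demo",
 *		.tpg_get_wwn	= demo_get_fabric_wwn,
 *		.tpg_get_tag	= demo_get_tag,
 *		...
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return target_register_template(&demo_ops);
 *	}
 */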
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}
/* Start functions for struct config_item_type tb_dev_attrib_cit */

#define DEF_CONFIGFS_ATTRIB_SHOW(_name) \
static ssize_t _name##_show(struct config_item *item, char *page) \
{ \
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
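/*
 * Each DEF_CONFIGFS_ATTRIB_SHOW(foo) invocation below expands to a foo_show()
 * helper that prints the matching se_dev_attrib field; for example
 * DEF_CONFIGFS_ATTRIB_SHOW(block_size) emits block_size_show().
 */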
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count) \
{ \
	struct se_dev_attrib *da = to_attrib(item); \
	u32 val; \
	int ret; \
 \
	ret = kstrtou32(page, 0, &val); \
	if (ret < 0) \
		return ret; \
 \
	da->_name = val; \
	return count; \
}
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count) \
{ \
	struct se_dev_attrib *da = to_attrib(item); \
	bool flag; \
	int ret; \
 \
	ret = strtobool(page, &flag); \
	if (ret < 0) \
		return ret; \
 \
	da->_name = flag; \
	return count; \
}
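/*
 * The DEF_CONFIGFS_ATTRIB_STORE_U32/_BOOL helpers above parse the written
 * page with kstrtou32()/strtobool() and, on success, assign the value
 * directly to the matching se_dev_attrib field without further range checks.
 */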
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count) \
{ \
	printk_once(KERN_WARNING \
		"ignoring deprecated %s attribute\n", \
		__stringify(_name)); \
	return count; \
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
			configname);
	}
	/*
	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
	 * here without potentially breaking existing setups, so continue to
	 * truncate one byte shorter than what can be carried in INQUIRY.
	 */
	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}
static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
			sizeof(dev->t10_wwn.model));
	}
	da->emulate_model_alias = flag;
	return count;
}
static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}
static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != TARGET_UA_INTLCK_CTRL_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}
static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}

static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
/* always zero, but attr needs to remain RW to avoid userspace breakage */
static ssize_t pi_prot_format_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "0\n");
}

static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}
static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}
static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}
static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
		       " because max_unmap_block_desc_count is zero\n",
		       da->da_dev);
		return -ENOSYS;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}
block_size_store(struct config_item
*item
,
1066 const char *page
, size_t count
)
1068 struct se_dev_attrib
*da
= to_attrib(item
);
1072 ret
= kstrtou32(page
, 0, &val
);
1076 if (da
->da_dev
->export_count
) {
1077 pr_err("dev[%p]: Unable to change SE Device block_size"
1078 " while export_count is %d\n",
1079 da
->da_dev
, da
->da_dev
->export_count
);
1083 if (val
!= 512 && val
!= 1024 && val
!= 2048 && val
!= 4096) {
1084 pr_err("dev[%p]: Illegal value for block_device: %u"
1085 " for SE device, must be 512, 1024, 2048 or 4096\n",
1090 da
->block_size
= val
;
1091 if (da
->max_bytes_per_io
)
1092 da
->hw_max_sectors
= da
->max_bytes_per_io
/ val
;
1094 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1099 static ssize_t
alua_support_show(struct config_item
*item
, char *page
)
1101 struct se_dev_attrib
*da
= to_attrib(item
);
1102 u8 flags
= da
->da_dev
->transport_flags
;
1104 return snprintf(page
, PAGE_SIZE
, "%d\n",
1105 flags
& TRANSPORT_FLAG_PASSTHROUGH_ALUA
? 0 : 1);
1108 static ssize_t
alua_support_store(struct config_item
*item
,
1109 const char *page
, size_t count
)
1111 struct se_dev_attrib
*da
= to_attrib(item
);
1112 struct se_device
*dev
= da
->da_dev
;
1116 if (!(dev
->transport
->transport_flags_changeable
&
1117 TRANSPORT_FLAG_PASSTHROUGH_ALUA
)) {
1118 pr_err("dev[%p]: Unable to change SE Device alua_support:"
1119 " alua_support has fixed value\n", dev
);
1123 ret
= strtobool(page
, &flag
);
1128 dev
->transport_flags
&= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA
;
1130 dev
->transport_flags
|= TRANSPORT_FLAG_PASSTHROUGH_ALUA
;
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}

static ssize_t pgr_support_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (!(dev->transport->transport_flags_changeable &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
			" pgr_support has fixed value\n", dev);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag)
		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
	else
		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
	return count;
}
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
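/*
 * The CONFIGFS_ATTR()/CONFIGFS_ATTR_RO() invocations above define the
 * struct configfs_attribute objects named attr_<name>, which are then
 * collected into the backend attribute arrays that follow.
 */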
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
/*
 * pr related dev_attrib attributes for devices passing through CDBs,
 * but allowing in core pr emulation.
 */
struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
	&attr_enforce_pr_isids,
	&attr_force_pr_aptpl,
	NULL,
};
EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
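/*
 * Backend drivers pick one of the exported arrays above (or build their own)
 * and point target_backend_ops->tb_dev_attrib_attrs at it; for instance a
 * block-layer backend would typically use sbc_attrib_attrs while a
 * passthrough backend would use passthrough_attrib_attrs (illustrative
 * description; see the individual backends for the authoritative wiring).
 */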
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
static ssize_t target_check_inquiry_data(char *buf)
{
	size_t len;
	int i;

	len = strlen(buf);

	/*
	 * ASCII data fields shall contain only ASCII printable characters
	 * (i.e., code values 20h to 7Eh) and may be terminated with one or
	 * more ASCII null (00h) characters.
	 */
	for (i = 0; i < len; i++) {
		if (buf[i] < 0x20 || buf[i] > 0x7E) {
			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
			return -EINVAL;
		}
	}

	return len;
}
/*
 * STANDARD and VPD page 0x83 T10 Vendor Identification
 */
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}

static ssize_t target_wwn_vendor_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_VENDOR_LEN) {
		pr_err("Emulated T10 Vendor Identification exceeds"
			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Vendor Identification while"
			" active %d exports exist\n", dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
		 " %s\n", dev->t10_wwn.vendor);

	return count;
}
static ssize_t target_wwn_product_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
}

static ssize_t target_wwn_product_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_MODEL_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_MODEL_LEN) {
		pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
			 __stringify(INQUIRY_MODEL_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Model while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
		 dev->t10_wwn.model);

	return count;
}
static ssize_t target_wwn_revision_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
}

static ssize_t target_wwn_revision_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_REVISION_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_REVISION_LEN) {
		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
			 __stringify(INQUIRY_REVISION_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Revision while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
	strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
		 dev->t10_wwn.revision);

	return count;
}
/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&to_t10_wwn(item)->unit_serial[0]);
}

static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}
/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
static ssize_t target_wwn_##_name##_show(struct config_item *item, \
		char *page) \
{ \
	struct t10_wwn *t10_wwn = to_t10_wwn(item); \
	struct t10_vpd *vpd; \
	unsigned char buf[VPD_TMP_BUF_SIZE]; \
	ssize_t len = 0; \
 \
	spin_lock(&t10_wwn->t10_vpd_lock); \
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
		if (vpd->association != _assoc) \
			continue; \
 \
		memset(buf, 0, VPD_TMP_BUF_SIZE); \
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE) \
			break; \
		len += sprintf(page+len, "%s", buf); \
 \
		memset(buf, 0, VPD_TMP_BUF_SIZE); \
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE) \
			break; \
		len += sprintf(page+len, "%s", buf); \
 \
		memset(buf, 0, VPD_TMP_BUF_SIZE); \
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE) \
			break; \
		len += sprintf(page+len, "%s", buf); \
	} \
	spin_unlock(&t10_wwn->t10_vpd_lock); \
 \
	return len; \
}
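/*
 * The DEF_DEV_WWN_ASSOC_SHOW() expansions below dump the cached VPD page 0x83
 * descriptors filtered by their association field (0x00 = logical unit,
 * 0x10 = target port, 0x20 = SCSI target device).
 */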
/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vendor_id,
	&target_wwn_attr_product_id,
	&target_wwn_attr_revision,
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */
static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_session *sess = dev->reservation_holder;
	struct se_node_acl *se_nacl;
	ssize_t len;

	if (sess) {
		se_nacl = sess->se_node_acl;
		len = sprintf(page,
			      "SPC-2 Reservation: %s Initiator: %s\n",
			      se_nacl->se_tpg->se_tpg_tfo->fabric_name,
			      se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}
static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->fabric_name,
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");

	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}

static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
= {
1892 {Opt_initiator_fabric
, "initiator_fabric=%s"},
1893 {Opt_initiator_node
, "initiator_node=%s"},
1894 {Opt_initiator_sid
, "initiator_sid=%s"},
1895 {Opt_sa_res_key
, "sa_res_key=%s"},
1896 {Opt_res_holder
, "res_holder=%d"},
1897 {Opt_res_type
, "res_type=%d"},
1898 {Opt_res_scope
, "res_scope=%d"},
1899 {Opt_res_all_tg_pt
, "res_all_tg_pt=%d"},
1900 {Opt_mapped_lun
, "mapped_lun=%u"},
1901 {Opt_target_fabric
, "target_fabric=%s"},
1902 {Opt_target_node
, "target_node=%s"},
1903 {Opt_tpgt
, "tpgt=%d"},
1904 {Opt_port_rtpi
, "port_rtpi=%d"},
1905 {Opt_target_lun
, "target_lun=%u"},
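/*
 * target_pr_res_aptpl_metadata_store() below parses a comma/newline separated
 * list of the key=value tokens above.  An illustrative (not authoritative)
 * input line:
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:init,
 *	initiator_sid=10,sa_res_key=0x1234abcd,res_holder=1,res_type=3,
 *	mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.org.example:tgt,
 *	tpgt=1,target_lun=0
 *
 * i_port (initiator_node), t_port (target_node) and sa_res_key must all be
 * supplied or the registration is rejected.
 */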
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(i_port);
	kfree(isid);
	kfree(t_fabric);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_pr_attr_res_holder,
	&target_pr_attr_res_pr_all_tgt_pts,
	&target_pr_attr_res_pr_generation,
	&target_pr_attr_res_pr_holder_tg_port,
	&target_pr_attr_res_pr_registered_i_pts,
	&target_pr_attr_res_pr_type,
	&target_pr_attr_res_type,
	&target_pr_attr_res_aptpl_active,
	&target_pr_attr_res_aptpl_metadata,
	NULL,
};

TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);

/* End functions for struct config_item_type tb_dev_pr_cit */

/* Start functions for struct config_item_type tb_dev_cit */
static inline struct se_device *to_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device, dev_group);
}
static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes += bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}

static ssize_t target_dev_control_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);

	return dev->transport->set_configfs_dev_params(dev, page, count);
}
static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}

static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}
static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}

static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}
static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}

static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return count;

	if (count > LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}
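/*
 * Example (a sketch; the group and device names are illustrative): associating
 * a device with an existing LU group, then clearing the association again:
 *
 *   echo some_lu_gp > /sys/kernel/config/target/core/iblock_0/disk0/alua_lu_gp
 *   echo NULL > /sys/kernel/config/target/core/iblock_0/disk0/alua_lu_gp
 *
 * Writing "NULL" drops any existing association, per the strcmp() check above.
 */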
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}
static ssize_t target_dev_lba_map_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *orig, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	orig = map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				ret = -EINVAL;
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(orig);
	return count;
}
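/*
 * The parser above expects one header line of "<segment_size> <segment_mult>"
 * followed by one line per LBA segment of the form
 * "<first_lba> <last_lba> <pg_id>:<state> [<pg_id>:<state> ...]", where the
 * state character is O/A/S/U as handled by the switch statement. A sketch of
 * a hypothetical two-group map written from userspace:
 *
 *   printf '64 1\n0 1023 0:O 1:A\n1024 2047 0:A 1:O\n' > .../disk0/lba_map
 *
 * (the path is illustrative; lba_map lives in the device directory next to
 * the other attributes defined below).
 */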
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);

static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_dev_attr_info,
	&target_dev_attr_control,
	&target_dev_attr_alias,
	&target_dev_attr_udev_path,
	&target_dev_attr_enable,
	&target_dev_attr_alua_lu_gp,
	&target_dev_attr_lba_map,
	NULL,
};
static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	target_free_device(dev);
}

/*
 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
 * within target_fabric_port_link()
 */
struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
};

TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
/* End functions for struct config_item_type tb_dev_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */

static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_lu_gp,
			lu_gp_group);
}
static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);

	if (!lu_gp->lu_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}
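/*
 * Example (a sketch; the group name is illustrative): LU groups are created
 * by mkdir under core/alua/lu_gps/ and only become valid once an ID has been
 * assigned through this attribute:
 *
 *   mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
 *   echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
 */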
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_lu_gp_attr_lu_gp_id,
	&target_lu_gp_attr_members,
	NULL,
};
static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
};

static const struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */

static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
			&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;
}
static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static const struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
			tg_pt_gp_group);
}
static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%d\n",
		       to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on non valid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!target_dev_configured(dev)) {
		pr_err("Unable to set alua_access_state while device is"
		       " not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
				" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}
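/*
 * Example (a sketch): with TPGS_IMPLICIT_ALUA enabled and a valid tg_pt_gp ID,
 * a new primary access state is requested by writing its numeric value,
 * assuming the usual SPC-4 encoding (e.g. 0 for active/optimized, 2 for
 * standby); the path and group name are illustrative:
 *
 *   echo 2 > .../disk0/alua/my_tg_pt_gp/alua_access_state
 */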
static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	return sprintf(page, "%s\n",
		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}
static ssize_t target_tg_pt_gp_alua_access_status_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do set ALUA access status on non"
			" valid tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
				" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	if ((new_status != ALUA_STATUS_NONE) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal ALUA access status: 0x%02x\n",
			new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;

	return count;
}
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
		char *page)
{
	return core_alua_show_access_type(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
		struct config_item *item, char *p)			\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	return sprintf(p, "%d\n",					\
		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
}									\
									\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
		struct config_item *item, const char *p, size_t c)	\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		pr_err("Unable to do set " #_name " ALUA state on non"	\
		       " valid tg_pt_gp ID: %hu\n",			\
		       t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return -EINVAL;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->tg_pt_gp_alua_supported_states |= _bit;		\
	else								\
		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
									\
	return c;							\
}

ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
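/*
 * Each ALUA_SUPPORTED_STATE_ATTR() instantiation above generates a show/store
 * pair for one bit in tg_pt_gp_alua_supported_states, exposed as an
 * alua_support_<name> file that accepts only "0" or "1". For example
 * (illustrative path and group name):
 *
 *   echo 1 > .../alua/my_tg_pt_gp/alua_support_standby
 */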
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
		struct config_item *item, char *page)
{
	return sprintf(page, "%d\n",
		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}

static ssize_t target_tg_pt_gp_alua_write_metadata_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_write_metadata:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;

	return count;
}
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
			count);
}
static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
		struct config_item *item, char *page)
{
	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
		struct config_item *item, const char *page, size_t count)
{
	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
		char *page)
{
	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	if (!tg_pt_gp->tg_pt_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}

static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
		       page);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
		       tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
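/*
 * Example (a sketch; HBA, device, and group names are illustrative): target
 * port groups are created per device under $DEV/alua/ and activated by
 * assigning an ID:
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0/disk0/alua/my_tg_pt_gp
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/disk0/alua/my_tg_pt_gp/tg_pt_gp_id
 */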
static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);

static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};
static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}
static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. There are default groups
 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
 * target_core_alua_cit in target_core_init_configfs() below.
 */
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	return;
}

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}
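/*
 * Example (a sketch; names are illustrative): target_core_make_subdev() and
 * target_core_drop_subdev() back the mkdir/rmdir of device directories under
 * an HBA group:
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0/disk0
 *   rmdir /sys/kernel/config/target/core/iblock_0/disk0
 *
 * mkdir populates the default action/attrib/pr/wwn/alua/statistics groups;
 * rmdir tears them down again before the final config_item_put().
 */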
static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}
static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}
static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp..
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
				" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
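/*
 * Example (a sketch): HBA directories follow the $SUBSYSTEM_PLUGIN_$HOST_ID
 * naming parsed above, e.g. (illustrative plugin IDs):
 *
 *   mkdir /sys/kernel/config/target/core/iblock_0
 *   mkdir /sys/kernel/config/target/core/fileio_1
 *   mkdir /sys/kernel/config/target/core/rd_mcp_2
 */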
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static const struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
static void target_init_dbroot(void)
{
	struct file *fp;

	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
		return;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	target_init_dbroot();

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
static void __exit target_core_exit_configfs(void)
{
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic..
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);