/*
 * SCSI sysfs interface routines.
 *
 * Created to pull SCSI mid layer sysfs routines into one file.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
static struct device_type scsi_dev_type;
static const struct {
	enum scsi_device_state	value;
	char			*name;
} sdev_states[] = {
	{ SDEV_CREATED, "created" },
	{ SDEV_RUNNING, "running" },
	{ SDEV_CANCEL, "cancel" },
	{ SDEV_DEL, "deleted" },
	{ SDEV_QUIESCE, "quiesce" },
	{ SDEV_OFFLINE,	"offline" },
	{ SDEV_TRANSPORT_OFFLINE, "transport-offline" },
	{ SDEV_BLOCK, "blocked" },
	{ SDEV_CREATED_BLOCK, "created-blocked" },
};
const char *scsi_device_state_name(enum scsi_device_state state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
		if (sdev_states[i].value == state) {
			name = sdev_states[i].name;
			break;
		}
	}
	return name;
}
static const struct {
	enum scsi_host_state	value;
	char			*name;
} shost_states[] = {
	{ SHOST_CREATED, "created" },
	{ SHOST_RUNNING, "running" },
	{ SHOST_CANCEL, "cancel" },
	{ SHOST_DEL, "deleted" },
	{ SHOST_RECOVERY, "recovery" },
	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
	{ SHOST_DEL_RECOVERY, "deleted/recovery" },
};
const char *scsi_host_state_name(enum scsi_host_state state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
		if (shost_states[i].value == state) {
			name = shost_states[i].name;
			break;
		}
	}
	return name;
}
static const struct {
	unsigned char	value;
	char		*name;
} sdev_access_states[] = {
	{ SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" },
	{ SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" },
	{ SCSI_ACCESS_STATE_STANDBY, "standby" },
	{ SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" },
	{ SCSI_ACCESS_STATE_LBA, "lba-dependent" },
	{ SCSI_ACCESS_STATE_OFFLINE, "offline" },
	{ SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
};
static const char *scsi_access_state_name(unsigned char state)
{
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) {
		if (sdev_access_states[i].value == state) {
			name = sdev_access_states[i].name;
			break;
		}
	}
	return name;
}
static int check_set(unsigned long long *val, char *src)
{
	char *last;

	if (strncmp(src, "-", 20) == 0) {
		*val = SCAN_WILD_CARD;
	} else {
		/*
		 * Doesn't check for int overflow
		 */
		*val = simple_strtoull(src, &last, 0);
		if (*last != '\0')
			return 1;
	}
	return 0;
}
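
/*
 * Illustrative sketch (not part of the original file): check_set() turns one
 * field of the "scan" attribute syntax into a numeric value or the wildcard:
 *
 *	unsigned long long v;
 *	check_set(&v, "-");	// v == SCAN_WILD_CARD, returns 0
 *	check_set(&v, "3");	// v == 3, returns 0
 *	check_set(&v, "3x");	// with the trailing-character check above,
 *				// returns non-zero (rejected)
 */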
static int scsi_scan(struct Scsi_Host *shost, const char *str)
{
	char s1[15], s2[15], s3[17], junk;
	unsigned long long channel, id, lun;
	int res;

	res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk);
	if (res != 3)
		return -EINVAL;
	if (check_set(&channel, s1))
		return -EINVAL;
	if (check_set(&id, s2))
		return -EINVAL;
	if (check_set(&lun, s3))
		return -EINVAL;
	if (shost->transportt->user_scan)
		res = shost->transportt->user_scan(shost, channel, id, lun);
	else
		res = scsi_scan_host_selected(shost, channel, id, lun,
					      SCSI_SCAN_MANUAL);
	return res;
}
/*
 * shost_show_function: macro to create an attr function that can be used to
 * show a non-bit field.
 */
#define shost_show_function(name, field, format_string)		\
static ssize_t								\
show_##name (struct device *dev, struct device_attribute *attr,	\
	     char *buf)							\
{									\
	struct Scsi_Host *shost = class_to_shost(dev);			\
	return snprintf (buf, 20, format_string, shost->field);	\
}

/*
 * shost_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define shost_rd_attr2(name, field, format_string)			\
	shost_show_function(name, field, format_string)			\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);

#define shost_rd_attr(field, format_string) \
	shost_rd_attr2(field, field, format_string)
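
/*
 * Illustrative expansion (not part of the build): shost_rd_attr(unique_id,
 * "%u\n") expands to roughly
 *
 *	static ssize_t show_unique_id(struct device *dev,
 *				      struct device_attribute *attr, char *buf)
 *	{
 *		struct Scsi_Host *shost = class_to_shost(dev);
 *		return snprintf(buf, 20, "%u\n", shost->unique_id);
 *	}
 *	static DEVICE_ATTR(unique_id, S_IRUGO, show_unique_id, NULL);
 *
 * i.e. a read-only sysfs file "unique_id" below the Scsi_Host device.
 */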
/*
 * Create the actual show/store functions and data structures.
 */

static ssize_t
store_scan(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	int res;

	res = scsi_scan(shost, buf);
	if (res == 0)
		res = count;
	return res;
}
static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
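
/*
 * Usage note (illustrative): the "scan" attribute takes "<channel> <id>
 * <lun>" where each field is a number or "-" for a wildcard, e.g. from
 * userspace:
 *
 *	echo "- - -" > /sys/class/scsi_host/host0/scan
 *
 * which rescans every channel/id/lun on that host ("host0" is only an
 * example name).
 */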
static ssize_t
store_shost_state(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int i;
	struct Scsi_Host *shost = class_to_shost(dev);
	enum scsi_host_state state = 0;

	for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
		const int len = strlen(shost_states[i].name);
		if (strncmp(shost_states[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			state = shost_states[i].value;
			break;
		}
	}
	if (!state)
		return -EINVAL;

	if (scsi_host_set_state(shost, state))
		return -EINVAL;
	return count;
}
static ssize_t
show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	const char *name = scsi_host_state_name(shost->shost_state);

	if (!name)
		return -EINVAL;

	return snprintf(buf, 20, "%s\n", name);
}

/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
static struct device_attribute dev_attr_hstate =
	__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
static ssize_t
show_shost_mode(unsigned int mode, char *buf)
{
	ssize_t len = 0;

	if (mode & MODE_INITIATOR)
		len = sprintf(buf, "%s", "Initiator");

	if (mode & MODE_TARGET)
		len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");

	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t
show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned int supported_mode = shost->hostt->supported_mode;

	if (supported_mode == MODE_UNKNOWN)
		/* by default this should be initiator */
		supported_mode = MODE_INITIATOR;

	return show_shost_mode(supported_mode, buf);
}

static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
static ssize_t
show_shost_active_mode(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	if (shost->active_mode == MODE_UNKNOWN)
		return snprintf(buf, 20, "unknown\n");
	else
		return show_shost_mode(shost->active_mode, buf);
}

static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
static int check_reset_type(const char *str)
{
	if (sysfs_streq(str, "adapter"))
		return SCSI_ADAPTER_RESET;
	else if (sysfs_streq(str, "firmware"))
		return SCSI_FIRMWARE_RESET;
	else
		return 0;
}
static ssize_t
store_host_reset(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct scsi_host_template *sht = shost->hostt;
	int ret = -EINVAL;
	int type;

	type = check_reset_type(buf);
	if (!type)
		goto exit_store_host_reset;

	if (sht->host_reset)
		ret = sht->host_reset(shost, type);
	else
		ret = -EOPNOTSUPP;

exit_store_host_reset:
	if (ret == 0)
		ret = count;
	return ret;
}

static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
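
/*
 * Usage note (illustrative): writing "adapter" or "firmware" to the
 * "host_reset" attribute asks the LLD's ->host_reset() hook for the
 * corresponding reset type, e.g.:
 *
 *	echo "adapter" > /sys/class/scsi_host/host0/host_reset
 *
 * ("host0" is an example; hosts without ->host_reset() reject the write).
 */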
static ssize_t
show_shost_eh_deadline(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	if (shost->eh_deadline == -1)
		return snprintf(buf, strlen("off") + 2, "off\n");
	return sprintf(buf, "%u\n", shost->eh_deadline / HZ);
}
static ssize_t
store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	int ret = -EINVAL;
	unsigned long deadline, flags;

	if (shost->transportt &&
	    (shost->transportt->eh_strategy_handler ||
	     !shost->hostt->eh_host_reset_handler))
		return ret;

	if (!strncmp(buf, "off", strlen("off")))
		deadline = -1;
	else {
		ret = kstrtoul(buf, 10, &deadline);
		if (ret)
			return ret;
		if (deadline * HZ > UINT_MAX)
			return -EINVAL;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_in_recovery(shost))
		ret = -EBUSY;
	else {
		if (deadline == -1)
			shost->eh_deadline = -1;
		else
			shost->eh_deadline = deadline * HZ;

		ret = count;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	return ret;
}

static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
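
/*
 * Usage note (illustrative): "eh_deadline" is read and written in seconds
 * ("off" disables it), while shost->eh_deadline itself is kept in jiffies,
 * hence the "* HZ" / "/ HZ" conversions above, e.g.:
 *
 *	echo 30 > /sys/class/scsi_host/host0/eh_deadline
 *	echo off > /sys/class/scsi_host/host0/eh_deadline
 */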
shost_rd_attr(use_blk_mq, "%d\n");
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(sg_prot_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr(prot_capabilities, "%u\n");
shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
static ssize_t
show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
}
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
static struct attribute *scsi_sysfs_shost_attrs[] = {
	&dev_attr_use_blk_mq.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_host_busy.attr,
	&dev_attr_cmd_per_lun.attr,
	&dev_attr_can_queue.attr,
	&dev_attr_sg_tablesize.attr,
	&dev_attr_sg_prot_tablesize.attr,
	&dev_attr_unchecked_isa_dma.attr,
	&dev_attr_proc_name.attr,
	&dev_attr_scan.attr,
	&dev_attr_hstate.attr,
	&dev_attr_supported_mode.attr,
	&dev_attr_active_mode.attr,
	&dev_attr_prot_capabilities.attr,
	&dev_attr_prot_guard_type.attr,
	&dev_attr_host_reset.attr,
	&dev_attr_eh_deadline.attr,
	NULL
};

static struct attribute_group scsi_shost_attr_group = {
	.attrs =	scsi_sysfs_shost_attrs,
};

const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
	&scsi_shost_attr_group,
	NULL
};
static void scsi_device_cls_release(struct device *class_dev)
{
	struct scsi_device *sdev;

	sdev = class_to_sdev(class_dev);
	put_device(&sdev->sdev_gendev);
}
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct device *parent;
	struct list_head *this, *tmp;
	unsigned long flags;

	sdev = container_of(work, struct scsi_device, ew.work);

	scsi_dh_release_device(sdev);

	parent = sdev->sdev_gendev.parent;

	spin_lock_irqsave(sdev->host->host_lock, flags);
	list_del(&sdev->siblings);
	list_del(&sdev->same_target_siblings);
	list_del(&sdev->starved_entry);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	cancel_work_sync(&sdev->event_work);

	list_for_each_safe(this, tmp, &sdev->event_list) {
		struct scsi_event *evt;

		evt = list_entry(this, struct scsi_event, node);
		list_del(&evt->node);
		kfree(evt);
	}

	blk_put_queue(sdev->request_queue);
	/* NULL queue means the device can't be used */
	sdev->request_queue = NULL;

	kfree(sdev->vpd_pg83);
	kfree(sdev->vpd_pg80);
	kfree(sdev->inquiry);
	kfree(sdev);

	if (parent)
		put_device(parent);
}
static void scsi_device_dev_release(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	execute_in_process_context(scsi_device_dev_release_usercontext,
				   &sdp->ew);
}

static struct class sdev_class = {
	.name		= "scsi_device",
	.dev_release	= scsi_device_cls_release,
};
/* all probing is done in the individual ->probe routines */
static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
{
	struct scsi_device *sdp;

	if (dev->type != &scsi_dev_type)
		return 0;

	sdp = to_scsi_device(dev);
	if (sdp->no_uld_attach)
		return 0;
	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
}
static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct scsi_device *sdev;

	if (dev->type != &scsi_dev_type)
		return 0;

	sdev = to_scsi_device(dev);

	add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
	return 0;
}

struct bus_type scsi_bus_type = {
	.name		= "scsi",
	.match		= scsi_bus_match,
	.uevent		= scsi_bus_uevent,
#ifdef CONFIG_PM
	.pm		= &scsi_bus_pm_ops,
#endif
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
	int error;

	error = bus_register(&scsi_bus_type);
	if (!error) {
		error = class_register(&sdev_class);
		if (error)
			bus_unregister(&scsi_bus_type);
	}

	return error;
}

void scsi_sysfs_unregister(void)
{
	class_unregister(&sdev_class);
	bus_unregister(&scsi_bus_type);
}
/*
 * sdev_show_function: macro to create an attr function that can be used to
 * show a non-bit field.
 */
#define sdev_show_function(field, format_string)			\
static ssize_t								\
sdev_show_##field (struct device *dev, struct device_attribute *attr,	\
		   char *buf)						\
{									\
	struct scsi_device *sdev;					\
	sdev = to_scsi_device(dev);					\
	return snprintf (buf, 20, format_string, sdev->field);		\
}

/*
 * sdev_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define sdev_rd_attr(field, format_string)				\
	sdev_show_function(field, format_string)			\
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);


/*
 * sdev_rw_attr: create a function and attribute variable for a
 * read/write field.
 */
#define sdev_rw_attr(field, format_string)				\
	sdev_show_function(field, format_string)			\
									\
static ssize_t								\
sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
		    const char *buf, size_t count)			\
{									\
	struct scsi_device *sdev;					\
	sdev = to_scsi_device(dev);					\
	sscanf (buf, format_string, &sdev->field);			\
	return count;							\
}									\
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
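
/*
 * Illustrative expansion (not part of the build): sdev_rd_attr(scsi_level,
 * "%d\n") expands to roughly
 *
 *	static ssize_t sdev_show_scsi_level(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct scsi_device *sdev = to_scsi_device(dev);
 *		return snprintf(buf, 20, "%d\n", sdev->scsi_level);
 *	}
 *	static DEVICE_ATTR(scsi_level, S_IRUGO, sdev_show_scsi_level, NULL);
 *
 * i.e. a read-only "scsi_level" file in the scsi_device's sysfs directory;
 * sdev_rw_attr() additionally generates a store routine that sscanf()s the
 * written value straight into the field.
 */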
/* Currently we don't export bit fields, but we might in future,
 * so leave this code in */
#if 0
/*
 * sdev_rw_attr_bit: create a function and attribute variable for a
 * read/write bit field.
 */
#define sdev_rw_attr_bit(field)						\
	sdev_show_function(field, "%d\n")				\
									\
static ssize_t								\
sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
		    const char *buf, size_t count)			\
{									\
	int ret;							\
	struct scsi_device *sdev;					\
	ret = scsi_sdev_check_buf_bit(buf);				\
	if (ret >= 0) {							\
		sdev = to_scsi_device(dev);				\
		sdev->field = ret;					\
		ret = count;						\
	}								\
	return ret;							\
}									\
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);

/*
 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
 * else return -EINVAL.
 */
static int scsi_sdev_check_buf_bit(const char *buf)
{
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1')
			return 1;
		else if (buf[0] == '0')
			return 0;
		else
			return -EINVAL;
	} else
		return -EINVAL;
}
#endif
/*
 * Create the actual show/store functions and data structures.
 */
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
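
/*
 * Note (illustrative): the five invocations above create the read-only
 * per-device sysfs files "type", "scsi_level", "vendor", "model" and "rev",
 * with vendor/model/rev printed as fixed-width INQUIRY strings (8, 16 and
 * 4 characters respectively).
 */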
static ssize_t
sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
}
static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);

static ssize_t
sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
}
static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
/*
 * TODO: can we make these symlinks to the block layer ones?
 */
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}

static ssize_t
sdev_store_timeout (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct scsi_device *sdev;
	int timeout;
	sdev = to_scsi_device(dev);
	sscanf (buf, "%d\n", &timeout);
	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
	return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
static ssize_t
sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
}

static ssize_t
sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_device *sdev;
	unsigned int eh_timeout;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	sdev = to_scsi_device(dev);
	err = kstrtouint(buf, 10, &eh_timeout);
	if (err)
		return err;
	sdev->eh_timeout = eh_timeout * HZ;

	return count;
}
static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout);
static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	scsi_rescan_device(dev);
	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	if (device_remove_file_self(dev, attr))
		scsi_remove_device(to_scsi_device(dev));
	return count;
}
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
static ssize_t
store_state_field(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int i;
	struct scsi_device *sdev = to_scsi_device(dev);
	enum scsi_device_state state = 0;

	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
		const int len = strlen(sdev_states[i].name);
		if (strncmp(sdev_states[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			state = sdev_states[i].value;
			break;
		}
	}
	if (!state)
		return -EINVAL;

	if (scsi_device_set_state(sdev, state))
		return -EINVAL;
	return count;
}

static ssize_t
show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const char *name = scsi_device_state_name(sdev->sdev_state);

	if (!name)
		return -EINVAL;

	return snprintf(buf, 20, "%s\n", name);
}

static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
static ssize_t
show_queue_type_field(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const char *name = "none";

	if (sdev->simple_tags)
		name = "simple";

	return snprintf(buf, 20, "%s\n", name);
}

static ssize_t
store_queue_type_field(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!sdev->tagged_supported)
		return -EINVAL;

	sdev_printk(KERN_INFO, sdev,
		    "ignoring write to deprecated queue_type attribute");
	return count;
}

static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
		   store_queue_type_field);
#define sdev_vpd_pg_attr(_page)						\
static ssize_t								\
show_vpd_##_page(struct file *filp, struct kobject *kobj,		\
		 struct bin_attribute *bin_attr,			\
		 char *buf, loff_t off, size_t count)			\
{									\
	struct device *dev = container_of(kobj, struct device, kobj);	\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int ret;							\
	if (!sdev->vpd_##_page)						\
		return -EINVAL;						\
	rcu_read_lock();						\
	ret = memory_read_from_buffer(buf, count, &off,			\
				      rcu_dereference(sdev->vpd_##_page), \
				      sdev->vpd_##_page##_len);		\
	rcu_read_unlock();						\
	return ret;							\
}									\
static struct bin_attribute dev_attr_vpd_##_page = {			\
	.attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
	.size = 0,							\
	.read = show_vpd_##_page,					\
};

sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
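
/*
 * Note (illustrative): each sdev_vpd_pg_attr() invocation creates a binary
 * sysfs attribute, so the two lines above yield the files "vpd_pg83" and
 * "vpd_pg80" in the device directory; reading them returns the raw VPD page
 * the device reported (e.g. page 0x83, device identification).
 */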
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!sdev->inquiry)
		return -EINVAL;

	return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
				       sdev->inquiry_len);
}

static struct bin_attribute dev_attr_inquiry = {
	.attr = {
		.name = "inquiry",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = show_inquiry,
};
static ssize_t
show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
}

static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
#define show_sdev_iostat(field)						\
static ssize_t								\
show_iostat_##field(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	unsigned long long count = atomic_read(&sdev->field);		\
	return snprintf(buf, 20, "0x%llx\n", count);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)

show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
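
/*
 * Note (illustrative): the three invocations above create the read-only
 * files "iorequest_cnt", "iodone_cnt" and "ioerr_cnt"; each prints its
 * atomic counter in hex, e.g. reading iodone_cnt might return "0x1a2b".
 */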
static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
}
static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
#define DECLARE_EVT_SHOW(name, Cap_name)				\
static ssize_t								\
sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
		     char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
	return snprintf(buf, 20, "%d\n", val);				\
}

#define DECLARE_EVT_STORE(name, Cap_name)				\
static ssize_t								\
sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
		      const char *buf, size_t count)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = simple_strtoul(buf, NULL, 0);				\
	if (val == 0)							\
		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else if (val == 1)						\
		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else								\
		return -EINVAL;						\
	return count;							\
}

#define DECLARE_EVT(name, Cap_name)					\
	DECLARE_EVT_SHOW(name, Cap_name)				\
	DECLARE_EVT_STORE(name, Cap_name)				\
	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name,	\
			   sdev_store_evt_##name);
#define REF_EVT(name) &dev_attr_evt_##name.attr

DECLARE_EVT(media_change, MEDIA_CHANGE)
DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
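
/*
 * Illustrative expansion (not part of the build): DECLARE_EVT(media_change,
 * MEDIA_CHANGE) generates sdev_show_evt_media_change(),
 * sdev_store_evt_media_change() and dev_attr_evt_media_change, i.e. an
 * "evt_media_change" sysfs file whose value (0/1) toggles the
 * SDEV_EVT_MEDIA_CHANGE bit in sdev->supported_events; REF_EVT() is then
 * used below to place these attributes into scsi_sdev_attrs[].
 */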
static ssize_t
sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	int depth, retval;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct scsi_host_template *sht = sdev->host->hostt;

	if (!sht->change_queue_depth)
		return -EINVAL;

	depth = simple_strtoul(buf, NULL, 0);

	if (depth < 1 || depth > sdev->host->can_queue)
		return -EINVAL;

	retval = sht->change_queue_depth(sdev, depth);
	if (retval < 0)
		return retval;

	sdev->max_queue_depth = sdev->queue_depth;

	return count;
}
sdev_show_function(queue_depth, "%d\n");

static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
		   sdev_store_queue_depth);
static ssize_t
sdev_show_wwid(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	ssize_t count;

	count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE);
	if (count > 0) {
		buf[count] = '\n';
		count++;
	}
	return count;
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
#ifdef CONFIG_SCSI_DH
static ssize_t
sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!sdev->handler)
		return snprintf(buf, 20, "detached\n");

	return snprintf(buf, 20, "%s\n", sdev->handler->name);
}
static ssize_t
sdev_store_dh_state(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = -EINVAL;

	if (sdev->sdev_state == SDEV_CANCEL ||
	    sdev->sdev_state == SDEV_DEL)
		return -ENODEV;

	if (!sdev->handler) {
		/*
		 * Attach to a device handler
		 */
		err = scsi_dh_attach(sdev->request_queue, buf);
	} else if (!strncmp(buf, "activate", 8)) {
		/*
		 * Activate a device handler
		 */
		if (sdev->handler->activate)
			err = sdev->handler->activate(sdev, NULL, NULL);
		else
			err = 0;
	} else if (!strncmp(buf, "detach", 6)) {
		/*
		 * Detach from a device handler
		 */
		sdev_printk(KERN_WARNING, sdev,
			    "can't detach handler %s.\n",
			    sdev->handler->name);
		err = -EINVAL;
	}

	return err < 0 ? err : count;
}

static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state,
		   sdev_store_dh_state);
static ssize_t
sdev_show_access_state(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned char access_state;
	const char *access_state_name;

	if (!sdev->handler)
		return -EINVAL;

	access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK);
	access_state_name = scsi_access_state_name(access_state);

	return sprintf(buf, "%s\n",
		       access_state_name ? access_state_name : "unknown");
}
static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL);
static ssize_t
sdev_show_preferred_path(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!sdev->handler)
		return -EINVAL;

	if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED)
		return sprintf(buf, "1\n");
	else
		return sprintf(buf, "0\n");
}
static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL);
#endif
static ssize_t
sdev_show_queue_ramp_up_period(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct scsi_device *sdev;
	sdev = to_scsi_device(dev);
	return snprintf(buf, 20, "%u\n",
			jiffies_to_msecs(sdev->queue_ramp_up_period));
}

static ssize_t
sdev_store_queue_ramp_up_period(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned int period;

	if (kstrtouint(buf, 10, &period))
		return -EINVAL;

	sdev->queue_ramp_up_period = msecs_to_jiffies(period);
	return count;
}

static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
		   sdev_show_queue_ramp_up_period,
		   sdev_store_queue_ramp_up_period);
static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);

	if (attr == &dev_attr_queue_depth.attr &&
	    !sdev->host->hostt->change_queue_depth)
		return S_IRUGO;

	if (attr == &dev_attr_queue_ramp_up_period.attr &&
	    !sdev->host->hostt->change_queue_depth)
		return 0;

#ifdef CONFIG_SCSI_DH
	if (attr == &dev_attr_access_state.attr &&
	    !sdev->handler)
		return 0;
	if (attr == &dev_attr_preferred_path.attr &&
	    !sdev->handler)
		return 0;
#endif

	return attr->mode;
}
static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
					     struct bin_attribute *attr, int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);

	if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
		return 0;

	if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
		return 0;

	return S_IRUGO;
}
/* Default template for device attributes.  May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
	&dev_attr_device_blocked.attr,
	&dev_attr_type.attr,
	&dev_attr_scsi_level.attr,
	&dev_attr_device_busy.attr,
	&dev_attr_vendor.attr,
	&dev_attr_model.attr,
	&dev_attr_rev.attr,
	&dev_attr_rescan.attr,
	&dev_attr_delete.attr,
	&dev_attr_state.attr,
	&dev_attr_timeout.attr,
	&dev_attr_eh_timeout.attr,
	&dev_attr_iocounterbits.attr,
	&dev_attr_iorequest_cnt.attr,
	&dev_attr_iodone_cnt.attr,
	&dev_attr_ioerr_cnt.attr,
	&dev_attr_modalias.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_queue_type.attr,
	&dev_attr_wwid.attr,
#ifdef CONFIG_SCSI_DH
	&dev_attr_dh_state.attr,
	&dev_attr_access_state.attr,
	&dev_attr_preferred_path.attr,
#endif
	&dev_attr_queue_ramp_up_period.attr,
	REF_EVT(media_change),
	REF_EVT(inquiry_change_reported),
	REF_EVT(capacity_change_reported),
	REF_EVT(soft_threshold_reached),
	REF_EVT(mode_parameter_change_reported),
	REF_EVT(lun_change_reported),
	NULL
};

static struct bin_attribute *scsi_sdev_bin_attrs[] = {
	&dev_attr_vpd_pg83,
	&dev_attr_vpd_pg80,
	&dev_attr_inquiry,
	NULL
};
static struct attribute_group scsi_sdev_attr_group = {
	.attrs =	scsi_sdev_attrs,
	.bin_attrs =	scsi_sdev_bin_attrs,
	.is_visible =	scsi_sdev_attr_is_visible,
	.is_bin_visible = scsi_sdev_bin_attr_is_visible,
};

static const struct attribute_group *scsi_sdev_attr_groups[] = {
	&scsi_sdev_attr_group,
	NULL
};
static int scsi_target_add(struct scsi_target *starget)
{
	int error;

	if (starget->state != STARGET_CREATED)
		return 0;

	error = device_add(&starget->dev);
	if (error) {
		dev_err(&starget->dev, "target device_add failed, error %d\n", error);
		return error;
	}
	transport_add_device(&starget->dev);
	starget->state = STARGET_RUNNING;

	pm_runtime_set_active(&starget->dev);
	pm_runtime_enable(&starget->dev);
	device_enable_async_suspend(&starget->dev);

	return 0;
}
/**
 * scsi_sysfs_add_sdev - add scsi device to sysfs
 * @sdev:	scsi_device to add
 *
 * Return value:
 * 	0 on Success / non-zero on Failure
 **/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
	int error, i;
	struct request_queue *rq = sdev->request_queue;
	struct scsi_target *starget = sdev->sdev_target;

	error = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (error)
		return error;

	error = scsi_target_add(starget);
	if (error)
		return error;

	transport_configure_device(&starget->dev);

	device_enable_async_suspend(&sdev->sdev_gendev);
	scsi_autopm_get_target(starget);
	pm_runtime_set_active(&sdev->sdev_gendev);
	pm_runtime_forbid(&sdev->sdev_gendev);
	pm_runtime_enable(&sdev->sdev_gendev);
	scsi_autopm_put_target(starget);

	scsi_autopm_get_device(sdev);

	error = scsi_dh_add_device(sdev);
	if (error)
		/*
		 * device_handler is optional, so any error can be ignored
		 */
		sdev_printk(KERN_INFO, sdev,
			    "failed to add device handler: %d\n", error);

	error = device_add(&sdev->sdev_gendev);
	if (error) {
		sdev_printk(KERN_INFO, sdev,
			    "failed to add device: %d\n", error);
		scsi_dh_remove_device(sdev);
		return error;
	}

	device_enable_async_suspend(&sdev->sdev_dev);
	error = device_add(&sdev->sdev_dev);
	if (error) {
		sdev_printk(KERN_INFO, sdev,
			    "failed to add class device: %d\n", error);
		scsi_dh_remove_device(sdev);
		device_del(&sdev->sdev_gendev);
		return error;
	}
	transport_add_device(&sdev->sdev_gendev);
	sdev->is_visible = 1;

	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
	if (error)
		/* we're treating error on bsg register as non-fatal,
		 * so pretend nothing went wrong */
		sdev_printk(KERN_INFO, sdev,
			    "Failed to register bsg queue, errno=%d\n", error);

	/* add additional host specific attributes */
	if (sdev->host->hostt->sdev_attrs) {
		for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
			error = device_create_file(&sdev->sdev_gendev,
					sdev->host->hostt->sdev_attrs[i]);
			if (error)
				return error;
		}
	}

	scsi_autopm_put_device(sdev);
	return error;
}
void __scsi_remove_device(struct scsi_device *sdev)
{
	struct device *dev = &sdev->sdev_gendev;

	/*
	 * This cleanup path is not reentrant and while it is impossible
	 * to get a new reference with scsi_device_get() someone can still
	 * hold a previously acquired one.
	 */
	if (sdev->sdev_state == SDEV_DEL)
		return;

	if (sdev->is_visible) {
		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
			return;

		bsg_unregister_queue(sdev->request_queue);
		device_unregister(&sdev->sdev_dev);
		transport_remove_device(dev);
		scsi_dh_remove_device(sdev);
		device_del(dev);
	} else
		put_device(&sdev->sdev_dev);

	/*
	 * Stop accepting new requests and wait until all queuecommand() and
	 * scsi_run_queue() invocations have finished before tearing down the
	 * device.
	 */
	scsi_device_set_state(sdev, SDEV_DEL);
	blk_cleanup_queue(sdev->request_queue);
	cancel_work_sync(&sdev->requeue_work);

	if (sdev->host->hostt->slave_destroy)
		sdev->host->hostt->slave_destroy(sdev);
	transport_destroy_device(dev);

	/*
	 * Paired with the kref_get() in scsi_sysfs_initialize().  We have
	 * removed sysfs visibility from the device, so make the target
	 * invisible if this was the last device underneath it.
	 */
	scsi_target_reap(scsi_target(sdev));

	put_device(dev);
}
/**
 * scsi_remove_device - unregister a device from the scsi bus
 * @sdev:	scsi_device to unregister
 **/
void scsi_remove_device(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;

	mutex_lock(&shost->scan_mutex);
	__scsi_remove_device(sdev);
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_remove_device);
static void __scsi_remove_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;
	struct scsi_device *sdev;

	spin_lock_irqsave(shost->host_lock, flags);
 restart:
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel != starget->channel ||
		    sdev->id != starget->id ||
		    scsi_device_get(sdev))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
		spin_lock_irqsave(shost->host_lock, flags);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
 * scsi_remove_target - try to remove a target and all its devices
 * @dev: generic starget or parent of generic stargets to be removed
 *
 * Note: This is slightly racy.  It is possible that if the user
 * requests the addition of another device then the target won't be
 * removed.
 */
void scsi_remove_target(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	struct scsi_target *starget;
	unsigned long flags;

restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->state == STARGET_DEL ||
		    starget->state == STARGET_REMOVE)
			continue;
		if (starget->dev.parent == dev || &starget->dev == dev) {
			kref_get(&starget->reap_ref);
			starget->state = STARGET_REMOVE;
			spin_unlock_irqrestore(shost->host_lock, flags);
			__scsi_remove_target(starget);
			scsi_target_reap(starget);
			goto restart;
		}
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL(scsi_remove_target);
int scsi_register_driver(struct device_driver *drv)
{
	drv->bus = &scsi_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL(scsi_register_driver);

int scsi_register_interface(struct class_interface *intf)
{
	intf->class = &sdev_class;

	return class_interface_register(intf);
}
EXPORT_SYMBOL(scsi_register_interface);
/**
 * scsi_sysfs_add_host - add scsi host to subsystem
 * @shost:	scsi host struct to add to subsystem
 **/
int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
	int error, i;

	/* add host specific attributes */
	if (shost->hostt->shost_attrs) {
		for (i = 0; shost->hostt->shost_attrs[i]; i++) {
			error = device_create_file(&shost->shost_dev,
					shost->hostt->shost_attrs[i]);
			if (error)
				return error;
		}
	}

	transport_register_device(&shost->shost_gendev);
	transport_configure_device(&shost->shost_gendev);
	return 0;
}
static struct device_type scsi_dev_type = {
	.name =		"scsi_device",
	.release =	scsi_device_dev_release,
	.groups =	scsi_sdev_attr_groups,
};
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
	unsigned long flags;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = sdev->sdev_target;

	device_initialize(&sdev->sdev_gendev);
	sdev->sdev_gendev.bus = &scsi_bus_type;
	sdev->sdev_gendev.type = &scsi_dev_type;
	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);

	device_initialize(&sdev->sdev_dev);
	sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
	sdev->sdev_dev.class = &sdev_class;
	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
	/*
	 * Get a default scsi_level from the target (derived from sibling
	 * devices).  This is the best we can do for guessing how to set
	 * sdev->lun_in_cdb for the initial INQUIRY command.  For LUN 0 the
	 * setting doesn't matter, because all the bits are zero anyway.
	 * But it does matter for higher LUNs.
	 */
	sdev->scsi_level = starget->scsi_level;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !shost->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	transport_setup_device(&sdev->sdev_gendev);
	spin_lock_irqsave(shost->host_lock, flags);
	list_add_tail(&sdev->same_target_siblings, &starget->devices);
	list_add_tail(&sdev->siblings, &shost->__devices);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/*
	 * device can now only be removed via __scsi_remove_device() so hold
	 * the target.  Target will be held in CREATED state until something
	 * beneath it becomes visible (in which case it moves to RUNNING)
	 */
	kref_get(&starget->reap_ref);
}
int scsi_is_sdev_device(const struct device *dev)
{
	return dev->type == &scsi_dev_type;
}
EXPORT_SYMBOL(scsi_is_sdev_device);
/* A blank transport template that is used in drivers that don't
 * yet implement Transport Attributes */
struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
, }, }, }, };