// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

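/*
 * Worked example for the store path below (assuming the common 4 KiB
 * PAGE_SIZE, i.e. PAGE_SHIFT == 12): writing "128" to read_ahead_kb stores
 * 128 >> (12 - 10) = 32 pages in ->ra_pages; reading the file back shifts
 * those 32 pages left by 2 and reports 128 again.
 */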
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

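/*
 * Units in the store path below: discard_max_bytes is written in bytes,
 * checked for alignment to the discard granularity, then shifted right by 9
 * to convert to 512-byte sectors before being stored in the queue limits.
 * For example, writing 1048576 (1 MiB) ends up as 2048 sectors, capped at
 * max_hw_discard_sectors.
 */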
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

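/*
 * Units in the store path below: max_sectors_kb is written in KiB and
 * converted to 512-byte sectors with "<< 1" (1 KiB == 2 sectors). The value
 * is rejected if it exceeds the (possibly device-capped) hardware limit or
 * is smaller than one page worth of KiB.
 */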
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

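/*
 * The macro below generates a show/store handler pair per queue flag. The
 * "neg" argument inverts the exported value, so e.g. the "rotational" file
 * reports the opposite of QUEUE_FLAG_NONROT: a value of 0 means the device
 * is non-rotational.
 */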
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

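/*
 * The nomerges attribute below encodes two flags in one value: 2 means all
 * merging is disabled (QUEUE_FLAG_NOMERGES), 1 disables only the more
 * expensive merge attempts (QUEUE_FLAG_NOXMERGES), and 0 leaves merging
 * fully enabled.
 */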
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

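/*
 * rq_affinity below is reported as set << force: 0 means completions may run
 * on any CPU, 1 (SAME_COMP) steers completions to the submitting CPU's
 * group, and 2 (SAME_COMP plus SAME_FORCE) forces completion on the exact
 * submitting CPU.
 */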
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

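/*
 * io_poll_delay below is exchanged in microseconds with user space but kept
 * in nanoseconds in q->poll_nsec, hence the "* 1000" on store and "/ 1000"
 * on show; -1 selects classic (non-hybrid) polling.
 */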
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

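/*
 * wbt_lat_usec below is exchanged in microseconds with user space while the
 * writeback throttling code tracks the target latency in nanoseconds, hence
 * the div_u64(..., 1000) on show and the "* 1000" on store; writing -1
 * re-selects the default latency for the device type.
 */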
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
	} else
		blk_queue_bypass_start(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	if (q->mq_ops) {
		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q);
	} else
		blk_queue_bypass_end(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 *     Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	if (!blk_queue_dead(q)) {
		/*
		 * Last reference was dropped without having called
		 * blk_cleanup_queue().
		 */
		WARN_ONCE(blk_queue_init_done(q),
			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
			  q);
	}

	WARN(blk_queue_root_blkg(q),
	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
	     q);

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_queue_free_zone_bitmaps(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

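/*
 * Illustrative call path (not part of this file): a block driver allocates a
 * gendisk and request queue and calls device_add_disk(), which registers the
 * queue with sysfs via blk_register_queue(). The "queue" kobject added above
 * is what appears as /sys/block/<disk>/queue/ with the attribute files
 * defined earlier in this file.
 */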
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}