/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
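
/*
 * Helper convention, for reference: a store helper returns the number
 * of bytes consumed on success (the count it was given) or a negative
 * errno, while a show helper returns the number of bytes written into
 * the page buffer. An illustrative call:
 *
 *	unsigned long nr;
 *	ssize_t ret = queue_var_store(&nr, "128\n", 4);
 *	// success: ret == 4 and nr == 128; "abc" or a value above
 *	// UINT_MAX yields -EINVAL instead
 */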
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}
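
/*
 * Worked example for the conversion above: ra_pages is in PAGE_SIZE
 * units while the sysfs file is in KB, so with 4K pages (PAGE_SHIFT
 * == 12) the shift amount is 2 and
 * "echo 512 > /sys/block/<dev>/queue/read_ahead_kb" stores
 * 512 >> 2 == 128 pages.
 */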
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}
static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
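
/*
 * Note the unit handling above: discard_max_bytes is exposed in bytes
 * while limits.max_discard_sectors holds 512-byte sectors, hence the
 * << 9 in the show path and the >> 9 here. Writing 1048576 (1 MiB)
 * stores 2048 sectors, provided the byte value is aligned to the
 * discard granularity.
 */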
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
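
/*
 * The shifts above, spelled out: the sysfs file is in KB while
 * limits.max_sectors counts 512-byte sectors (KB << 1) and io_pages
 * counts PAGE_SIZE units (KB >> (PAGE_SHIFT - 10)). For example,
 * "echo 1024 > /sys/block/<dev>/queue/max_sectors_kb" sets
 * max_sectors to 2048 and, with 4K pages, io_pages to 256.
 */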
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
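
/*
 * For readability, a sketch of what the nonrot invocation above
 * expands to:
 *
 *	static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
 *	{
 *		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *		return queue_var_show(!bit, page);
 *	}
 *
 * Because neg is 1, the "rotational" attribute reads (and stores) the
 * inverse of the internal NONROT flag.
 */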
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}
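
/*
 * Encoding used above and in the store below: 0 allows all merging,
 * 1 (NOXMERGES only) disables the more expensive extended merge
 * attempts, and 2 (NOMERGES) disables merging entirely, e.g.
 * "echo 2 > /sys/block/<dev>/queue/nomerges".
 */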
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
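
/*
 * rq_affinity values, for reference: 0 completes a request wherever
 * its interrupt lands, 1 (SAME_COMP) steers the completion to the
 * submitting CPU's group, and 2 (SAME_FORCE) forces completion onto
 * the exact submitting CPU.
 */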
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}
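
/*
 * io_poll_delay is exposed in microseconds but poll_nsec is kept in
 * nanoseconds, hence the * 1000 here and the / 1000 in the show path.
 * -1 selects classic busy polling, 0 selects adaptive hybrid polling,
 * and a positive value requests a fixed hybrid-poll sleep of that
 * many microseconds.
 */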
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	rwb = q->rq_wb;
	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}
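
/*
 * Same usec/nsec split as io_poll_delay: wbt_lat_usec is shown and
 * stored in microseconds while min_lat_nsec is in nanoseconds.
 * Writing -1 reverts to the wbt_default_latency_nsec() default, and
 * any manual write moves the throttle to WBT_STATE_ON_MANUAL so the
 * default enable/disable paths leave the setting alone.
 */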
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};
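
/*
 * All of the above surface under /sys/block/<dev>/queue/. An
 * illustrative shell session:
 *
 *	$ cat /sys/block/sda/queue/rotational
 *	1
 *	# echo 0 > /sys/block/sda/queue/nomerges
 *
 * Reads are routed through queue_attr_show() below and writes through
 * queue_attr_store(), which dispatch to the entry callbacks.
 */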
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
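
/*
 * Both dispatchers take q->sysfs_lock and fail with -ENOENT once the
 * queue is dying, so the attribute handlers above never run against a
 * queue that is being torn down; blk_register_queue() below takes the
 * same lock to freeze sysfs access until registration completes.
 */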
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}
/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	mutex_lock(&q->sysfs_lock);
	queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	wbt_exit(q);

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}