/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);

	return count;
}
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}
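/*
 * Changing nr_requests moves the congestion and "queue full" thresholds,
 * so the store handler below re-checks the sync and async request lists
 * against the new limits and wakes any waiters once a list is no longer
 * full.
 */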
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
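/* queue_max_sectors() and friends return 512-byte sectors; ">> 1" converts to KB. */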
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), page);

	return queue_var_show(PAGE_CACHE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}
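/*
 * max_sectors_kb is tunable at runtime but must stay between one page
 * and the hardware limit reported by max_hw_sectors_kb.
 */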
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}
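/*
 * Generate a show/store handler pair for a single queue flag.  When @neg
 * is non-zero the exported value is inverted, e.g. "rotational" presents
 * the inverse of QUEUE_FLAG_NONROT.
 */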
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
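/*
 * nomerges encodes two flags: 0 = all merges enabled, 1 = only simple
 * one-hit merges (NOXMERGES), 2 = no merging at all (NOMERGES).
 */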
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
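/*
 * rq_affinity reads back as 0, 1 or 2 via "set << force": 0 = completion
 * affinity off, 1 = complete in the submitting CPU's group, 2 = force
 * completion onto the exact submitting CPU.
 */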
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
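/*
 * Each entry below backs one file under /sys/block/<disk>/queue/, tying
 * an attribute name and mode to the show/store handlers above.
 */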
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};
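/*
 * Illustrative use from userspace (device name hypothetical):
 *
 *	$ cat /sys/block/sda/queue/nr_requests
 *	128
 *	# echo 256 > /sys/block/sda/queue/nr_requests
 */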
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
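/*
 * All attribute reads and writes are serialized by q->sysfs_lock, and a
 * queue that has already been marked dead is refused with -ENOENT.
 */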
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj:	the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	blk_throtl_exit(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	kmem_cache_free(blk_requestq_cachep, q);
}
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};
struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
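/*
 * Add the "queue" kobject beneath the disk's device node and register the
 * elevator's sysfs entries; each step is unwound in reverse on failure.
 */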
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}