// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;
/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough", "writeback", "writearound", "none", NULL };

static const char * const bch_reada_cache_policies[] = {
	"all", "meta-only", NULL };

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto", "always", NULL };

static const char * const cache_replacement_policies[] = {
	"lru", "fifo", "random", NULL };

static const char * const error_actions[] = {
	"unregister", "panic", NULL };
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);
sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
read_attribute(feature_compat);
read_attribute(feature_ro_compat);
read_attribute(feature_incompat);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

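/*
 * Helper shared by the show() paths below: print each entry of a
 * NULL-terminated string list into buf, wrapping the currently selected
 * entry in square brackets, e.g. "writethrough [writeback] ...".
 */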
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;
#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_reada_cache_policies,
					       dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE + 1] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);
	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the writeback
			 * kthread has not been created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread\n",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread will check whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is attached
	 * to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);

	return size;
}
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_error_limit,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
KTYPE(bch_cached_dev);

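/*
 * Flash-only volumes: bcache devices carved directly out of the cache set,
 * with no backing device behind them.
 */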
SHOW(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE + 1] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
SHOW_LOCKED(bch_flash_dev)

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

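/*
 * Walk every btree node with bch_btree_map_nodes(), accumulating bset
 * statistics, then format the totals for sysfs.
 */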
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

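/*
 * Estimate how full the root btree node is: sum the bytes of its good keys
 * and report them as a percentage of a btree node's size.
 */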
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

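/* Total memory currently held by the in-memory btree node cache, in bytes. */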
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

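/*
 * Longest chain in the btree node hash table; a rough measure of how evenly
 * the cached btree nodes are distributed across the hash buckets.
 */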
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

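/* Percentage of allocated btree node space occupied by key bytes, per the last GC. */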
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c->cache));
	sysfs_hprint(block_size, block_bytes(c->cache));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));
	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);
	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(idle_max_writeback_rate, "%i",
		     c->idle_max_writeback_rate_enabled);
	sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));
	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	if (attr == &sysfs_feature_compat)
		return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_ro_compat)
		return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_incompat)
		return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->cache->sb)) {
			SET_CACHE_SYNC(&c->cache->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

== &sysfs_clear_stats
) {
833 atomic_long_set(&c
->writeback_keys_done
, 0);
834 atomic_long_set(&c
->writeback_keys_failed
, 0);
836 memset(&c
->gc_stats
, 0, sizeof(struct gc_stat
));
837 bch_cache_accounting_clear(&c
->accounting
);
840 if (attr
== &sysfs_trigger_gc
)
843 if (attr
== &sysfs_prune_cache
) {
844 struct shrink_control sc
;
846 sc
.gfp_mask
= GFP_KERNEL
;
847 sc
.nr_to_scan
= strtoul_or_return(buf
);
848 c
->shrink
.scan_objects(&c
->shrink
, &sc
);
	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			ret = size;
		}

		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify, c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled);
	sysfs_strtoul_bool(idle_max_writeback_rate,
			   c->idle_max_writeback_rate_enabled);

	/*
	 * Writing gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC flag; that is harmless, because the flag will
	 * simply be set again on the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_idle_max_writeback_rate,
	&sysfs_gc_after_writeback,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	&sysfs_feature_compat,
	&sysfs_feature_ro_compat,
	&sysfs_feature_incompat,
	NULL
};
KTYPE(bch_cache_set_internal);

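/* sort() comparator for bucket priorities: descending order. */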
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

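	/*
	 * priority_stats: scan every bucket, classify it as unused, clean,
	 * dirty or metadata, then report usage percentages plus 31 quantiles
	 * of the cached buckets' priorities.
	 */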
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		size_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}

SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

*bch_cache_files
[] = {
1168 &sysfs_priority_stats
,
1171 &sysfs_btree_written
,
1172 &sysfs_metadata_written
,
1175 &sysfs_cache_replacement_policy
,