/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
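
/*
 * The {write,read,rw}_attribute() macros (see sysfs.h) declare static
 * struct attribute objects named sysfs_<name> with write-only, read-only
 * and read-write permissions respectively.
 */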
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
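
/*
 * SHOW()/STORE() (see sysfs.h) expand to sysfs show/store method
 * definitions taking (kobj, attr, buf[, size]); the *_LOCKED variants
 * generate a wrapper that calls the __-prefixed helper with
 * bch_register_lock held.
 */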
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
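
	/*
	 * Writeback rates and sizes are tracked internally in 512-byte
	 * sectors; the << 9 shifts convert them to bytes for display.
	 */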
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char derivative[20];
		char change[20];
		s64 next_io;

		bch_hprint(rate,	dc->writeback_rate.rate << 9);
		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target,	dc->writeback_rate_target << 9);
		bch_hprint(proportional, dc->writeback_rate_proportional << 9);
		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
		bch_hprint(change,	dc->writeback_rate_change << 9);

		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
				    NSEC_PER_MSEC);

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "derivative:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       derivative, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	dc->disk.stripe_size << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/* the label may fill all SB_LABEL_SIZE bytes; terminate it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	unsigned v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, INT_MAX);

	d_strtoul_nonzero(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

		if (v < 0)
			return v;

		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
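
		/*
		 * Emit a KOBJ_CHANGE uevent so userspace (e.g. udev rules)
		 * can react to the label change.
		 */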
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}
	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* the label may fill all SB_LABEL_SIZE bytes; terminate it */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};
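
/*
 * Callback for bch_btree_map_nodes(): called once per btree node, it counts
 * the node and accumulates its bset statistics; returning MAP_CONTINUE
 * keeps the traversal going.
 */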
static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}
static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
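
/*
 * Take a read lock on the btree root. The root can be replaced while we
 * sleep on its lock, so after locking we recheck that c->root still points
 * at the node we locked, and retry if it moved.
 */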
static unsigned bch_root_usage(struct cache_set *c)
{
	unsigned bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_cache_max_chain(struct cache_set *c)
{
	unsigned ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		ssize_t v = bch_read_string_list(buf, error_actions);

		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
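
/*
 * The cache set's "internal" kobject holds the debugging/statistics
 * attributes; its show/store methods just redirect to the cache set's own,
 * so these files appear in an internal/ subdirectory of the cache set's
 * sysfs directory.
 */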
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));
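
	/*
	 * priority_stats walks every bucket, classifying it as unused, clean,
	 * dirty or metadata, then sorts the priorities of the cached-data
	 * buckets (descending) to report their average and 31 quantiles.
	 */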
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{	return *((uint16_t *) r) - *((uint16_t *) l); }

		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);