// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
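/*
 * Illustrative note (added commentary, not from the original source):
 * writers take bdi_lock to modify bdi_tree and bdi_list, while readers
 * may walk bdi_list under RCU only, roughly:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		...;
 *	rcu_read_unlock();
 */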
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
struct wb_stats {
	unsigned long nr_dirty;
	unsigned long nr_io;
	unsigned long nr_more_io;
	unsigned long nr_dirty_time;
	unsigned long nr_writeback;
	unsigned long nr_reclaimable;
	unsigned long nr_dirtied;
	unsigned long nr_written;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
};
static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static void collect_wb_stats(struct wb_stats *stats,
			     struct bdi_writeback *wb)
{
	struct inode *inode;

	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		stats->nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		stats->nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		stats->nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			stats->nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	stats->nr_writeback += wb_stat(wb, WB_WRITEBACK);
	stats->nr_reclaimable += wb_stat(wb, WB_RECLAIMABLE);
	stats->nr_dirtied += wb_stat(wb, WB_DIRTIED);
	stats->nr_written += wb_stat(wb, WB_WRITTEN);
	stats->wb_thresh += wb_calc_thresh(wb, stats->dirty_thresh);
}
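/*
 * Added note: collect_wb_stats() above mixes two sources. The b_* inode
 * lists are counted directly under wb->list_lock, while nr_writeback,
 * nr_reclaimable, nr_dirtied and nr_written come from the per-cpu WB_*
 * counters via wb_stat(), so the result is a best-effort snapshot rather
 * than an atomically consistent view.
 */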
#ifdef CONFIG_CGROUP_WRITEBACK
static void bdi_collect_stats(struct backing_dev_info *bdi,
			      struct wb_stats *stats)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) {
		if (!wb_tryget(wb))
			continue;

		collect_wb_stats(stats, wb);
		wb_put(wb);
	}
	rcu_read_unlock();
}
#else
static void bdi_collect_stats(struct backing_dev_info *bdi,
			      struct wb_stats *stats)
{
	collect_wb_stats(stats, &bdi->wb);
}
#endif
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	struct wb_stats stats;
	unsigned long tot_bw;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	memset(&stats, 0, sizeof(stats));
	stats.dirty_thresh = dirty_thresh;
	bdi_collect_stats(bdi, &stats);
	tot_bw = atomic_long_read(&bdi->tot_write_bandwidth);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   K(stats.nr_writeback),
		   K(stats.nr_reclaimable),
		   K(stats.wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   K(stats.nr_dirtied),
		   K(stats.nr_written),
		   K(tot_bw),
		   stats.nr_dirty,
		   stats.nr_io,
		   stats.nr_more_io,
		   stats.nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
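/*
 * Added note: DEFINE_SHOW_ATTRIBUTE() generates bdi_debug_stats_open()
 * and bdi_debug_stats_fops around the show function above, so the data
 * is readable from debugfs, typically as
 * /sys/kernel/debug/bdi/<device>/stats (sizes reported in kB via K()).
 */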
static void wb_stats_show(struct seq_file *m, struct bdi_writeback *wb,
			  struct wb_stats *stats)
{
	seq_printf(m,
		   "WbCgIno:           %10lu\n"
		   "WbWriteback:       %10lu kB\n"
		   "WbReclaimable:     %10lu kB\n"
		   "WbDirtyThresh:     %10lu kB\n"
		   "WbDirtied:         %10lu kB\n"
		   "WbWritten:         %10lu kB\n"
		   "WbWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:           %10lu\n"
		   "b_io:              %10lu\n"
		   "b_more_io:         %10lu\n"
		   "b_dirty_time:      %10lu\n"
		   "state:             %10lx\n\n",
#ifdef CONFIG_CGROUP_WRITEBACK
		   cgroup_ino(wb->memcg_css->cgroup),
#else
		   1ul,
#endif
		   K(stats->nr_writeback),
		   K(stats->nr_reclaimable),
		   K(stats->wb_thresh),
		   K(stats->nr_dirtied),
		   K(stats->nr_written),
		   K(wb->avg_write_bandwidth),
		   stats->nr_dirty,
		   stats->nr_io,
		   stats->nr_more_io,
		   stats->nr_dirty_time,
		   wb->state);
}
static int cgwb_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	struct bdi_writeback *wb;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	rcu_read_lock();
	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) {
		struct wb_stats stats = { .dirty_thresh = dirty_thresh };

		if (!wb_tryget(wb))
			continue;

		collect_wb_stats(&stats, wb);

		/*
		 * Calculate thresh of wb in writeback cgroup which is min of
		 * thresh in global domain and thresh in cgroup domain. Drop
		 * rcu lock because cgwb_calc_thresh may sleep in
		 * cgroup_rstat_flush. We can do so here because we have a ref.
		 */
		if (mem_cgroup_wb_domain(wb)) {
			rcu_read_unlock();
			stats.wb_thresh = min(stats.wb_thresh, cgwb_calc_thresh(wb));
			rcu_read_lock();
		}

		wb_stats_show(m, wb, &stats);

		wb_put(wb);
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(cgwb_debug_stats);
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
	debugfs_create_file("wb_stats", 0444, bdi->debug_dir, bdi,
			    &cgwb_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else /* CONFIG_DEBUG_FS */
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif /* CONFIG_DEBUG_FS */
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
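/*
 * Added note: ra_pages is kept in pages, so the kB value written from user
 * space is shifted down by (PAGE_SHIFT - 10); a page is 2^PAGE_SHIFT bytes,
 * i.e. 2^(PAGE_SHIFT - 10) kB. With 4 KiB pages (PAGE_SHIFT == 12), writing
 * "128" to read_ahead_kb yields 128 >> 2 == 32 pages.
 */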
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
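/*
 * Added note: for read_ahead_kb the macro above expands (roughly) to a
 * read_ahead_kb_show() that emits K(bdi->ra_pages) plus
 * DEVICE_ATTR_RW(read_ahead_kb), which pairs that show routine with the
 * read_ahead_kb_store() defined earlier to form the sysfs attribute.
 */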
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)
static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio_fine, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)
static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio_fine, bdi->max_ratio)
static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(min_bytes);
static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(max_bytes);
static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);
static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int strict_limit;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &strict_limit);
	if (ret < 0)
		return ret;

	ret = bdi_set_strict_limit(bdi, strict_limit);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
}
static DEVICE_ATTR_RW(strict_limit);
static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_min_ratio_fine.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_max_ratio_fine.attr,
	&dev_attr_min_bytes.attr,
	&dev_attr_max_bytes.attr,
	&dev_attr_stable_pages_required.attr,
	&dev_attr_strict_limit.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
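/*
 * Added note: these attributes are attached to the "bdi" device class
 * below, so for a registered bdi they surface in sysfs, typically under
 * /sys/class/bdi/<name>/ (read_ahead_kb, min_ratio, max_bytes, ...).
 */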
static const struct class bdi_class = {
	.name		= "bdi",
	.dev_groups	= bdi_dev_groups,
};

static __init int bdi_class_init(void)
{
	int ret;

	ret = class_register(&bdi_class);
	if (ret)
		return ret;

	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(default_bdi_init);
static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
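/*
 * Added note: bandwidth is tracked in pages per second. 100 MB/s is
 * 100 * 2^20 bytes/s and a page is 2^PAGE_SHIFT bytes, so the initial
 * value is 100 << (20 - PAGE_SHIFT) pages/s; with 4 KiB pages that is
 * 100 << 8 == 25600 pages/s.
 */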
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	err = percpu_counter_init_many(wb->stat, 0, gfp, NR_WB_STAT_ITEMS);
	if (err)
		fprop_local_destroy_percpu(&wb->completions);

	return err;
}
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_irq(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_irq(&wb->work_lock);
		return;
	}
	spin_unlock_irq(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}
static void wb_exit(struct bdi_writeback *wb)
{
	WARN_ON(delayed_work_pending(&wb->dwork));
	percpu_counter_destroy_many(wb->stat, NR_WB_STAT_ITEMS);
	fprop_local_destroy_percpu(&wb->completions);
}
#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	kfree(wb);
}
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	wb_exit(wb);
	bdi_put(bdi);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	call_rcu(&wb->rcu, cgwb_free_rcu);
}
static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}
/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
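/*
 * Added sketch (illustrative, not from the original source): a caller that
 * already holds a reference on the memcg css typically does something like
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (wb) {
 *		... charge dirty/writeback state against wb ...
 *		wb_put(wb);
 *	}
 *
 * wb_get_create() loops lookup + cgwb_create() until either a wb is found
 * or creation fails.
 */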
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}
static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}
/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end
 * of the list to guarantee the forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal.  Instead,
		 * it can be postponed until the next time, when all io
		 * will be likely completed.  If in the meantime some inodes
		 * will get re-dirtied, they should be eventually switched to
		 * a new cgwb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}
/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}
/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @css.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);
#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);
	bdi->last_bdp_sleep = jiffies;

	return cgwb_bdi_init(bdi);
}
struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
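/*
 * Added sketch (illustrative, not from the original source): the usual
 * lifecycle for a driver-owned bdi built from the helpers in this file is
 * roughly
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	ret = bdi_register(bdi, "%s", name);
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * where bdi_put() drops the reference taken in bdi_init() via kref_init().
 */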
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
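/*
 * Added note: bdi_lookup_rb_node() returns the rb-tree slot that either
 * holds the bdi with @id or is where such a bdi would be linked, so
 * bdi_get_by_id() below can test *p for a match and bdi_register_va() can
 * pass the slot straight to rb_link_node()/rb_insert_color().
 */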
/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}
void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);
struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);
const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);