#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
struct backing_dev_info noop_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}
static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	if (!bdi_debug_root)
		return -ENOMEM;

	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	if (!bdi->debug_dir)
		return -ENOMEM;

	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
	if (!bdi->debug_stats) {
		debugfs_remove(bdi->debug_dir);
		bdi->debug_dir = NULL;
		return -ENOMEM;
	}

	return 0;
}
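/*
 * With debugfs mounted in its usual place, the file created above is
 * typically visible as /sys/kernel/debug/bdi/<name>/stats, where <name> is
 * the dev_name() passed in at registration time (e.g. "8:0" for a block
 * device bdi).
 */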
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline int bdi_debug_register(struct backing_dev_info *bdi,
				     const char *name)
{
	return 0;
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
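/*
 * For reference: the BDI_SHOW() invocation above expands to a
 * read_ahead_kb_show() helper, and DEVICE_ATTR_RW(read_ahead_kb) pairs it
 * with the read_ahead_kb_store() defined earlier to form the
 * dev_attr_read_ahead_kb attribute used in bdi_dev_attrs[] below.
 */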
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);
static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
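/*
 * These attributes are exported through the "bdi" device class created
 * below, e.g. as /sys/class/bdi/<name>/read_ahead_kb, min_ratio, max_ratio
 * and stable_pages_required (assuming the usual sysfs layout).
 */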
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);
static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
				 WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);
/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes-up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
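/*
 * Illustrative sketch (not part of this file): a caller such as
 * __mark_inode_dirty() kicks the flusher roughly like this once the first
 * dirty inode has been queued on an otherwise clean wb; note that
 * dirty_writeback_interval is in centiseconds, hence the "* 10" above to
 * convert it to milliseconds:
 *
 *	if (wakeup_bdi)
 *		wb_wakeup_delayed(wb);
 */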
/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
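/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), INIT_BW is
 * 100 << (20 - 12) = 25600 pages per second, i.e. 100 MB/s expressed in
 * pages, the unit used by the write-bandwidth fields initialized below.
 */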
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested) {
		err = -ENOMEM;
		goto out_put_bdi;
	}

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		/*
		 * Wait for wb shutdown to finish if someone else is just
		 * running wb_shutdown(). Otherwise we could proceed to wb /
		 * bdi destruction before wb_shutdown() is finished.
		 */
		wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
		return;
	}
	set_bit(WB_shutting_down, &wb->state);
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	/*
	 * Make sure bit gets cleared after shutdown is finished. Matches with
	 * the barrier provided by test_and_clear_bit() above.
	 */
	smp_wmb();
	clear_and_wake_up_bit(WB_shutting_down, &wb->state);
}
static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}
#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
 * protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
 * The returned wb_congested has its reference count incremented. Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;
	congested = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->__bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}
/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->__bdi) {
		rb_erase(&congested->rb_node,
			 &congested->__bdi->cgwb_congested_tree);
		congested->__bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}
static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough. try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
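/*
 * Illustrative sketch (not from this file): a lookup made through this
 * interface is paired with wb_put() once the caller is done with the
 * writeback structure, e.g.:
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (wb) {
 *		... operate on wb ...
 *		wb_put(wb);
 *	}
 */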
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}
static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
}
/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}
/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}
static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->__bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}
#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}
struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc_node);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register_va);
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
	if (rc)
		return rc;
	/* Leaking owner reference... */
	WARN_ON(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}
void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	cgwb_bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);
static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_wb_congested[2];
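/*
 * nr_wb_congested[] counts how many wb_congested structures currently have
 * the WB_async_congested (index 0) or WB_sync_congested (index 1) bit set;
 * wait_iff_congested() below consults it to decide whether any backing
 * device is congested at all before deciding to sleep.
 */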
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
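/*
 * Illustrative sketch (not from this file): reclaim and filesystem paths
 * typically use this to back off briefly when writeback cannot keep up,
 * e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */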
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);