/* mm/backing-dev.c */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
        if (wb1 < wb2) {
                spin_lock(&wb1->list_lock);
                spin_lock_nested(&wb2->list_lock, 1);
        } else {
                spin_lock(&wb2->list_lock);
                spin_lock_nested(&wb1->list_lock, 1);
        }
}
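
/*
 * Illustrative note (not from the original file): taking the two list_locks
 * in address order is what makes bdi_lock_two() deadlock-free. Two threads
 * doing
 *
 *      bdi_lock_two(a, b);     // thread 1
 *      bdi_lock_two(b, a);     // thread 2
 *
 * would otherwise each hold one lock while waiting on the other (an ABBA
 * deadlock). The pointer comparison gives every caller the same global
 * ordering, and spin_lock_nested(..., 1) tells lockdep that acquiring a
 * second lock of the same class here is intentional.
 */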

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
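
/*
 * Usage sketch (assumption, based on the debugfs code above): with
 * CONFIG_DEBUG_FS enabled and debugfs mounted at the conventional path,
 * each registered bdi exposes its counters as a read-only "stats" file:
 *
 *      # cat /sys/kernel/debug/bdi/8:0/stats
 *      BdiWriteback:       ... kB
 *      DirtyThresh:        ... kB
 *      ...
 *
 * "8:0" is an example device name; the directory is created from the name
 * passed to bdi_debug_register().
 */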

static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}
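
/*
 * Worked example (sketch): writing "512" to read_ahead_kb requests a 512 kB
 * readahead window. With 4 kB pages (PAGE_SHIFT == 12) the store above
 * computes
 *
 *      bdi->ra_pages = 512 >> (PAGE_SHIFT - 10) = 512 >> 2 = 128 pages,
 *
 * i.e. kilobytes become pages by shifting right by (PAGE_SHIFT - 10).
 */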

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                             \
static ssize_t name##_show(struct device *dev,                           \
                           struct device_attribute *attr, char *page)    \
{                                                                        \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);             \
                                                                         \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);   \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
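
/*
 * For reference, the BDI_SHOW() invocation above expands to roughly the
 * following (sketch of the preprocessor output):
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                                        struct device_attribute *attr,
 *                                        char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 */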

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        bdi_arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}

void bdi_arm_supers_timer(void)
{
        unsigned long next;

        if (!dirty_writeback_interval)
                return;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
        struct backing_dev_info *bdi = (struct backing_dev_info *)data;

        spin_lock_bh(&bdi->wb_lock);
        if (bdi->wb.task) {
                trace_writeback_wake_thread(bdi);
                wake_up_process(bdi->wb.task);
        } else {
                /*
                 * When bdi tasks are inactive for a long time, they are
                 * killed. In this case we have to wake up the forker thread,
                 * which should create and run the bdi thread.
                 */
                trace_writeback_wake_forker_thread(bdi);
                wake_up_process(default_backing_dev_info.wb.task);
        }
        spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
        unsigned long interval;

        interval = msecs_to_jiffies(dirty_writeback_interval * 10);
        return max(5UL * 60 * HZ, interval);
}
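
/*
 * Worked example (assuming the default dirty_writeback_interval of 500
 * centisecs): the interval is msecs_to_jiffies(500 * 10), i.e. 5 seconds,
 * so bdi_longest_inactive() returns max(5 minutes, 5 seconds) and an idle
 * bdi thread survives for 5 minutes before the forker thread kills it.
 */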

static int bdi_forker_thread(void *ptr)
{
        struct bdi_writeback *me = ptr;

        current->flags |= PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        for (;;) {
                struct task_struct *task = NULL;
                struct backing_dev_info *bdi;
                enum {
                        NO_ACTION,   /* Nothing to do */
                        FORK_THREAD, /* Fork bdi thread */
                        KILL_THREAD, /* Kill inactive bdi thread */
                } action = NO_ACTION;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
                        del_timer(&me->wakeup_timer);
                        wb_do_writeback(me, 0);
                }

                spin_lock_bh(&bdi_lock);
                set_current_state(TASK_INTERRUPTIBLE);

                list_for_each_entry(bdi, &bdi_list, bdi_list) {
                        bool have_dirty_io;

                        if (!bdi_cap_writeback_dirty(bdi) ||
                             bdi_cap_flush_forker(bdi))
                                continue;

                        WARN(!test_bit(BDI_registered, &bdi->state),
                             "bdi %p/%s is not registered!\n", bdi, bdi->name);

                        have_dirty_io = !list_empty(&bdi->work_list) ||
                                        wb_has_dirty_io(&bdi->wb);

                        /*
                         * If the bdi has work to do, but the thread does not
                         * exist - create it.
                         */
                        if (!bdi->wb.task && have_dirty_io) {
                                /*
                                 * Set the pending bit - if someone tries to
                                 * unregister this bdi, they'll wait on this
                                 * bit.
                                 */
                                set_bit(BDI_pending, &bdi->state);
                                action = FORK_THREAD;
                                break;
                        }

                        spin_lock(&bdi->wb_lock);

                        /*
                         * If there is no work to do and the bdi thread was
                         * inactive long enough - kill it. The wb_lock is taken
                         * to make sure no-one adds more work to this bdi and
                         * wakes the bdi thread up.
                         */
                        if (bdi->wb.task && !have_dirty_io &&
                            time_after(jiffies, bdi->wb.last_active +
                                                bdi_longest_inactive())) {
                                task = bdi->wb.task;
                                bdi->wb.task = NULL;
                                spin_unlock(&bdi->wb_lock);
                                set_bit(BDI_pending, &bdi->state);
                                action = KILL_THREAD;
                                break;
                        }
                        spin_unlock(&bdi->wb_lock);
                }
                spin_unlock_bh(&bdi_lock);

                /* Keep working if default bdi still has things to do */
                if (!list_empty(&me->bdi->work_list))
                        __set_current_state(TASK_RUNNING);

                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
                        task = kthread_create(bdi_writeback_thread, &bdi->wb,
                                              "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
                                 * the bdi from the thread. Hopefully 1024 is
                                 * large enough for efficient IO.
                                 */
                                writeback_inodes_wb(&bdi->wb, 1024);
                        } else {
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
                                 * And as soon as the bdi thread is visible, we
                                 * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
                                wake_up_process(task);
                        }
                        break;

                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
                        break;

                case NO_ACTION:
                        if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
                                /*
                                 * There is no dirty data. The only thing we
                                 * should now care about is checking for
                                 * inactive bdi threads and killing them. Thus,
                                 * let's sleep for a longer time, save energy
                                 * and be friendly to battery-powered devices.
                                 */
                                schedule_timeout(bdi_longest_inactive());
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
                        /* Back to the main loop */
                        continue;
                }

                /*
                 * Clear pending bit and wakeup anybody waiting to tear us down.
                 */
                clear_bit(BDI_pending, &bdi->state);
                smp_mb__after_clear_bit();
                wake_up_bit(&bdi->state, BDI_pending);
        }

        return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
        }

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
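
/*
 * Illustrative caller (sketch, not from this file): a block driver would
 * typically register the bdi embedded in its request queue by device
 * number, e.g.
 *
 *      err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 *      if (err)
 *              goto out_cleanup;
 *
 * which names the bdi "MAJOR:MINOR" to match the device node. "q", "disk"
 * and "out_cleanup" are placeholders for the driver's own context.
 */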

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                        TASK_UNINTERRUPTIBLE);

        /*
         * Finally, kill the kernel thread. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility.
         */
        if (bdi->wb.task)
                kthread_stop(bdi->wb.task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = &default_backing_dev_info;
        }
        spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                bdi_set_min_ratio(bdi, 0);
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);
                del_timer_sync(&bdi->wb.wakeup_timer);

                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        spin_lock_init(&wb->list_lock);
        setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))

int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;

        bdi->bw_time_stamp = jiffies;
        bdi->written_stamp = 0;

        bdi->write_bandwidth = INIT_BW;
        bdi->avg_write_bandwidth = INIT_BW;

        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                bdi_lock_two(&bdi->wb, dst);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&bdi->wb.list_lock);
                spin_unlock(&dst->list_lock);
        }

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        char tmp[32];
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        sprintf(tmp, "%.28s%s", name, "-%d");
        err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
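
/*
 * Illustrative caller (sketch, not from this file): a filesystem with no
 * real backing device can set one up while mounting, roughly:
 *
 *      static int example_fill_super(struct super_block *sb, void *data,
 *                                    int silent)
 *      {
 *              int err;
 *
 *              err = bdi_setup_and_register(&sbi->bdi, "example",
 *                                           BDI_CAP_MAP_COPY);
 *              if (err)
 *                      return err;
 *              sb->s_bdi = &sbi->bdi;
 *              ...
 *      }
 *
 * with a matching bdi_destroy() on the teardown path. "example" and "sbi"
 * are invented names for illustration.
 */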

static wait_queue_head_t congestion_wqh[2] = {
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (!test_and_set_bit(bit, &bdi->state))
                atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
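
/*
 * Illustrative use (sketch with invented helpers, not from this file): the
 * block layer flips these bits as a queue fills up and drains, conceptually:
 *
 *      if (queue_is_full(q))
 *              set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
 *      ...
 *      if (queue_has_room_again(q))
 *              clear_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
 *
 * queue_is_full()/queue_has_room_again() are placeholders; the real
 * decisions are made by the block layer's congestion thresholds.
 */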

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
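
/*
 * Typical use (sketch): reclaim and filesystem paths back off briefly when
 * writeback is overloaded, e.g.
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * which sleeps for at most 100 ms and returns early if a congested bdi
 * clears or a write completes.
 */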

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
            !zone_is_reclaim_congested(zone)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                           jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
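
/*
 * Typical use (sketch): direct reclaim throttles itself per zone, e.g.
 *
 *      wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 *
 * When neither a bdi nor the zone is congested this degrades to a
 * cond_resched(), so callers on the reclaim path do not sleep needlessly.
 */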