/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * its dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded;      /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in centiseconds
 * (hundredths of a second)
 */
int dirty_writeback_centisecs = 5 * 100;

/*
 * The longest number of centiseconds for which data is allowed to remain dirty
 */
int dirty_expire_centisecs = 30 * 100;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode".
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);
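
/*
 * Snapshot of the global page-state counters used by the dirty-limit
 * calculations below.
 */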
struct writeback_state
{
        unsigned long nr_dirty;
        unsigned long nr_unstable;
        unsigned long nr_mapped;
        unsigned long nr_writeback;
};
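
/*
 * Fill *wbs with the current values of the global page accounting counters.
 */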
static void get_writeback_state(struct writeback_state *wbs)
{
        wbs->nr_dirty = read_page_state(nr_dirty);
        wbs->nr_unstable = read_page_state(nr_unstable);
        wbs->nr_mapped = read_page_state(nr_mapped);
        wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        struct task_struct *tsk;

        get_writeback_state(wbs);

        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * total_pages) / 100;
        dirty = (dirty_ratio * total_pages) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
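
/*
 * Worked example (illustrative numbers, not from the source): with
 * vm_dirty_ratio == 40 and half of memory mapped, unmapped_ratio is 50,
 * so dirty_ratio is clamped to 25 while background_ratio stays at the
 * default 10 (since 10 < 25).
 */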

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                };

                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh);
                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;

                dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh);
                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
        static DEFINE_PER_CPU(int, ratelimits) = 0;
        long ratelimit;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting.  Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages().  Period.
         */
        if (get_cpu_var(ratelimits)++ >= ratelimit) {
                __get_cpu_var(ratelimits) = 0;
                put_cpu_var(ratelimits);
                balance_dirty_pages(mapping);
                return;
        }
        put_cpu_var(ratelimits);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
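
/*
 * Typical usage (a sketch, not a call site in this file): a write path that
 * has just dirtied a page throttles itself once per page, roughly
 *
 *      ...mark the page dirty via the commit_write a_op...
 *      balance_dirty_pages_ratelimited(mapping);
 *
 * The generic buffered-write path in mm/filemap.c does this for each page
 * it copies into the pagecache.
 */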

/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
        };

        for ( ; ; ) {
                struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh);
                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_bdflush(long nr_pages)
{
        if (nr_pages == 0) {
                struct writeback_state wbs;

                get_writeback_state(&wbs);
                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
        }
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);
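
/*
 * wb_timer drives the periodic kupdate-style writeback below;
 * laptop_mode_wb_timer drives the deferred flush used in laptop mode.
 */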
static struct timer_list wb_timer =
                        TIMER_INITIALIZER(wb_timer_fn, 0, 0);
static struct timer_list laptop_mode_wb_timer =
                        TIMER_INITIALIZER(laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_centisecs.  But if a writeback event
 * takes longer than a dirty_writeback_centisecs interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
        };

        sync_supers();

        get_writeback_state(&wbs);
        oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
        start_jif = jiffies;
        next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_centisecs)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (dirty_writeback_centisecs) {
                mod_timer(&wb_timer,
                        jiffies + (dirty_writeback_centisecs * HZ) / 100);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
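
/*
 * Timer callback: hand a kupdate pass off to a pdflush thread.  If no
 * pdflush thread is available, retry in one second.
 */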
static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ);     /* delay 1 second */
}
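
/*
 * Laptop mode: flush all dirty data with sys_sync(); run in a pdflush thread.
 */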
static void laptop_flush(unsigned long unused)
{
        sys_sync();
}
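
/*
 * The laptop-mode writeback timer has fired: dispatch laptop_flush to a
 * pdflush thread.
 */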
static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode * HZ);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
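
/*
 * Example (assuming PAGE_CACHE_SIZE == 4096): the 4MB cap corresponds to
 * ratelimit_pages == 1024, so sync_writeback_pages() returns 1536 pages,
 * i.e. the "six megabyte chunks" mentioned above.
 */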
static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
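
/*
 * CPU hotplug notifier: recompute ratelimit_pages whenever the set of
 * online CPUs changes.
 */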
static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;
        }
        mod_timer(&wb_timer, jiffies + (dirty_writeback_centisecs * HZ) / 100);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}
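
/*
 * Write back a mapping's dirty pages using its ->writepages method if it
 * provides one, falling back to generic_writepages() otherwise.
 */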
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        if (wbc->nr_to_write <= 0)
                return 0;
        if (mapping->a_ops->writepages)
                return mapping->a_ops->writepages(mapping, wbc);
        return generic_writepages(mapping, wbc);
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        int ret = 0;

        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);

                if (mapping) {
                        spin_lock_irq(&mapping->tree_lock);
                        mapping = page_mapping(page);
                        if (page_mapping(page)) { /* Race with truncate? */
                                BUG_ON(page_mapping(page) != mapping);
                                if (!mapping->backing_dev_info->memory_backed)
                                        inc_page_state(nr_dirty);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        spin_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
        }
        return ret;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                spin_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        spin_unlock_irqrestore(&mapping->tree_lock, flags);
                        if (!mapping->backing_dev_info->memory_backed)
                                dec_page_state(nr_dirty);
                        return 1;
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (!mapping->backing_dev_info->memory_backed)
                                dec_page_state(nr_dirty);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

/*
 * Clear a page's dirty flag while ignoring dirty memory accounting.
 */
int __clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        spin_unlock_irqrestore(&mapping->tree_lock, flags);
                        return 1;
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
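
/*
 * Clear a page's writeback flag and, for pagecache pages, drop the
 * PAGECACHE_TAG_WRITEBACK tag in the mapping's radix tree.  Returns the
 * previous writeback state.
 */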
int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}
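
/*
 * Set a page's writeback flag and keep the mapping's radix-tree tags
 * consistent: tag the page PAGECACHE_TAG_WRITEBACK and, if it is no longer
 * dirty, clear PAGECACHE_TAG_DIRTY.  Returns the previous writeback state.
 */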
int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);