#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define	AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
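/*
 * Example (illustrative sketch, not part of this header): a filesystem
 * that cannot tolerate highmem pagecache pages might restrict the mask
 * while setting up a fresh inode's mapping, before any pages are
 * inserted:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_mask(inode->i_mapping) & ~__GFP_HIGHMEM);
 */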
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
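/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096, hence
 * PAGE_CACHE_MASK == ~4095UL): PAGE_CACHE_ALIGN(5000) == (5000 + 4095)
 * & ~4095 == 8192, i.e. the address rounds up to the next page
 * boundary, while PAGE_CACHE_ALIGN(4096) stays at 4096.
 */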
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
typedef int filler_t(void *, struct page *);
extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
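/*
 * Example (illustrative, not part of this header): the usual lookup
 * pattern.  find_get_page() returns the page with an elevated reference
 * count (or NULL), so the caller must balance it with
 * page_cache_release():
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect the page ...
 *		page_cache_release(page);
 *	}
 */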
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
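/*
 * Example (illustrative, not part of this header): a writer grabbing a
 * locked pagecache page.  The page comes back locked and with a
 * reference held, so both must be dropped when done:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (page) {
 *		... fill or modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */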
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);
int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
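/*
 * Example (illustrative, not part of this header): allocating a fresh
 * page and inserting it into both the pagecache and the LRU, as
 * readahead-style code does.  If insertion fails (e.g. another task
 * filled the slot meanwhile), the caller still owns the reference and
 * must drop it:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (page) {
 *		int err = add_to_page_cache_lru(page, mapping, index,
 *					mapping_gfp_mask(mapping));
 *		if (err)
 *			page_cache_release(page);
 *	}
 */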
extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);
/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy. Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD ||
			*local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}
#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif
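/*
 * Worked example (hypothetical numbers): with a threshold of 32, a CPU
 * adding one page at a time accumulates +1, +2, ... in its local
 * counter with no atomic traffic; only on reaching +33 does it fold the
 * whole batch into nr_pagecache with a single atomic_add() and reset
 * the local counter to 0.  The global count is therefore off by at most
 * PAGECACHE_ACCT_THRESHOLD per CPU at any instant.
 */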
static inline unsigned long get_page_cache_size(void)
{
	/*
	 * The approximate per-cpu accounting above can transiently drive
	 * the global counter negative; clamp the result to zero.
	 */
	int ret = atomic_read(&nr_pagecache);
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
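/*
 * Worked example (hypothetical values, 4K pages): for a vma with
 * vm_start == 0x40000000 and vm_pgoff == 16, address 0x40002000 lies
 * two pages past vm_start, so linear_page_index() returns 16 + 2 == 18:
 * the address maps the 18th page-sized block of the underlying object.
 * With PAGE_CACHE_SHIFT == PAGE_SHIFT the final shift is by zero and
 * changes nothing.
 */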
extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
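/*
 * Example (illustrative, not part of this header): the truncate-style
 * pattern for getting a page into a stable state.  Hold a reference,
 * take the lock, then let any in-flight writeback finish:
 *
 *	page_cache_get(page);
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... page is locked and not under writeback ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */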
extern void end_page_writeback(struct page *page);
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
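/*
 * Example (illustrative, not part of this header; buf, bytes, kaddr and
 * offset are hypothetical locals): write paths commonly pre-fault the
 * user buffer before taking the page lock, so that the copy done under
 * the lock is unlikely to block on a fault:
 *
 *	fault_in_pages_readable(buf, bytes);	... we copy FROM buf
 *	lock_page(page);
 *	left = __copy_from_user(kaddr + offset, buf, bytes);
 */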
static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}
#endif /* _LINUX_PAGEMAP_H */