/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_pdflush,            /* A pdflush thread is working this device */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_unused,             /* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
        BDI_RECLAIMABLE,
        BDI_WRITEBACK,
        NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
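
/*
 * Worked example: the per-cpu counters below fold into the global count
 * in batches of BDI_STAT_BATCH. With nr_cpu_ids == 4, ilog2(4) == 2, so
 * the batch is 8 * (1 + 2) = 24: each CPU may hold up to ~24 uncommitted
 * events, the per-cpu slack that bdi_stat_error() below bounds.
 */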

struct backing_dev_info {
        unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;    /* Always use atomic bitops on this */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */
        void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
        void *unplug_io_data;

        struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

        struct prop_local_percpu completions;
        int dirty_exceeded;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        struct device *dev;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
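
/*
 * Typical lifecycle, as an illustrative sketch (the "foo" names and the
 * parent device are hypothetical; only the bdi_* calls are real):
 *
 *      static struct backing_dev_info foo_bdi;
 *
 *      static int foo_setup(struct device *parent)
 *      {
 *              int err = bdi_init(&foo_bdi);
 *              if (err)
 *                      return err;
 *              err = bdi_register(&foo_bdi, parent, "foo%d", 0);
 *              if (err)
 *                      bdi_destroy(&foo_bdi);
 *              return err;
 *      }
 *
 * Teardown runs in reverse: bdi_unregister(&foo_bdi), then
 * bdi_destroy(&foo_bdi).
 */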

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
{
        __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __bdi_stat_sum(bdi, item);
        local_irq_restore(flags);

        return sum;
}
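
/*
 * Usage sketch (illustrative; "thresh" is a hypothetical limit):
 * writeback accounting brackets each page in flight with the irq-safe
 * inc/dec pair, while throttling paths read the cheap approximate value:
 *
 *      inc_bdi_stat(bdi, BDI_WRITEBACK);       when I/O is submitted
 *      ...
 *      dec_bdi_stat(bdi, BDI_WRITEBACK);       on completion
 *
 *      if (bdi_stat(bdi, BDI_WRITEBACK) > thresh)
 *              back off the dirtier;
 *
 * bdi_stat() may be off by up to bdi_stat_error(); use bdi_stat_sum()
 * when an exact (but more expensive) total is required.
 */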

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * BDI_STAT_BATCH;
#else
        return 1;
#endif
}
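
/*
 * Illustrative pattern (names are made up): compare the cheap read
 * against the limit first, and pay for the exact sum only when the
 * approximation is within the possible error of the threshold:
 *
 *      if (bdi_stat(bdi, BDI_RECLAIMABLE) + bdi_stat_error(bdi) > limit) {
 *              if (bdi_stat_sum(bdi, BDI_RECLAIMABLE) > limit)
 *                      ... genuinely over the limit ...
 *      }
 */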

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
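
/*
 * Sketch (the values are examples only): the ratios carve the global
 * dirty limit up between devices. To guarantee a device at least 5% of
 * the dirty pool while capping it at 20%:
 *
 *      err = bdi_set_min_ratio(bdi, 5);
 *      if (!err)
 *              err = bdi_set_max_ratio(bdi, 20);
 *
 * Both return a negative errno when the requested split is impossible,
 * e.g. when the min ratios of all devices would exceed 100%.
 */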

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * all three for convenience.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_MAP_COPY        0x00000004
#define BDI_CAP_MAP_DIRECT      0x00000008
#define BDI_CAP_READ_MAP        0x00000010
#define BDI_CAP_WRITE_MAP       0x00000020
#define BDI_CAP_EXEC_MAP        0x00000040
#define BDI_CAP_NO_ACCT_WB      0x00000080
#define BDI_CAP_SWAP_BACKED     0x00000100

#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
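
/*
 * Sketch of how a memory-backed filesystem might fill in its bdi (the
 * "foo" name is hypothetical; ramfs uses a similar combination): pages
 * are always resident, so dirty accounting and writeback are pointless,
 * while MAP_PRIVATE copies are allowed:
 *
 *      static struct backing_dev_info foo_backing_dev_info = {
 *              .ra_pages       = 0,    (no readahead)
 *              .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *                                BDI_CAP_MAP_COPY,
 *      };
 */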

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}
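
/*
 * For stacked drivers (md/dm), congested_fn lets a composite device
 * answer by polling its components. A hedged sketch; the "foo_conf"
 * type and its fields are hypothetical:
 *
 *      static int foo_congested(void *data, int bdi_bits)
 *      {
 *              struct foo_conf *conf = data;
 *              int ret = 0, i;
 *
 *              for (i = 0; i < conf->nr_disks; i++)
 *                      ret |= bdi_congested(conf->component_bdi[i],
 *                                           bdi_bits);
 *              return ret;
 *      }
 *
 * wired up with bdi->congested_fn = foo_congested and
 * bdi->congested_data = conf.
 */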

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_sync_congested) |
                                  (1 << BDI_async_congested));
}

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
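
/*
 * Usage sketch (illustrative): a writer that backs off while the
 * device's async (write) queue is congested; HZ/10 is a typical timeout
 * used by callers of congestion_wait(), not a requirement:
 *
 *      while (bdi_write_congested(bdi))
 *              congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * congestion_wait() sleeps until a queue of the given direction leaves
 * congestion or the timeout (in jiffies) expires, and returns the time
 * remaining.
 */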

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
        return bdi_cap_swap_backed(mapping->backing_dev_info);
}
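
/*
 * Usage sketch (illustrative): dirty-page bookkeeping checks the
 * mapping's capabilities before touching the counters, which is how
 * ramfs-like mappings stay out of dirty accounting entirely:
 *
 *      if (mapping_cap_account_dirty(mapping)) {
 *              __inc_zone_page_state(page, NR_FILE_DIRTY);
 *              __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 *      }
 *
 * This mirrors what the VM core does when a page is dirtied.
 */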

#endif  /* _LINUX_BACKING_DEV_H */