#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);

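/*
 * Illustrative sketch, not part of this header: a stacking driver such
 * as md/dm can answer congestion queries itself by supplying a
 * congested_fn.  The callback receives the driver's congested_data
 * pointer and a mask of requested 1 << WB_[a]sync_congested bits, and
 * returns the subset that is currently congested.  The function name
 * below is a hypothetical example.
 */
static inline int example_congested(void *congested_data, int bdi_bits)
{
	/* trivial example: no driver-private state, never congested */
	return 0;
}
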
enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

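/*
 * Illustrative sketch, not part of this header: since the congested
 * state is plain bits in ->state, holders of a reference can test it
 * with a mask built from the wb_congested_state bits.  The helper name
 * below is a hypothetical example.
 */
static inline bool example_wb_congested(struct bdi_writeback_congested *congested,
					int cong_bits)
{
	/* cong_bits is a mask of 1 << WB_[a]sync_congested */
	return congested->state & cong_bits;
}
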
/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi is
 * served by its embedded bdi->wb (a bdi_writeback).
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg.  This enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg
 * may change as blkcg is disabled and enabled higher up in the hierarchy,
 * a wb is tested for its blkcg after lookup and removed from the index on
 * mismatch so that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	struct work_struct release_work;
#endif
};

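/*
 * Illustrative sketch, not part of this header: the ->stat[] counters
 * are ordinary percpu counters, so a cheap point-in-time reading clamps
 * the approximate sum at zero to hide transient per-CPU underflow.  The
 * helper name below is a hypothetical example.
 */
static inline s64 example_wb_stat(struct bdi_writeback *wb,
				  enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}
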
struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if
	 * there are any dirty wbs, which is depended upon by
	 * bdi_has_dirty().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt;	/* counts both cgwbs and cgwb_congested's */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

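/*
 * Illustrative sketch, not part of this header, assuming
 * CONFIG_CGROUP_WRITEBACK: per the comment above struct bdi_writeback,
 * a cgroup wb is looked up on its bdi by memcg ID and must be re-tested
 * for its blkcg, since the memcg - blkcg pairing can change.  The
 * function name and its arguments are hypothetical.
 */
static inline struct bdi_writeback *
example_cgwb_lookup(struct backing_dev_info *bdi, int memcg_id,
		    struct cgroup_subsys_state *cur_blkcg_css)
{
	struct bdi_writeback *wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_id);
	if (wb && wb->blkcg_css != cur_blkcg_css)
		wb = NULL;	/* stale pairing; a new wb must be created */
	return wb;
}
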
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;	/* the root wb is embedded and always valid */
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

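/*
 * Illustrative usage, not part of this header: a caller that found a wb
 * under a lock must pin it with wb_tryget() before sleeping, and drop
 * the pin with wb_put() when done.  The function name below is a
 * hypothetical example.
 */
static inline bool example_wb_pin_and_use(struct bdi_writeback *wb)
{
	if (!wb_tryget(wb))
		return false;	/* wb is being drained; look up a new one */
	/* wb is pinned here and safe to use across blocking operations */
	wb_put(wb);
	return true;
}
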
#endif	/* __LINUX_BACKING_DEV_DEFS_H */