/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_shutting_down,	/* wb_shutdown() in progress */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};
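
/*
 * Illustrative sketch, not part of the original header: the @sync
 * argument taken by the congestion helpers further down selects between
 * the two bits above.  This hypothetical mapping helper shows the rule.
 */
static inline enum wb_congested_state example_congested_bit(int sync)
{
	/* sync selects WB_sync_congested, async WB_async_congested */
	return sync ? WB_sync_congested : WB_async_congested;
}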

typedef int (congested_fn)(void *, int);

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
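
/*
 * Illustrative sketch, not part of the original header: a stacked
 * (md/dm style) driver can plug a callback of the congested_fn type
 * into its bdi.  The struct and function below are hypothetical;
 * @data receives the driver's congested_data pointer and @bits is a
 * mask built from the WB_[a]sync_congested bits.
 */
struct example_stacked_dev {
	unsigned long congested_state;	/* WB_[a]sync_congested bits */
};

static inline int example_dev_congested(void *data, int bits)
{
	struct example_stacked_dev *dev = data;

	return dev->congested_state & bits;
}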

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congestion_tree */
#endif
};
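
/*
 * Illustrative sketch, not part of the original header: because all
 * wb's attached to the same blkcg point at one shared
 * bdi_writeback_congested, testing its state word (with atomic bitops,
 * assuming <linux/bitops.h>) observes congestion set by any of them.
 * The helper name is hypothetical.
 */
static inline bool example_wbc_sync_congested(struct bdi_writeback_congested *wbc)
{
	return test_bit(WB_sync_congested, &wbc->state);
}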

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
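
/*
 * Illustrative sketch, not part of the original header: wb->state is
 * documented above as "Always use atomic bitops on this", so callers
 * test and set the WB_* bits with test_bit()/test_and_set_bit()
 * (assuming <linux/bitops.h>).  The helper name is hypothetical.
 */
static inline bool example_wb_is_registered(struct bdi_writeback *wb)
{
	return test_bit(WB_registered, &wb->state);
}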

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs; bdi_has_dirty() depends on this.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};
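
/*
 * Illustrative sketch, not part of the original header: the comment
 * above struct bdi_writeback describes how a cgroup wb is looked up by
 * memcg ID and discarded when its blkcg no longer matches.  This
 * hypothetical helper shows that rule; the real lookup lives in
 * mm/backing-dev.c.
 */
static inline struct bdi_writeback *
example_cgwb_lookup(struct backing_dev_info *bdi,
		    struct cgroup_subsys_state *memcg_css,
		    struct cgroup_subsys_state *blkcg_css)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	struct bdi_writeback *wb;

	/* cgwb_tree is indexed by the ID of the associated memcg */
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/* stale memcg-blkcg combination: caller must create a new wb */
	if (wb && wb->blkcg_css != blkcg_css)
		wb = NULL;
	return wb;
#else
	return &bdi->wb;
#endif
}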

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
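
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * driver toggling its bdi's async congested state as an internal queue
 * fills and drains, using the helpers above with the BLK_RW_* values.
 */
static inline void example_update_congestion(struct backing_dev_info *bdi,
					     bool queue_full)
{
	if (queue_full)
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	else
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
}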

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}
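
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller pinning a non-root wb across an operation.  The root wb is
 * embedded in its bdi and never goes away, which is why the helpers
 * above bypass the percpu refcount for it.
 */
static inline void example_pinned_writeback(struct bdi_writeback *wb)
{
	if (!wb_tryget(wb))
		return;		/* wb is being drained, skip it */
	/* ... operate on wb while it is guaranteed to stay alive ... */
	wb_put(wb);
}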

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
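
/*
 * Illustrative sketch, not part of the original header: wb_dying()
 * lets a hypothetical caller avoid queueing new work on a wb that is
 * already unlinked and being drained (assuming <linux/bitops.h> for
 * test_bit()).
 */
static inline bool example_wb_accepts_work(struct bdi_writeback *wb)
{
	return !wb_dying(wb) && test_bit(WB_registered, &wb->state);
}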

#endif	/* __LINUX_BACKING_DEV_DEFS_H */