// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Up limit of concurrency workers */
	int limit_active;

	/* Current number of concurrency workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)					\
noinline_for_stack void btrfs_##name(struct work_struct *arg)		\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

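/*
 * For reference, an invocation such as BTRFS_WORK_HELPER(endio_helper)
 * below expands to roughly:
 *
 *	noinline_for_stack void btrfs_endio_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg, struct btrfs_work,
 *						       normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * so each work type gets its own symbol and shows up distinctly in stack
 * traces, while all of them funnel into normal_work_helper().
 */
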
struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs the support of that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

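/*
 * Heavy submitters can poll the congestion check above to throttle
 * themselves. A minimal sketch (the surrounding loop and the queue chosen
 * here are illustrative, not lifted from a specific caller):
 *
 *	while (have_more_items(ctx)) {
 *		if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
 *			break;
 *		queue_next_item(ctx);
 *	}
 */
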
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

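/*
 * Typical mount-time usage (the name, flags and limits below are
 * illustrative, not copied from a specific caller): allocate the queue
 * pair once, and tear it down with btrfs_destroy_workqueue() on unmount.
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue(fs_info, "endio-write",
 *				   WQ_MEM_RECLAIM | WQ_FREEZABLE,
 *				   max_active, 8);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	btrfs_destroy_workqueue(wq);
 */
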
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * is called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it to be that accurate to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

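/*
 * Worked example of the scaling above: with thresh = 32 the hook
 * re-evaluates concurrency once every thresh / 4 = 8 executions. If more
 * than 32 items are pending, current_active grows by one; if fewer than
 * 16 (thresh / 2) are pending, it shrinks by one, always clamped to the
 * range [1, limit_active] before workqueue_set_max_active() is applied.
 */
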
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	void *wtag;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that the work of one Btrfs filesystem may depend
			 * on the work of another Btrfs filesystem via, e.g., a
			 * loop device. Therefore, we must not allow the current
			 * work item to be recycled until we are really done,
			 * otherwise we break the above assumption and can
			 * deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held though. Save the work as tag for the
			 * trace event, because the callback could free the
			 * structure.
			 */
			wtag = work;
			work->ordered_free(work);
			trace_btrfs_all_work_done(wq->fs_info, wtag);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		wtag = self;
		self->ordered_free(self);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
	}
}

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

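/*
 * Putting the pieces together, a submitter typically embeds a btrfs_work in
 * its own context structure, initializes it with one of the helpers
 * generated above, and queues it. The context type and the my_*() callbacks
 * below are illustrative only:
 *
 *	btrfs_init_work(&ctx->work, btrfs_worker_helper, my_start,
 *			my_ordered_done, my_free);
 *	btrfs_set_work_high_priority(&ctx->work);	(optional)
 *	btrfs_queue_work(fs_info->workers, &ctx->work);
 *
 * my_start() runs as the main work function, my_ordered_done() runs in
 * queue order relative to other ordered items on the same queue, and
 * my_free() is the last callback allowed to touch the item.
 */
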
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}