/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variants */
	atomic_t pending;

	/* Up limit of concurrency workers */
	int limit_active;

	/* Current number of concurrency workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)			\
{								\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);	\
	normal_work_helper(work);				\
}

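/*
 * Illustrative expansion (mechanical, following the macro above):
 * BTRFS_WORK_HELPER(endio_helper) produces
 *
 *	void btrfs_endio_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg, struct btrfs_work,
 *						       normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * so each kind of work below gets its own entry-point symbol while sharing
 * normal_work_helper() for the actual execution.
 */
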
struct btrfs_fs_info *
btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs the support of that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

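/*
 * Worked example (values taken from the defaults above, not from any
 * particular caller): with thresh at DFT_THRESHOLD (32), the queue reports
 * congestion once more than 64 items are pending, i.e. twice the threshold
 * that drives the current_active adjustment in thresh_exec_hook() below.
 */
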
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context,
 * so workqueue_set_max_active is called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it so accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

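/*
 * Worked example under assumed values (not from any specific caller): with
 * thresh = 32 and limit_active = 8, more than 32 pending items grows
 * new_current_active by one, fewer than 16 (thresh / 2) shrinks it by one,
 * the result is clamped to [1, 8], and workqueue_set_max_active() is only
 * reached when the clamped value actually differs from current_active.
 */
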
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	void *wtag;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with the
		 * lock held though. Save the work as tag for the trace event,
		 * because the callback could free the structure.
		 */
		wtag = work;
		work->ordered_free(work);
		trace_btrfs_all_work_done(wq->fs_info, wtag);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	void *wtag;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;
	/* Safe for tracepoints in case work gets freed by the callback */
	wtag = work;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(wq->fs_info, wtag);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

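/*
 * Illustrative submission path (the caller-side names async, start_func,
 * done_func, free_func and fs_info->workers are hypothetical, not defined
 * here): a user of this API typically does
 *
 *	btrfs_init_work(&async->work, btrfs_worker_helper, start_func,
 *			done_func, free_func);
 *	btrfs_set_work_high_priority(&async->work);	(optional)
 *	btrfs_queue_work(fs_info->workers, &async->work);
 *
 * where start_func runs as the work executes, while done_func and free_func,
 * when non-NULL, run in queueing order via run_ordered_work().
 */
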
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}