/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"
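
/* Bits used in btrfs_work->flags to track a work item's progress */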

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Up limit of concurrency workers */
	int limit_active;

	/* Current number of concurrency workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};
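
/*
 * A btrfs_workqueue pairs a normal queue with an optional high-priority
 * one; btrfs_queue_work() routes items flagged with WORK_HIGH_PRIO_BIT
 * to ->high when it exists.
 */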

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)						\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}
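
/*
 * For example, BTRFS_WORK_HELPER(endio_helper) expands to the thunk
 * void btrfs_endio_helper(struct work_struct *arg), which recovers the
 * containing btrfs_work and hands it to normal_work_helper().
 */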

struct btrfs_fs_info *
btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(struct btrfs_work *work)
{
	return work->wq->fs_info;
}
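
/*
 * One generated helper per work type gives every queue a distinct work
 * function pointer. The likely rationale (not spelled out here) is that
 * this keeps stack traces informative and prevents the core workqueue
 * code from treating unrelated btrfs works as the same item.
 */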

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}
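
/*
 * The format strings above produce workqueue names such as
 * "btrfs-worker" or "btrfs-delalloc-high" (assuming callers pass short
 * names like "worker"), which is what shows up in workqueue listings.
 */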

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
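
/*
 * Note that WQ_HIGHPRI is masked out of the normal queue's flags above,
 * so only the optional ->high queue is created with it set.
 */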

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need it
	 * to be that accurate when calculating new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
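
/*
 * Worked example with the default thresh of 32: more than 32 pending
 * items nudges current_active up by one per adjustment, fewer than 16
 * nudges it down, and the result is always clamped to [1, limit_active].
 */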

static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
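
/*
 * Typical caller-side usage, as a sketch (my_work, my_func, my_done and
 * my_free are illustrative names, not defined in this file):
 *
 *	btrfs_init_work(&my_work->work, btrfs_endio_helper,
 *			my_func, my_done, my_free);
 *	btrfs_queue_work(fs_info->endio_workers, &my_work->work);
 *
 * uniq_func is by convention the btrfs_*() thunk generated above that
 * corresponds to the queue the work will be submitted to.
 */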

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}
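
/*
 * The item is linked onto ordered_list before queue_work() runs, so
 * run_ordered_work() always sees ordered items in submission order.
 */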

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}