/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
		       op->flags);
		BUG();
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
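
/*
 * Example (illustrative sketch, not part of this file): a backend holding
 * an async op that is already in progress can push it (back) onto the
 * thread pool when more work arrives; cachefiles' read monitor does this
 * via the fscache_enqueue_retrieval() wrapper, where "retrieval" below is
 * a struct fscache_retrieval * embedding the op:
 *
 *	fscache_enqueue_operation(&retrieval->op);
 *
 * The ref needed by the work queue is taken inside this function, so the
 * caller keeps its own ref across the call.
 */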
/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}
/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;
	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* If we're in any other state, there must have been an I/O
		 * error of some nature.
		 */
		ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags));
		ret = -EIO;
	}

	spin_unlock(&object->lock);
	return ret;
}
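
/*
 * Example (sketch): this is roughly how the attribute-change op is
 * submitted elsewhere in FS-Cache (see __fscache_attr_changed()).  The
 * exclusive bit keeps reads and writes parked on ->pending_ops until
 * this op has finished:
 *
 *	fscache_operation_init(op, fscache_attr_changed_op, NULL);
 *	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
 *
 *	if (fscache_submit_exclusive_op(object, op) < 0)
 *		goto nobufs;
 */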
/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;
	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);
	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}
/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	int ret;
	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (fscache_object_is_dying(object)) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
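
/*
 * Example (sketch): callers typically submit with the cookie lock held
 * and treat any error as "cache unavailable"; condensed from the
 * read/alloc paths in page.c:
 *
 *	spin_lock(&cookie->lock);
 *	object = hlist_entry(cookie->backing_objects.first,
 *			     struct fscache_object, cookie_link);
 *	if (fscache_submit_op(object, op) < 0)
 *		goto nobufs_unlock;
 *	spin_unlock(&cookie->lock);
 */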
/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}
/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;
	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}
/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      void (*do_cancel)(struct fscache_operation *))
{
	struct fscache_object *object = op->object;
	int ret;
	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		if (do_cancel)
			do_cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
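
/*
 * Example (sketch): the return convention is 0 if the op was still
 * pending and has now been cancelled, or -EBUSY if it had already been
 * started.  "my_undo_accounting" is a hypothetical callback used to back
 * out caller-side state:
 *
 *	ret = fscache_cancel_op(op, my_undo_accounting);
 *	if (ret < 0) {
 *		... op already started; wait for it to complete instead ...
 *	}
 */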
/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);
	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}
/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);
	spin_lock(&object->lock);

	op->state = cancelled ?
		FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
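
/*
 * Example (sketch): a processor routine is expected to finish with
 * exactly one call to fscache_op_complete(), passing true if it gave up
 * on the work.  "do_the_io" is a hypothetical helper:
 *
 *	static void my_op_processor(struct fscache_operation *op)
 *	{
 *		bool aborted = do_the_io(op);
 *		fscache_op_complete(op, aborted);
 *	}
 */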
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);
	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
	op->state = FSCACHE_OP_ST_DEAD;

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);
	if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
		fscache_unuse_cookie(object);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
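
/*
 * Reference lifecycle (informal note): fscache_operation_init() starts
 * the op with usage == 1; the pending queue and the work queue each take
 * a further ref, and every ref is dropped through this function, the
 * final put freeing the op.  Typical caller-side pattern (sketch):
 *
 *	atomic_inc(&op->usage);			take a ref to wait on
 *	ret = fscache_submit_op(object, op);
 *	...
 *	fscache_put_operation(op);		drop it when done
 */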
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}
/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}
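
/*
 * Note (informal): each op's work item is bound to this function by
 * fscache_operation_init(), which does
 *
 *	INIT_WORK(&op->work, fscache_op_work_func);
 *
 * so the queue_work() call in fscache_enqueue_operation() is what lands
 * async ops here.
 */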