/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}
/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @op: The operation to initialise
 * @processor: The function to run the operation in the thread pool
 * @cancel: The cancellation function to assign (NULL for a no-op)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object as needed.
 */
void fscache_operation_init(struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
}
EXPORT_SYMBOL(fscache_operation_init);
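
/* Illustrative sketch (hypothetical types, not part of this file): users of
 * the operation API typically embed struct fscache_operation in a larger
 * record and hand their own release routine to fscache_operation_init():
 *
 *	struct my_op {
 *		struct fscache_operation op;
 *		void *buffer;
 *	};
 *
 *	static void my_op_release(struct fscache_operation *_op)
 *	{
 *		struct my_op *my = container_of(_op, struct my_op, op);
 *		kfree(my->buffer);
 *	}
 *
 *	fscache_operation_init(&my->op, my_op_processor, NULL, my_op_release);
 *
 * Passing NULL for @cancel makes the ?: expression above substitute
 * fscache_operation_dummy_cancel.
 */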
/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
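
/* Illustrative note: the submitter chooses the processing context through the
 * type bits in op->flags before submission, e.g. (hypothetical caller):
 *
 *	my->op.flags = FSCACHE_OP_ASYNC;
 *	fscache_submit_op(object, &my->op);
 *
 * FSCACHE_OP_ASYNC ops are punted to fscache_op_wq by the switch above;
 * FSCACHE_OP_MYTHREAD ops are merely flagged runnable and left for the
 * submitting thread to process itself.
 */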
/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}
/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", op->processor, op->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}
/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}
/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}

		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}
/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
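
/* Illustrative note on the return values above: 0 means the op was cancelled
 * (either pulled off the pending queue, dropping that queue's ref, or torn
 * down mid-flight when @cancel_in_progress_op is true); -EBUSY means the op
 * had already gone too far to cancel and is left to complete.  A hypothetical
 * call site:
 *
 *	if (fscache_cancel_op(&my->op, false) == -EBUSY)
 *		wait_for_completion(&my->done);
 */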
/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}
/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
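
/* Illustrative sketch: an op's processor is expected to call
 * fscache_op_complete() exactly once when the work finishes or is abandoned.
 * A hypothetical processor routine:
 *
 *	static void my_op_processor(struct fscache_operation *_op)
 *	{
 *		struct my_op *my = container_of(_op, struct my_op, op);
 *		int ret = my_do_io(my);
 *
 *		fscache_op_complete(_op, ret < 0);
 *	}
 *
 * Passing cancelled=true routes through op->cancel() and the CANCELLED state
 * rather than COMPLETE, as the branch above shows.
 */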
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
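
/* Reference-counting summary, derived from the code above: an op is created
 * with usage == 1 by fscache_operation_init(); parking it on
 * object->pending_ops or queueing its work item takes a further ref via
 * atomic_inc(&op->usage); each ref is balanced by fscache_put_operation();
 * and the final put runs op->release(), marks the op FSCACHE_OP_ST_DEAD and
 * frees it, deferring to the op_gc work item when the object lock cannot be
 * taken immediately.
 */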
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}
/*
 * execute an operation using fs_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}