/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
20 atomic_t fscache_op_debug_id
;
21 EXPORT_SYMBOL(fscache_op_debug_id
);
/* Default cancellation handler installed when the caller supplies none;
 * deliberately does nothing.
 */
static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}
28 * fscache_operation_init - Do basic initialisation of an operation
29 * @op: The operation to initialise
30 * @release: The release function to assign
32 * Do basic initialisation of an operation. The caller must still set flags,
33 * object and processor if needed.
35 void fscache_operation_init(struct fscache_cookie
*cookie
,
36 struct fscache_operation
*op
,
37 fscache_operation_processor_t processor
,
38 fscache_operation_cancel_t cancel
,
39 fscache_operation_release_t release
)
41 INIT_WORK(&op
->work
, fscache_op_work_func
);
42 atomic_set(&op
->usage
, 1);
43 op
->state
= FSCACHE_OP_ST_INITIALISED
;
44 op
->debug_id
= atomic_inc_return(&fscache_op_debug_id
);
45 op
->processor
= processor
;
46 op
->cancel
= cancel
?: fscache_operation_dummy_cancel
;
47 op
->release
= release
;
48 INIT_LIST_HEAD(&op
->pend_link
);
49 fscache_stat(&fscache_n_op_initialised
);
50 trace_fscache_op(cookie
, op
, fscache_op_init
);
52 EXPORT_SYMBOL(fscache_operation_init
);
55 * fscache_enqueue_operation - Enqueue an operation for processing
56 * @op: The operation to enqueue
58 * Enqueue an operation for processing by the FS-Cache thread pool.
60 * This will get its own ref on the object.
62 void fscache_enqueue_operation(struct fscache_operation
*op
)
64 struct fscache_cookie
*cookie
= op
->object
->cookie
;
66 _enter("{OBJ%x OP%x,%u}",
67 op
->object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
69 ASSERT(list_empty(&op
->pend_link
));
70 ASSERT(op
->processor
!= NULL
);
71 ASSERT(fscache_object_is_available(op
->object
));
72 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
73 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_IN_PROGRESS
);
75 fscache_stat(&fscache_n_op_enqueue
);
76 switch (op
->flags
& FSCACHE_OP_TYPE
) {
77 case FSCACHE_OP_ASYNC
:
78 trace_fscache_op(cookie
, op
, fscache_op_enqueue_async
);
79 _debug("queue async");
80 atomic_inc(&op
->usage
);
81 if (!queue_work(fscache_op_wq
, &op
->work
))
82 fscache_put_operation(op
);
84 case FSCACHE_OP_MYTHREAD
:
85 trace_fscache_op(cookie
, op
, fscache_op_enqueue_mythread
);
86 _debug("queue for caller's attention");
89 pr_err("Unexpected op type %lx", op
->flags
);
94 EXPORT_SYMBOL(fscache_enqueue_operation
);
99 static void fscache_run_op(struct fscache_object
*object
,
100 struct fscache_operation
*op
)
102 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_PENDING
);
104 op
->state
= FSCACHE_OP_ST_IN_PROGRESS
;
105 object
->n_in_progress
++;
106 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
107 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
109 fscache_enqueue_operation(op
);
111 trace_fscache_op(object
->cookie
, op
, fscache_op_run
);
112 fscache_stat(&fscache_n_op_run
);
116 * report an unexpected submission
118 static void fscache_report_unexpected_submission(struct fscache_object
*object
,
119 struct fscache_operation
*op
,
120 const struct fscache_state
*ostate
)
122 static bool once_only
;
123 struct fscache_operation
*p
;
130 kdebug("unexpected submission OP%x [OBJ%x %s]",
131 op
->debug_id
, object
->debug_id
, object
->state
->name
);
132 kdebug("objstate=%s [%s]", object
->state
->name
, ostate
->name
);
133 kdebug("objflags=%lx", object
->flags
);
134 kdebug("objevent=%lx [%lx]", object
->events
, object
->event_mask
);
135 kdebug("ops=%u inp=%u exc=%u",
136 object
->n_ops
, object
->n_in_progress
, object
->n_exclusive
);
138 if (!list_empty(&object
->pending_ops
)) {
140 list_for_each_entry(p
, &object
->pending_ops
, pend_link
) {
141 ASSERTCMP(p
->object
, ==, object
);
142 kdebug("%p %p", op
->processor
, op
->release
);
153 * submit an exclusive operation for an object
154 * - other ops are excluded from running simultaneously with this one
155 * - this gets any extra refs it needs on an op
157 int fscache_submit_exclusive_op(struct fscache_object
*object
,
158 struct fscache_operation
*op
)
160 const struct fscache_state
*ostate
;
164 _enter("{OBJ%x OP%x},", object
->debug_id
, op
->debug_id
);
166 trace_fscache_op(object
->cookie
, op
, fscache_op_submit_ex
);
168 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_INITIALISED
);
169 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
171 spin_lock(&object
->lock
);
172 ASSERTCMP(object
->n_ops
, >=, object
->n_in_progress
);
173 ASSERTCMP(object
->n_ops
, >=, object
->n_exclusive
);
174 ASSERT(list_empty(&op
->pend_link
));
176 ostate
= object
->state
;
179 op
->state
= FSCACHE_OP_ST_PENDING
;
180 flags
= READ_ONCE(object
->flags
);
181 if (unlikely(!(flags
& BIT(FSCACHE_OBJECT_IS_LIVE
)))) {
182 fscache_stat(&fscache_n_op_rejected
);
184 op
->state
= FSCACHE_OP_ST_CANCELLED
;
186 } else if (unlikely(fscache_cache_is_broken(object
))) {
188 op
->state
= FSCACHE_OP_ST_CANCELLED
;
190 } else if (flags
& BIT(FSCACHE_OBJECT_IS_AVAILABLE
)) {
193 object
->n_exclusive
++; /* reads and writes must wait */
195 if (object
->n_in_progress
> 0) {
196 atomic_inc(&op
->usage
);
197 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
198 fscache_stat(&fscache_n_op_pend
);
199 } else if (!list_empty(&object
->pending_ops
)) {
200 atomic_inc(&op
->usage
);
201 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
202 fscache_stat(&fscache_n_op_pend
);
203 fscache_start_operations(object
);
205 ASSERTCMP(object
->n_in_progress
, ==, 0);
206 fscache_run_op(object
, op
);
209 /* need to issue a new write op after this */
210 clear_bit(FSCACHE_OBJECT_PENDING_WRITE
, &object
->flags
);
212 } else if (flags
& BIT(FSCACHE_OBJECT_IS_LOOKED_UP
)) {
215 object
->n_exclusive
++; /* reads and writes must wait */
216 atomic_inc(&op
->usage
);
217 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
218 fscache_stat(&fscache_n_op_pend
);
220 } else if (flags
& BIT(FSCACHE_OBJECT_KILLED_BY_CACHE
)) {
222 op
->state
= FSCACHE_OP_ST_CANCELLED
;
225 fscache_report_unexpected_submission(object
, op
, ostate
);
227 op
->state
= FSCACHE_OP_ST_CANCELLED
;
231 spin_unlock(&object
->lock
);
236 * submit an operation for an object
237 * - objects may be submitted only in the following states:
238 * - during object creation (write ops may be submitted)
239 * - whilst the object is active
240 * - after an I/O error incurred in one of the two above states (op rejected)
241 * - this gets any extra refs it needs on an op
243 int fscache_submit_op(struct fscache_object
*object
,
244 struct fscache_operation
*op
)
246 const struct fscache_state
*ostate
;
250 _enter("{OBJ%x OP%x},{%u}",
251 object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
253 trace_fscache_op(object
->cookie
, op
, fscache_op_submit
);
255 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_INITIALISED
);
256 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
258 spin_lock(&object
->lock
);
259 ASSERTCMP(object
->n_ops
, >=, object
->n_in_progress
);
260 ASSERTCMP(object
->n_ops
, >=, object
->n_exclusive
);
261 ASSERT(list_empty(&op
->pend_link
));
263 ostate
= object
->state
;
266 op
->state
= FSCACHE_OP_ST_PENDING
;
267 flags
= READ_ONCE(object
->flags
);
268 if (unlikely(!(flags
& BIT(FSCACHE_OBJECT_IS_LIVE
)))) {
269 fscache_stat(&fscache_n_op_rejected
);
271 op
->state
= FSCACHE_OP_ST_CANCELLED
;
273 } else if (unlikely(fscache_cache_is_broken(object
))) {
275 op
->state
= FSCACHE_OP_ST_CANCELLED
;
277 } else if (flags
& BIT(FSCACHE_OBJECT_IS_AVAILABLE
)) {
281 if (object
->n_exclusive
> 0) {
282 atomic_inc(&op
->usage
);
283 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
284 fscache_stat(&fscache_n_op_pend
);
285 } else if (!list_empty(&object
->pending_ops
)) {
286 atomic_inc(&op
->usage
);
287 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
288 fscache_stat(&fscache_n_op_pend
);
289 fscache_start_operations(object
);
291 ASSERTCMP(object
->n_exclusive
, ==, 0);
292 fscache_run_op(object
, op
);
295 } else if (flags
& BIT(FSCACHE_OBJECT_IS_LOOKED_UP
)) {
298 atomic_inc(&op
->usage
);
299 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
300 fscache_stat(&fscache_n_op_pend
);
302 } else if (flags
& BIT(FSCACHE_OBJECT_KILLED_BY_CACHE
)) {
304 op
->state
= FSCACHE_OP_ST_CANCELLED
;
307 fscache_report_unexpected_submission(object
, op
, ostate
);
308 ASSERT(!fscache_object_is_active(object
));
310 op
->state
= FSCACHE_OP_ST_CANCELLED
;
314 spin_unlock(&object
->lock
);
319 * queue an object for withdrawal on error, aborting all following asynchronous
322 void fscache_abort_object(struct fscache_object
*object
)
324 _enter("{OBJ%x}", object
->debug_id
);
326 fscache_raise_event(object
, FSCACHE_OBJECT_EV_ERROR
);
330 * Jump start the operation processing on an object. The caller must hold
333 void fscache_start_operations(struct fscache_object
*object
)
335 struct fscache_operation
*op
;
338 while (!list_empty(&object
->pending_ops
) && !stop
) {
339 op
= list_entry(object
->pending_ops
.next
,
340 struct fscache_operation
, pend_link
);
342 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
)) {
343 if (object
->n_in_progress
> 0)
347 list_del_init(&op
->pend_link
);
348 fscache_run_op(object
, op
);
350 /* the pending queue was holding a ref on the object */
351 fscache_put_operation(op
);
354 ASSERTCMP(object
->n_in_progress
, <=, object
->n_ops
);
356 _debug("woke %d ops on OBJ%x",
357 object
->n_in_progress
, object
->debug_id
);
361 * cancel an operation that's pending on an object
363 int fscache_cancel_op(struct fscache_operation
*op
,
364 bool cancel_in_progress_op
)
366 struct fscache_object
*object
= op
->object
;
370 _enter("OBJ%x OP%x}", op
->object
->debug_id
, op
->debug_id
);
372 trace_fscache_op(object
->cookie
, op
, fscache_op_cancel
);
374 ASSERTCMP(op
->state
, >=, FSCACHE_OP_ST_PENDING
);
375 ASSERTCMP(op
->state
, !=, FSCACHE_OP_ST_CANCELLED
);
376 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
378 spin_lock(&object
->lock
);
381 if (op
->state
== FSCACHE_OP_ST_PENDING
) {
382 ASSERT(!list_empty(&op
->pend_link
));
383 list_del_init(&op
->pend_link
);
386 fscache_stat(&fscache_n_op_cancelled
);
388 op
->state
= FSCACHE_OP_ST_CANCELLED
;
389 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
390 object
->n_exclusive
--;
391 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
392 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
394 } else if (op
->state
== FSCACHE_OP_ST_IN_PROGRESS
&& cancel_in_progress_op
) {
395 ASSERTCMP(object
->n_in_progress
, >, 0);
396 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
397 object
->n_exclusive
--;
398 object
->n_in_progress
--;
399 if (object
->n_in_progress
== 0)
400 fscache_start_operations(object
);
402 fscache_stat(&fscache_n_op_cancelled
);
404 op
->state
= FSCACHE_OP_ST_CANCELLED
;
405 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
406 object
->n_exclusive
--;
407 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
408 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
413 fscache_put_operation(op
);
414 spin_unlock(&object
->lock
);
415 _leave(" = %d", ret
);
420 * Cancel all pending operations on an object
422 void fscache_cancel_all_ops(struct fscache_object
*object
)
424 struct fscache_operation
*op
;
426 _enter("OBJ%x", object
->debug_id
);
428 spin_lock(&object
->lock
);
430 while (!list_empty(&object
->pending_ops
)) {
431 op
= list_entry(object
->pending_ops
.next
,
432 struct fscache_operation
, pend_link
);
433 fscache_stat(&fscache_n_op_cancelled
);
434 list_del_init(&op
->pend_link
);
436 trace_fscache_op(object
->cookie
, op
, fscache_op_cancel_all
);
438 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_PENDING
);
440 op
->state
= FSCACHE_OP_ST_CANCELLED
;
442 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
443 object
->n_exclusive
--;
444 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
445 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
446 fscache_put_operation(op
);
447 cond_resched_lock(&object
->lock
);
450 spin_unlock(&object
->lock
);
455 * Record the completion or cancellation of an in-progress operation.
457 void fscache_op_complete(struct fscache_operation
*op
, bool cancelled
)
459 struct fscache_object
*object
= op
->object
;
461 _enter("OBJ%x", object
->debug_id
);
463 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_IN_PROGRESS
);
464 ASSERTCMP(object
->n_in_progress
, >, 0);
465 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
),
466 object
->n_exclusive
, >, 0);
467 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
),
468 object
->n_in_progress
, ==, 1);
470 spin_lock(&object
->lock
);
473 trace_fscache_op(object
->cookie
, op
, fscache_op_completed
);
474 op
->state
= FSCACHE_OP_ST_COMPLETE
;
477 trace_fscache_op(object
->cookie
, op
, fscache_op_cancelled
);
478 op
->state
= FSCACHE_OP_ST_CANCELLED
;
481 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
482 object
->n_exclusive
--;
483 object
->n_in_progress
--;
484 if (object
->n_in_progress
== 0)
485 fscache_start_operations(object
);
487 spin_unlock(&object
->lock
);
490 EXPORT_SYMBOL(fscache_op_complete
);
493 * release an operation
494 * - queues pending ops if this is the last in-progress op
496 void fscache_put_operation(struct fscache_operation
*op
)
498 struct fscache_object
*object
;
499 struct fscache_cache
*cache
;
501 _enter("{OBJ%x OP%x,%d}",
502 op
->object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
504 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
506 if (!atomic_dec_and_test(&op
->usage
))
509 trace_fscache_op(op
->object
? op
->object
->cookie
: NULL
, op
, fscache_op_put
);
512 ASSERTIFCMP(op
->state
!= FSCACHE_OP_ST_INITIALISED
&&
513 op
->state
!= FSCACHE_OP_ST_COMPLETE
,
514 op
->state
, ==, FSCACHE_OP_ST_CANCELLED
);
516 fscache_stat(&fscache_n_op_release
);
522 op
->state
= FSCACHE_OP_ST_DEAD
;
525 if (likely(object
)) {
526 if (test_bit(FSCACHE_OP_DEC_READ_CNT
, &op
->flags
))
527 atomic_dec(&object
->n_reads
);
528 if (test_bit(FSCACHE_OP_UNUSE_COOKIE
, &op
->flags
))
529 fscache_unuse_cookie(object
);
531 /* now... we may get called with the object spinlock held, so we
532 * complete the cleanup here only if we can immediately acquire the
533 * lock, and defer it otherwise */
534 if (!spin_trylock(&object
->lock
)) {
536 fscache_stat(&fscache_n_op_deferred_release
);
538 cache
= object
->cache
;
539 spin_lock(&cache
->op_gc_list_lock
);
540 list_add_tail(&op
->pend_link
, &cache
->op_gc_list
);
541 spin_unlock(&cache
->op_gc_list_lock
);
542 schedule_work(&cache
->op_gc
);
547 ASSERTCMP(object
->n_ops
, >, 0);
549 if (object
->n_ops
== 0)
550 fscache_raise_event(object
, FSCACHE_OBJECT_EV_CLEARED
);
552 spin_unlock(&object
->lock
);
558 EXPORT_SYMBOL(fscache_put_operation
);
561 * garbage collect operations that have had their release deferred
563 void fscache_operation_gc(struct work_struct
*work
)
565 struct fscache_operation
*op
;
566 struct fscache_object
*object
;
567 struct fscache_cache
*cache
=
568 container_of(work
, struct fscache_cache
, op_gc
);
574 spin_lock(&cache
->op_gc_list_lock
);
575 if (list_empty(&cache
->op_gc_list
)) {
576 spin_unlock(&cache
->op_gc_list_lock
);
580 op
= list_entry(cache
->op_gc_list
.next
,
581 struct fscache_operation
, pend_link
);
582 list_del(&op
->pend_link
);
583 spin_unlock(&cache
->op_gc_list_lock
);
586 trace_fscache_op(object
->cookie
, op
, fscache_op_gc
);
588 spin_lock(&object
->lock
);
590 _debug("GC DEFERRED REL OBJ%x OP%x",
591 object
->debug_id
, op
->debug_id
);
592 fscache_stat(&fscache_n_op_gc
);
594 ASSERTCMP(atomic_read(&op
->usage
), ==, 0);
595 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_DEAD
);
597 ASSERTCMP(object
->n_ops
, >, 0);
599 if (object
->n_ops
== 0)
600 fscache_raise_event(object
, FSCACHE_OBJECT_EV_CLEARED
);
602 spin_unlock(&object
->lock
);
605 } while (count
++ < 20);
607 if (!list_empty(&cache
->op_gc_list
))
608 schedule_work(&cache
->op_gc
);
614 * execute an operation using fs_op_wq to provide processing context -
615 * the caller holds a ref to this object, so we don't need to hold one
617 void fscache_op_work_func(struct work_struct
*work
)
619 struct fscache_operation
*op
=
620 container_of(work
, struct fscache_operation
, work
);
623 _enter("{OBJ%x OP%x,%d}",
624 op
->object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
626 trace_fscache_op(op
->object
->cookie
, op
, fscache_op_work
);
628 ASSERT(op
->processor
!= NULL
);
631 fscache_hist(fscache_ops_histogram
, start
);
632 fscache_put_operation(op
);