/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */
14 #define FSCACHE_DEBUG_LEVEL OPERATION
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/slab.h>
20 atomic_t fscache_op_debug_id
;
21 EXPORT_SYMBOL(fscache_op_debug_id
);
/*
 * Default cancellation handler, installed when the submitter supplied none.
 * Deliberately a no-op: an op with no private state needs nothing unwound,
 * but having a non-NULL op->cancel lets callers invoke it unconditionally.
 */
static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}
28 * fscache_operation_init - Do basic initialisation of an operation
29 * @op: The operation to initialise
30 * @release: The release function to assign
32 * Do basic initialisation of an operation. The caller must still set flags,
33 * object and processor if needed.
35 void fscache_operation_init(struct fscache_cookie
*cookie
,
36 struct fscache_operation
*op
,
37 fscache_operation_processor_t processor
,
38 fscache_operation_cancel_t cancel
,
39 fscache_operation_release_t release
)
41 INIT_WORK(&op
->work
, fscache_op_work_func
);
42 atomic_set(&op
->usage
, 1);
43 op
->state
= FSCACHE_OP_ST_INITIALISED
;
44 op
->debug_id
= atomic_inc_return(&fscache_op_debug_id
);
45 op
->processor
= processor
;
46 op
->cancel
= cancel
?: fscache_operation_dummy_cancel
;
47 op
->release
= release
;
48 INIT_LIST_HEAD(&op
->pend_link
);
49 fscache_stat(&fscache_n_op_initialised
);
50 trace_fscache_op(cookie
, op
, fscache_op_init
);
52 EXPORT_SYMBOL(fscache_operation_init
);
55 * fscache_enqueue_operation - Enqueue an operation for processing
56 * @op: The operation to enqueue
58 * Enqueue an operation for processing by the FS-Cache thread pool.
60 * This will get its own ref on the object.
62 void fscache_enqueue_operation(struct fscache_operation
*op
)
64 struct fscache_cookie
*cookie
= op
->object
->cookie
;
66 _enter("{OBJ%x OP%x,%u}",
67 op
->object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
69 ASSERT(list_empty(&op
->pend_link
));
70 ASSERT(op
->processor
!= NULL
);
71 ASSERT(fscache_object_is_available(op
->object
));
72 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
73 ASSERTIFCMP(op
->state
!= FSCACHE_OP_ST_IN_PROGRESS
,
74 op
->state
, ==, FSCACHE_OP_ST_CANCELLED
);
76 fscache_stat(&fscache_n_op_enqueue
);
77 switch (op
->flags
& FSCACHE_OP_TYPE
) {
78 case FSCACHE_OP_ASYNC
:
79 trace_fscache_op(cookie
, op
, fscache_op_enqueue_async
);
80 _debug("queue async");
81 atomic_inc(&op
->usage
);
82 if (!queue_work(fscache_op_wq
, &op
->work
))
83 fscache_put_operation(op
);
85 case FSCACHE_OP_MYTHREAD
:
86 trace_fscache_op(cookie
, op
, fscache_op_enqueue_mythread
);
87 _debug("queue for caller's attention");
90 pr_err("Unexpected op type %lx", op
->flags
);
95 EXPORT_SYMBOL(fscache_enqueue_operation
);
100 static void fscache_run_op(struct fscache_object
*object
,
101 struct fscache_operation
*op
)
103 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_PENDING
);
105 op
->state
= FSCACHE_OP_ST_IN_PROGRESS
;
106 object
->n_in_progress
++;
107 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
108 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
110 fscache_enqueue_operation(op
);
112 trace_fscache_op(object
->cookie
, op
, fscache_op_run
);
113 fscache_stat(&fscache_n_op_run
);
117 * report an unexpected submission
119 static void fscache_report_unexpected_submission(struct fscache_object
*object
,
120 struct fscache_operation
*op
,
121 const struct fscache_state
*ostate
)
123 static bool once_only
;
124 struct fscache_operation
*p
;
131 kdebug("unexpected submission OP%x [OBJ%x %s]",
132 op
->debug_id
, object
->debug_id
, object
->state
->name
);
133 kdebug("objstate=%s [%s]", object
->state
->name
, ostate
->name
);
134 kdebug("objflags=%lx", object
->flags
);
135 kdebug("objevent=%lx [%lx]", object
->events
, object
->event_mask
);
136 kdebug("ops=%u inp=%u exc=%u",
137 object
->n_ops
, object
->n_in_progress
, object
->n_exclusive
);
139 if (!list_empty(&object
->pending_ops
)) {
141 list_for_each_entry(p
, &object
->pending_ops
, pend_link
) {
142 ASSERTCMP(p
->object
, ==, object
);
143 kdebug("%p %p", op
->processor
, op
->release
);
154 * submit an exclusive operation for an object
155 * - other ops are excluded from running simultaneously with this one
156 * - this gets any extra refs it needs on an op
158 int fscache_submit_exclusive_op(struct fscache_object
*object
,
159 struct fscache_operation
*op
)
161 const struct fscache_state
*ostate
;
165 _enter("{OBJ%x OP%x},", object
->debug_id
, op
->debug_id
);
167 trace_fscache_op(object
->cookie
, op
, fscache_op_submit_ex
);
169 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_INITIALISED
);
170 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
172 spin_lock(&object
->lock
);
173 ASSERTCMP(object
->n_ops
, >=, object
->n_in_progress
);
174 ASSERTCMP(object
->n_ops
, >=, object
->n_exclusive
);
175 ASSERT(list_empty(&op
->pend_link
));
177 ostate
= object
->state
;
180 op
->state
= FSCACHE_OP_ST_PENDING
;
181 flags
= READ_ONCE(object
->flags
);
182 if (unlikely(!(flags
& BIT(FSCACHE_OBJECT_IS_LIVE
)))) {
183 fscache_stat(&fscache_n_op_rejected
);
185 op
->state
= FSCACHE_OP_ST_CANCELLED
;
187 } else if (unlikely(fscache_cache_is_broken(object
))) {
189 op
->state
= FSCACHE_OP_ST_CANCELLED
;
191 } else if (flags
& BIT(FSCACHE_OBJECT_IS_AVAILABLE
)) {
194 object
->n_exclusive
++; /* reads and writes must wait */
196 if (object
->n_in_progress
> 0) {
197 atomic_inc(&op
->usage
);
198 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
199 fscache_stat(&fscache_n_op_pend
);
200 } else if (!list_empty(&object
->pending_ops
)) {
201 atomic_inc(&op
->usage
);
202 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
203 fscache_stat(&fscache_n_op_pend
);
204 fscache_start_operations(object
);
206 ASSERTCMP(object
->n_in_progress
, ==, 0);
207 fscache_run_op(object
, op
);
210 /* need to issue a new write op after this */
211 clear_bit(FSCACHE_OBJECT_PENDING_WRITE
, &object
->flags
);
213 } else if (flags
& BIT(FSCACHE_OBJECT_IS_LOOKED_UP
)) {
216 object
->n_exclusive
++; /* reads and writes must wait */
217 atomic_inc(&op
->usage
);
218 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
219 fscache_stat(&fscache_n_op_pend
);
221 } else if (flags
& BIT(FSCACHE_OBJECT_KILLED_BY_CACHE
)) {
223 op
->state
= FSCACHE_OP_ST_CANCELLED
;
226 fscache_report_unexpected_submission(object
, op
, ostate
);
228 op
->state
= FSCACHE_OP_ST_CANCELLED
;
232 spin_unlock(&object
->lock
);
237 * submit an operation for an object
238 * - objects may be submitted only in the following states:
239 * - during object creation (write ops may be submitted)
240 * - whilst the object is active
241 * - after an I/O error incurred in one of the two above states (op rejected)
242 * - this gets any extra refs it needs on an op
244 int fscache_submit_op(struct fscache_object
*object
,
245 struct fscache_operation
*op
)
247 const struct fscache_state
*ostate
;
251 _enter("{OBJ%x OP%x},{%u}",
252 object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
254 trace_fscache_op(object
->cookie
, op
, fscache_op_submit
);
256 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_INITIALISED
);
257 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
259 spin_lock(&object
->lock
);
260 ASSERTCMP(object
->n_ops
, >=, object
->n_in_progress
);
261 ASSERTCMP(object
->n_ops
, >=, object
->n_exclusive
);
262 ASSERT(list_empty(&op
->pend_link
));
264 ostate
= object
->state
;
267 op
->state
= FSCACHE_OP_ST_PENDING
;
268 flags
= READ_ONCE(object
->flags
);
269 if (unlikely(!(flags
& BIT(FSCACHE_OBJECT_IS_LIVE
)))) {
270 fscache_stat(&fscache_n_op_rejected
);
272 op
->state
= FSCACHE_OP_ST_CANCELLED
;
274 } else if (unlikely(fscache_cache_is_broken(object
))) {
276 op
->state
= FSCACHE_OP_ST_CANCELLED
;
278 } else if (flags
& BIT(FSCACHE_OBJECT_IS_AVAILABLE
)) {
282 if (object
->n_exclusive
> 0) {
283 atomic_inc(&op
->usage
);
284 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
285 fscache_stat(&fscache_n_op_pend
);
286 } else if (!list_empty(&object
->pending_ops
)) {
287 atomic_inc(&op
->usage
);
288 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
289 fscache_stat(&fscache_n_op_pend
);
290 fscache_start_operations(object
);
292 ASSERTCMP(object
->n_exclusive
, ==, 0);
293 fscache_run_op(object
, op
);
296 } else if (flags
& BIT(FSCACHE_OBJECT_IS_LOOKED_UP
)) {
299 atomic_inc(&op
->usage
);
300 list_add_tail(&op
->pend_link
, &object
->pending_ops
);
301 fscache_stat(&fscache_n_op_pend
);
303 } else if (flags
& BIT(FSCACHE_OBJECT_KILLED_BY_CACHE
)) {
305 op
->state
= FSCACHE_OP_ST_CANCELLED
;
308 fscache_report_unexpected_submission(object
, op
, ostate
);
309 ASSERT(!fscache_object_is_active(object
));
311 op
->state
= FSCACHE_OP_ST_CANCELLED
;
315 spin_unlock(&object
->lock
);
320 * queue an object for withdrawal on error, aborting all following asynchronous
323 void fscache_abort_object(struct fscache_object
*object
)
325 _enter("{OBJ%x}", object
->debug_id
);
327 fscache_raise_event(object
, FSCACHE_OBJECT_EV_ERROR
);
331 * Jump start the operation processing on an object. The caller must hold
334 void fscache_start_operations(struct fscache_object
*object
)
336 struct fscache_operation
*op
;
339 while (!list_empty(&object
->pending_ops
) && !stop
) {
340 op
= list_entry(object
->pending_ops
.next
,
341 struct fscache_operation
, pend_link
);
343 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
)) {
344 if (object
->n_in_progress
> 0)
348 list_del_init(&op
->pend_link
);
349 fscache_run_op(object
, op
);
351 /* the pending queue was holding a ref on the object */
352 fscache_put_operation(op
);
355 ASSERTCMP(object
->n_in_progress
, <=, object
->n_ops
);
357 _debug("woke %d ops on OBJ%x",
358 object
->n_in_progress
, object
->debug_id
);
362 * cancel an operation that's pending on an object
364 int fscache_cancel_op(struct fscache_operation
*op
,
365 bool cancel_in_progress_op
)
367 struct fscache_object
*object
= op
->object
;
371 _enter("OBJ%x OP%x}", op
->object
->debug_id
, op
->debug_id
);
373 trace_fscache_op(object
->cookie
, op
, fscache_op_cancel
);
375 ASSERTCMP(op
->state
, >=, FSCACHE_OP_ST_PENDING
);
376 ASSERTCMP(op
->state
, !=, FSCACHE_OP_ST_CANCELLED
);
377 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
379 spin_lock(&object
->lock
);
382 if (op
->state
== FSCACHE_OP_ST_PENDING
) {
383 ASSERT(!list_empty(&op
->pend_link
));
384 list_del_init(&op
->pend_link
);
387 fscache_stat(&fscache_n_op_cancelled
);
389 op
->state
= FSCACHE_OP_ST_CANCELLED
;
390 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
391 object
->n_exclusive
--;
392 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
393 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
395 } else if (op
->state
== FSCACHE_OP_ST_IN_PROGRESS
&& cancel_in_progress_op
) {
396 ASSERTCMP(object
->n_in_progress
, >, 0);
397 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
398 object
->n_exclusive
--;
399 object
->n_in_progress
--;
400 if (object
->n_in_progress
== 0)
401 fscache_start_operations(object
);
403 fscache_stat(&fscache_n_op_cancelled
);
405 op
->state
= FSCACHE_OP_ST_CANCELLED
;
406 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
407 object
->n_exclusive
--;
408 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
409 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
414 fscache_put_operation(op
);
415 spin_unlock(&object
->lock
);
416 _leave(" = %d", ret
);
421 * Cancel all pending operations on an object
423 void fscache_cancel_all_ops(struct fscache_object
*object
)
425 struct fscache_operation
*op
;
427 _enter("OBJ%x", object
->debug_id
);
429 spin_lock(&object
->lock
);
431 while (!list_empty(&object
->pending_ops
)) {
432 op
= list_entry(object
->pending_ops
.next
,
433 struct fscache_operation
, pend_link
);
434 fscache_stat(&fscache_n_op_cancelled
);
435 list_del_init(&op
->pend_link
);
437 trace_fscache_op(object
->cookie
, op
, fscache_op_cancel_all
);
439 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_PENDING
);
441 op
->state
= FSCACHE_OP_ST_CANCELLED
;
443 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
444 object
->n_exclusive
--;
445 if (test_and_clear_bit(FSCACHE_OP_WAITING
, &op
->flags
))
446 wake_up_bit(&op
->flags
, FSCACHE_OP_WAITING
);
447 fscache_put_operation(op
);
448 cond_resched_lock(&object
->lock
);
451 spin_unlock(&object
->lock
);
456 * Record the completion or cancellation of an in-progress operation.
458 void fscache_op_complete(struct fscache_operation
*op
, bool cancelled
)
460 struct fscache_object
*object
= op
->object
;
462 _enter("OBJ%x", object
->debug_id
);
464 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_IN_PROGRESS
);
465 ASSERTCMP(object
->n_in_progress
, >, 0);
466 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
),
467 object
->n_exclusive
, >, 0);
468 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
),
469 object
->n_in_progress
, ==, 1);
471 spin_lock(&object
->lock
);
474 trace_fscache_op(object
->cookie
, op
, fscache_op_completed
);
475 op
->state
= FSCACHE_OP_ST_COMPLETE
;
478 trace_fscache_op(object
->cookie
, op
, fscache_op_cancelled
);
479 op
->state
= FSCACHE_OP_ST_CANCELLED
;
482 if (test_bit(FSCACHE_OP_EXCLUSIVE
, &op
->flags
))
483 object
->n_exclusive
--;
484 object
->n_in_progress
--;
485 if (object
->n_in_progress
== 0)
486 fscache_start_operations(object
);
488 spin_unlock(&object
->lock
);
491 EXPORT_SYMBOL(fscache_op_complete
);
494 * release an operation
495 * - queues pending ops if this is the last in-progress op
497 void fscache_put_operation(struct fscache_operation
*op
)
499 struct fscache_object
*object
;
500 struct fscache_cache
*cache
;
502 _enter("{OBJ%x OP%x,%d}",
503 op
->object
? op
->object
->debug_id
: 0,
504 op
->debug_id
, atomic_read(&op
->usage
));
506 ASSERTCMP(atomic_read(&op
->usage
), >, 0);
508 if (!atomic_dec_and_test(&op
->usage
))
511 trace_fscache_op(op
->object
? op
->object
->cookie
: NULL
, op
, fscache_op_put
);
514 ASSERTIFCMP(op
->state
!= FSCACHE_OP_ST_INITIALISED
&&
515 op
->state
!= FSCACHE_OP_ST_COMPLETE
,
516 op
->state
, ==, FSCACHE_OP_ST_CANCELLED
);
518 fscache_stat(&fscache_n_op_release
);
524 op
->state
= FSCACHE_OP_ST_DEAD
;
527 if (likely(object
)) {
528 if (test_bit(FSCACHE_OP_DEC_READ_CNT
, &op
->flags
))
529 atomic_dec(&object
->n_reads
);
530 if (test_bit(FSCACHE_OP_UNUSE_COOKIE
, &op
->flags
))
531 fscache_unuse_cookie(object
);
533 /* now... we may get called with the object spinlock held, so we
534 * complete the cleanup here only if we can immediately acquire the
535 * lock, and defer it otherwise */
536 if (!spin_trylock(&object
->lock
)) {
538 fscache_stat(&fscache_n_op_deferred_release
);
540 cache
= object
->cache
;
541 spin_lock(&cache
->op_gc_list_lock
);
542 list_add_tail(&op
->pend_link
, &cache
->op_gc_list
);
543 spin_unlock(&cache
->op_gc_list_lock
);
544 schedule_work(&cache
->op_gc
);
549 ASSERTCMP(object
->n_ops
, >, 0);
551 if (object
->n_ops
== 0)
552 fscache_raise_event(object
, FSCACHE_OBJECT_EV_CLEARED
);
554 spin_unlock(&object
->lock
);
560 EXPORT_SYMBOL(fscache_put_operation
);
563 * garbage collect operations that have had their release deferred
565 void fscache_operation_gc(struct work_struct
*work
)
567 struct fscache_operation
*op
;
568 struct fscache_object
*object
;
569 struct fscache_cache
*cache
=
570 container_of(work
, struct fscache_cache
, op_gc
);
576 spin_lock(&cache
->op_gc_list_lock
);
577 if (list_empty(&cache
->op_gc_list
)) {
578 spin_unlock(&cache
->op_gc_list_lock
);
582 op
= list_entry(cache
->op_gc_list
.next
,
583 struct fscache_operation
, pend_link
);
584 list_del(&op
->pend_link
);
585 spin_unlock(&cache
->op_gc_list_lock
);
588 trace_fscache_op(object
->cookie
, op
, fscache_op_gc
);
590 spin_lock(&object
->lock
);
592 _debug("GC DEFERRED REL OBJ%x OP%x",
593 object
->debug_id
, op
->debug_id
);
594 fscache_stat(&fscache_n_op_gc
);
596 ASSERTCMP(atomic_read(&op
->usage
), ==, 0);
597 ASSERTCMP(op
->state
, ==, FSCACHE_OP_ST_DEAD
);
599 ASSERTCMP(object
->n_ops
, >, 0);
601 if (object
->n_ops
== 0)
602 fscache_raise_event(object
, FSCACHE_OBJECT_EV_CLEARED
);
604 spin_unlock(&object
->lock
);
607 } while (count
++ < 20);
609 if (!list_empty(&cache
->op_gc_list
))
610 schedule_work(&cache
->op_gc
);
616 * execute an operation using fs_op_wq to provide processing context -
617 * the caller holds a ref to this object, so we don't need to hold one
619 void fscache_op_work_func(struct work_struct
*work
)
621 struct fscache_operation
*op
=
622 container_of(work
, struct fscache_operation
, work
);
625 _enter("{OBJ%x OP%x,%d}",
626 op
->object
->debug_id
, op
->debug_id
, atomic_read(&op
->usage
));
628 trace_fscache_op(op
->object
->cookie
, op
, fscache_op_work
);
630 ASSERT(op
->processor
!= NULL
);
633 fscache_hist(fscache_ops_histogram
, start
);
634 fscache_put_operation(op
);