// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

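/* Note: each operation is stamped with a unique debug ID from this counter;
 * it is what the OP%x values in the _enter()/_debug() traces below refer
 * to. */
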
static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @cookie: The cookie the operation is associated with
 * @op: The operation to initialise
 * @processor: The processor function to assign to the operation
 * @cancel: The cancellation function to assign (or NULL for the default no-op)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object as needed.
 */
void fscache_operation_init(struct fscache_cookie *cookie,
			    struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
	trace_fscache_op(cookie, op, fscache_op_init);
}
EXPORT_SYMBOL(fscache_operation_init);

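/* Note: an op begins life with a usage count of 1, held by the caller.  Every
 * place that stashes a pointer to the op - the object's pending_ops list, the
 * fscache_op_wq workqueue - takes a reference of its own with
 * atomic_inc(&op->usage) and drops it with fscache_put_operation(). */
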
/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	struct fscache_cookie *cookie = op->object->cookie;

	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

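/* Note: queue_work() returning false means the work item was already queued,
 * so the surplus reference taken just above is dropped again rather than
 * leaked. */
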
/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);

	trace_fscache_op(object->cookie, op, fscache_op_run);
	fscache_stat(&fscache_n_op_run);
}

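/* Note: an op with no processor function is left for its submitting thread to
 * run; the wake_up_bit() above is what releases a submitter sleeping on
 * FSCACHE_OP_WAITING. */
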
/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", op->processor, op->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

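/* The once_only latch means only the first unexpected submission gets dumped;
 * later ones are suppressed to avoid spamming the log. */
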
/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

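/* Note the asymmetry with fscache_submit_op() below: an exclusive op bumps
 * n_exclusive so that subsequent reads and writes queue behind it, whereas a
 * non-exclusive op only has to queue if something exclusive is already
 * accounted in n_exclusive. */
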
/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(object->cookie, op, fscache_op_submit);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}

		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

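/* Note: an exclusive op at the head of the queue may only start once nothing
 * else is in progress, and once it has been started the loop stops so that
 * nothing further starts behind it. */
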
/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_cancel);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		trace_fscache_op(object->cookie, op, fscache_op_cancel_all);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

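/* Note: cond_resched_lock() periodically drops object->lock and reschedules
 * if needed, bounding the latency impact of draining a long pending queue. */
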
/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		trace_fscache_op(object->cookie, op, fscache_op_completed);
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		trace_fscache_op(object->cookie, op, fscache_op_cancelled);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);

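/* Note: the ASSERTIFCMP pair above encodes the exclusivity invariant - an
 * exclusive op that is in progress must be the only op in progress. */
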
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

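/* Note: a dead op is freed either directly above or, if object->lock could
 * not be taken, by fscache_operation_gc() below - pend_link is reused to park
 * the op on cache->op_gc_list in the meantime. */
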
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		trace_fscache_op(object->cookie, op, fscache_op_gc);

		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

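/* Note: the GC work item handles at most 20 deferred releases per pass and
 * requeues itself if more remain, so that it doesn't monopolise its
 * workqueue. */
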
/*
 * execute an operation using fs_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(op->object->cookie, op, fscache_op_work);

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}