/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
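/*
 * For example, a REQ_FLUSH|REQ_FUA write with data on a device that has a
 * writeback cache but no FUA support is sequenced as
 *
 *	PREFLUSH -> DATA -> POSTFLUSH
 *
 * while on a device that does support FUA it becomes
 *
 *	PREFLUSH -> DATA (with REQ_FUA passed through)
 *
 * and on a device without a writeback cache it is executed as a plain
 * write (or completed immediately if it carries no data).
 */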
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};
static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
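/*
 * blk_flush_cur_seq() returns the next step @rq has to execute: the lowest
 * REQ_FSEQ_* bit which is not yet set in rq->flush.seq.
 */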
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}
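/*
 * (Re)queue @rq for dispatch.  @add_front puts it at the head of the queue
 * so the flush sequence isn't starved by other requests.  Returns %true if
 * the caller should kick the queue afterwards; the blk-mq path kicks its
 * requeue list itself and returns %false.
 */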
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}
/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}
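/*
 * flush_end_io() is the completion handler of the flush request itself.
 * It retires the running flush and advances every request that was waiting
 * on it to the next step of its sequence.
 */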
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to empty queue_head may stall the
	 *    queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since they can't be in
	 * flight at the same time, and acquire the tag's ownership for the
	 * flush req.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}
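/*
 * flush_data_end_io() is the completion handler of the DATA step on the
 * legacy (!mq) path.  It records that DATA is done and kicks the queue if
 * that made new flush work dispatchable.
 */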
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}
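/*
 * mq_flush_data_end_io() is the blk-mq counterpart of flush_data_end_io().
 * The flush state is protected by fq->mq_flush_lock and the hardware queue
 * is run directly when new work becomes dispatchable.
 */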
static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or __blk_mq_run_hw_queue() to dispatch request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_insert_request(rq, false, false, true);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	if non-NULL, where to store the sector of a flush error
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  The caller can supply room for storing the error offset
 *    in case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
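/*
 * Example usage (illustrative, not part of this file): a filesystem's
 * fsync path can force the device cache to stable storage with
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *
 * passing NULL when it doesn't care about the error sector.
 */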
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops) {
		spin_lock_init(&fq->mq_flush_lock);
		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	}

	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio-based request queues have no flush queue */