mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] / block / blk-exec.c
blob 5c0f3dc446dc7caa1c0cef086740744c1eaafea9
/*
 * Functions for issuing and executing passthrough block layer requests
 */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/bio.h>
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/sched/sysctl.h>
11 #include "blk.h"
12 #include "blk-mq-sched.h"
14 /**
15 * blk_end_sync_rq - executes a completion event on a request
16 * @rq: request to complete
17 * @error: end I/O status of the request
19 static void blk_end_sync_rq(struct request *rq, blk_status_t error)
21 struct completion *waiting = rq->end_io_data;
23 rq->end_io_data = NULL;
26 * complete last, if this is a stack request the process (and thus
27 * the rq pointer) could be invalid right after this complete()
29 complete(waiting);
32 /**
33 * blk_execute_rq_nowait - insert a request into queue for execution
34 * @q: queue to insert the request in
35 * @bd_disk: matching gendisk
36 * @rq: request to insert
37 * @at_head: insert request at head or tail of queue
38 * @done: I/O completion handler
40 * Description:
41 * Insert a fully prepared request at the back of the I/O scheduler queue
42 * for execution. Don't wait for completion.
44 * Note:
45 * This function will invoke @done directly if the queue is dead.
47 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
48 struct request *rq, int at_head,
49 rq_end_io_fn *done)
51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
53 WARN_ON(irqs_disabled());
54 WARN_ON(!blk_rq_is_passthrough(rq));
56 rq->rq_disk = bd_disk;
57 rq->end_io = done;
60 * don't check dying flag for MQ because the request won't
61 * be reused after dying flag is set
63 if (q->mq_ops) {
64 blk_mq_sched_insert_request(rq, at_head, true, false, false);
65 return;
68 spin_lock_irq(q->queue_lock);
70 if (unlikely(blk_queue_dying(q))) {
71 rq->rq_flags |= RQF_QUIET;
72 __blk_end_request_all(rq, BLK_STS_IOERR);
73 spin_unlock_irq(q->queue_lock);
74 return;
77 __elv_add_request(q, rq, where);
78 __blk_run_queue(q);
79 spin_unlock_irq(q->queue_lock);
81 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
83 /**
84 * blk_execute_rq - insert a request into queue for execution
85 * @q: queue to insert the request in
86 * @bd_disk: matching gendisk
87 * @rq: request to insert
88 * @at_head: insert request at head or tail of queue
90 * Description:
91 * Insert a fully prepared request at the back of the I/O scheduler queue
92 * for execution and wait for completion.
94 void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
95 struct request *rq, int at_head)
97 DECLARE_COMPLETION_ONSTACK(wait);
98 unsigned long hang_check;
100 rq->end_io_data = &wait;
101 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
103 /* Prevent hang_check timer from firing at us during very long I/O */
104 hang_check = sysctl_hung_task_timeout_secs;
105 if (hang_check)
106 while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
107 else
108 wait_for_completion_io(&wait);
110 EXPORT_SYMBOL(blk_execute_rq);