block/blk-exec.c (Linux 3.18-rc4)
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	WARN_ON(irqs_disabled());
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

	/*
	 * need to check this before __blk_run_queue(), because rq can
	 * be freed before that returns.
	 */
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		rq->cmd_flags |= REQ_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/* the queue is stopped so it won't be run */
	if (is_pm_resume)
		__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
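
/*
 * Illustrative sketch (not part of this file): one plausible asynchronous
 * caller of blk_execute_rq_nowait(), modeled on in-tree users such as
 * scsi_eh_lock_door().  The function names my_tur_done()/my_submit_tur()
 * are hypothetical.  On the legacy (non-mq) path the @done handler is
 * invoked with q->queue_lock held, so the request is dropped with
 * __blk_put_request(), which also handles the blk-mq case in this kernel.
 */
static void my_tur_done(struct request *req, int error)
{
	/* runs with req->q->queue_lock held on the non-mq path */
	__blk_put_request(req->q, req);
}

static int my_submit_tur(struct request_queue *q)
{
	struct request *req;

	req = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	blk_rq_set_block_pc(req);	/* BLOCK_PC request, no data transfer */
	req->cmd[0] = 0x00;		/* TEST UNIT READY */
	req->cmd_len = 6;
	req->timeout = 10 * HZ;
	req->cmd_flags |= REQ_QUIET;

	/* fire and forget; my_tur_done() frees the request on completion */
	blk_execute_rq_nowait(q, NULL, req, 1, my_tur_done);
	return 0;
}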
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	if (rq->sense == sense) {
		rq->sense = NULL;
		rq->sense_len = 0;
	}

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
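
/*
 * Illustrative sketch (not part of this file): a synchronous caller of
 * blk_execute_rq(), loosely following the pattern of scsi_execute() in
 * drivers/scsi/scsi_lib.c.  my_read_capacity() and its buffer handling are
 * hypothetical; callers that want sense data must supply rq->sense
 * themselves, otherwise blk_execute_rq() substitutes a throwaway on-stack
 * buffer as seen above.
 */
static int my_read_capacity(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *req;
	int err;

	req = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);
	blk_rq_set_block_pc(req);

	/* attach the kernel buffer as the data-in payload */
	err = blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
	if (err)
		goto out;

	req->cmd[0] = 0x25;		/* READ CAPACITY(10) */
	req->cmd_len = 10;
	req->timeout = 30 * HZ;

	/* blocks until blk_end_sync_rq() signals the on-stack completion */
	err = blk_execute_rq(q, NULL, req, 0);
out:
	blk_put_request(req);
	return err;
}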