block/blk-exec.c (linux/fpc-iii.git)
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
        struct completion *waiting = rq->end_io_data;

        rq->end_io_data = NULL;
        __blk_put_request(rq->q, rq);

        /*
         * complete last, if this is a stack request the process (and thus
         * the rq pointer) could be invalid right after this complete()
         */
        complete(waiting);
}
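
/*
 * Note: blk_end_sync_rq() above is the end_io handler that the synchronous
 * blk_execute_rq() below hands to blk_execute_rq_nowait(); rq->end_io_data
 * carries the on-stack completion that the submitting task sleeps on.
 */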

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                           struct request *rq, int at_head,
                           rq_end_io_fn *done)
{
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
        bool is_pm_resume;

        WARN_ON(irqs_disabled());

        rq->rq_disk = bd_disk;
        rq->end_io = done;
        /*
         * need to check this before __blk_run_queue(), because rq can
         * be freed before that returns.
         */
        is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

        spin_lock_irq(q->queue_lock);

        if (unlikely(blk_queue_dying(q))) {
                rq->cmd_flags |= REQ_QUIET;
                rq->errors = -ENXIO;
                __blk_end_request_all(rq, rq->errors);
                spin_unlock_irq(q->queue_lock);
                return;
        }

        __elv_add_request(q, rq, where);
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
        if (is_pm_resume)
                __blk_run_queue_uncond(q);
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
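
/*
 * Illustrative sketch (not part of the original file): a caller that has
 * prepared a request with blk_get_request() could submit it asynchronously
 * roughly as below. The callback name my_end_io and the surrounding setup
 * are assumptions for the example; the callback mirrors blk_end_sync_rq()
 * above in dropping the request reference itself.
 *
 *	static void my_end_io(struct request *rq, int error)
 *	{
 *		if (error)
 *			pr_warn("pass-through request failed: %d\n", error);
 *		__blk_put_request(rq->q, rq);	// queue lock already held, as in blk_end_sync_rq()
 *	}
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	// ...fill in rq->cmd, rq->cmd_len, rq->timeout, map any data...
 *	blk_execute_rq_nowait(q, bd_disk, rq, 0, my_end_io);
 */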

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        char sense[SCSI_SENSE_BUFFERSIZE];
        int err = 0;
        unsigned long hang_check;

        /*
         * we need an extra reference to the request, so we can look at
         * it after io completion
         */
        rq->ref_count++;

        if (!rq->sense) {
                memset(sense, 0, sizeof(sense));
                rq->sense = sense;
                rq->sense_len = 0;
        }

        rq->end_io_data = &wait;
        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
        if (hang_check)
                while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
        else
                wait_for_completion_io(&wait);

        if (rq->errors)
                err = -EIO;

        return err;
}
EXPORT_SYMBOL(blk_execute_rq);
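
/*
 * Illustrative sketch (not part of the original file): synchronous use in
 * the style of the SCSI pass-through paths. Everything other than the
 * block-layer calls shown is assumed context for the example.
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->timeout = 60 * HZ;
 *	// ...copy the CDB into rq->cmd, map data with blk_rq_map_user()...
 *	err = blk_execute_rq(q, bd_disk, rq, 0);
 *	// the extra reference taken by blk_execute_rq() keeps rq valid here
 *	blk_put_request(rq);
 */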