/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}

__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

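/*
 * Informational sketch (not part of the upstream file): the completion
 * path is expected to consult blk_should_fake_timeout() and, when it
 * returns non-zero, drop the completion so the request appears to hang
 * and the normal timeout machinery fires. Roughly:
 *
 *	void blk_mq_complete_request(struct request *rq)
 *	{
 *		if (unlikely(blk_should_fake_timeout(rq->q)))
 *			return;		// pretend the completion was lost
 *		__blk_mq_complete_request(rq);
 *	}
 *
 * The caller names and signatures above are recalled from blk-mq and
 * vary by kernel version.
 */
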
static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */

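/*
 * Usage notes (informational, not part of the upstream file): the
 * fault-injection attribute above follows the generic fault-attr
 * conventions, so it can typically be configured at boot time with
 *
 *	fail_io_timeout=<interval>,<probability>,<space>,<times>
 *
 * or at run time through the debugfs knobs created by
 * fault_create_debugfs_attr() under /sys/kernel/debug/fail_io_timeout/.
 * In addition, QUEUE_FLAG_FAIL_IO must be set on the target queue
 * through the per-disk sysfs attribute that is backed by
 * part_timeout_show()/part_timeout_store() and wired up elsewhere in
 * the block layer (genhd.c).
 */
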
/**
 * blk_abort_request -- Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that timeout scan takes place
	 * immediately and that scan sees the new timeout value.
	 * No need for fancy synchronizations.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

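/*
 * Illustrative sketch only: a low-level driver that decides a command is
 * dead in its own error handler could hand it back to the block layer
 * like this. The driver function below is hypothetical.
 *
 *	static void mydrv_kill_request(struct request *rq)
 *	{
 *		// Force an immediate timeout scan; the queue's timeout
 *		// handler then decides how to recover the request.
 *		blk_abort_request(rq);
 *	}
 */
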
unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->rq_flags &= ~RQF_TIMED_OUT;

	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = blk_rq_timeout(round_jiffies_up(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to added timer slack to group timers, the timer
		 * will often be a little in front of what we asked for.
		 * So apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
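
/*
 * Rough flow sketch (informational, not part of the upstream file):
 *
 *	blk_mq_start_request(rq)
 *		blk_add_timer(rq)		// arm or extend q->timeout
 *	...
 *	q->timeout expires
 *		q->timeout_work runs		// scan for expired requests
 *			driver ->timeout()	// per expired request
 *
 * The exact caller and worker names vary by kernel version; the sketch
 * only shows how the per-queue timer and per-request deadlines relate.
 */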