/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-mq.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME		(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ		32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)
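
/*
 * Editorial note (not part of the original header): with the common
 * HZ=1000 configuration, BLK_BATCH_TIME is 20 jiffies (20ms) per batching
 * window, BLK_BATCH_REQ allows 32 requests inside that window, and
 * BLK_MAX_TIMEOUT caps a timeout timer at 5 seconds in the future
 * regardless of HZ.
 */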

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
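
/*
 * Illustrative sketch (editorial note, not from the original header):
 * while a queue is still being set up and @q->queue_lock is NULL, callers
 * such as blk_alloc_queue_node() may use the *_unlocked variants, e.g.
 *
 *	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 *
 * whereas on a live queue, flags are changed under the queue_lock:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The specific flags above are only examples; any queue flag follows the
 * same pattern.
 */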

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, ctx->cpu)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
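
/*
 * Usage sketch (editorial note, not from the original header): a caller
 * that already holds a q_usage_counter reference, e.g. while submitting
 * bios from generic_make_request() context, can take an extra reference
 * cheaply here and drop it later with blk_queue_exit():
 *
 *	blk_queue_enter_live(q);
 *	... hand off work that outlives the current reference ...
 *	blk_queue_exit(q);
 */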

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
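
/*
 * Worked example (editorial note, not from the original header): assume a
 * device with a 4095 virt boundary mask (4K pages). If the previous
 * vector ends at bv_offset + bv_len == 0x1200 within its page, then
 * (0x1200 & 4095) == 0x200, so __bvec_gap_to_prev() reports a gap no
 * matter where the next vector starts, and the two cannot share one SG
 * element.
 */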

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
	return test_bit(0, &rq->__deadline);
}
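
/*
 * Illustrative race (editorial note, not from the original header): both
 * the timeout handler and the normal completion path try to claim the
 * request; whichever sees test_and_set_bit() return 0 owns completion:
 *
 *	if (!blk_mark_rq_complete(rq))
 *		complete_the_request(rq);	// hypothetical winner path
 *
 * The loser simply backs off, so the request is completed exactly once.
 */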

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
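
/*
 * Editorial note (not from the original header): the hysteresis comes from
 * blk_queue_congestion_threshold() setting nr_congestion_on somewhat below
 * nr_requests and nr_congestion_off a bit lower still, so a queue hovering
 * near its request limit does not flip between congested and uncongested
 * on every allocation/completion; see that function for the exact formula.
 */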

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}
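
/*
 * Worked example (editorial note, not from the original header): with a
 * jiffies deadline of 0x1000, blk_rq_set_deadline() stores 0x1000; a later
 * blk_mark_rq_complete() turns __deadline into 0x1001, yet the deadline
 * read back via blk_rq_deadline() is still 0x1000, since bit 0 only
 * carries the "complete" marker. Re-arming the timer through
 * blk_rq_set_deadline() clears that bit again, which is the caveat the
 * comment above describes.
 */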

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', meantime it has to be aligned to the
 * logical block size, which is the minimum accepted unit by hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
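
/*
 * Worked example (editorial note, not from the original header): for a
 * 4096-byte logical block size, round_down(UINT_MAX, 4096) is 4294963200
 * bytes, giving 4294963200 >> 9 = 8388600 sectors of 512 bytes; for a
 * 512-byte logical block size the limit is 8388607 sectors.
 */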

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
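
/*
 * Usage sketch (editorial note, not from the original header): sleeping
 * request-allocation paths typically call this before touching
 * %current->io_context, e.g.
 *
 *	if (!create_io_context(GFP_NOIO, q->node))
 *		return NULL;	// allocation failed, caller copes
 *
 * GFP_NOIO is only an illustrative mask; the call must be made from
 * process context with IRQs enabled, as the comment above states.
 */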

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */