/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}

	return __bio_queue_enter(q, bio);
}
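
/*
 * Illustrative sketch (hypothetical caller, not from this file): a submission
 * path that enters the queue by hand is expected to balance a successful
 * bio_queue_enter() with blk_queue_exit() once it is done with the queue,
 * where do_submit() below is a made-up helper:
 *
 *	if (bio_queue_enter(bio))
 *		return;
 *	do_submit(bio);
 *	blk_queue_exit(bdev_get_queue(bio->bi_bdev));
 *
 * A non-zero return means the queue could not be entered (e.g. it is frozen
 * or dying) and the bio has already been dealt with by __bio_queue_enter().
 */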
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}
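
/*
 * Worked example (illustrative): with the default
 * sysctl_hung_task_timeout_secs of 120, the timed wait above wakes up every
 * 60 seconds, so the hung-task detector never observes an uninterrupted
 * sleep longer than its threshold.  If the sysctl is 0 (detector disabled),
 * the plain uninterruptible wait is used instead.
 */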
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
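
/*
 * Worked example (illustrative): with lim->virt_boundary_mask == 0xfff (4K),
 * a previous bvec with bv_offset == 0 and bv_len == 0x1000 ends exactly on
 * the boundary, so a follow-up vector starting at offset 0 creates no gap.
 * If the previous bvec were only 0x800 bytes long, any follow-up vector
 * would report a gap, because the previous one does not end on the virtual
 * boundary.
 */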
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}
/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be normal read/write requests. The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
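
/*
 * Illustrative example: a device reporting queue_max_discard_segments() == 64
 * can have several non-contiguous discard bios merged into one multi-range
 * request, whereas a device reporting 1 only merges discards whose sector
 * ranges are contiguous, exactly like ordinary reads and writes.
 */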
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}
static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}
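
/*
 * Worked example (illustrative): the UINT_MAX >> SECTOR_SHIFT clamp above
 * caps a single discard/secure-erase at 8388607 sectors (just under 4 GiB),
 * so the request's byte count always fits in a 32-bit unsigned value even
 * when max_discard_sectors is larger.
 */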
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}
bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);
#define BLK_MAX_REQUEST_COUNT 32
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);
/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}
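
/*
 * Illustrative sketch (hypothetical caller, not from this file): a submission
 * path would typically split before mapping the bio to a request,
 *
 *	unsigned int nr_segs;
 *
 *	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 *	if (!bio)
 *		return;
 *
 * and then use nr_segs when allocating or merging the request.  The
 * remainder, if any, has already been re-submitted by the split helper.
 */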
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}
/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */
struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}
#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
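
/*
 * Illustrative sketch (simplified view of the completion path): the request
 * completion code reflects the request sector back into each bio before
 * ending it, roughly
 *
 *	blk_zone_update_request_bio(rq, bio);
 *	bio_endio(bio);
 *
 * so a REQ_OP_ZONE_APPEND issuer can read the sector the data actually landed
 * at from bio->bi_iter.bi_sector once the bio completes.
 */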
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
		struct folio *folio, size_t len, size_t offset,
		unsigned int max_sectors, bool *same_page);
/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}
struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);
int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;
blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
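
/*
 * Illustrative usage sketch (simplified from how blk-mq uses these helpers):
 * a path that can race with completion, such as the timeout handler, only
 * touches a request after successfully taking a reference, and the last
 * reference holder frees it:
 *
 *	if (!req_ref_inc_not_zero(rq))
 *		return;
 *	... inspect rq ...
 *	if (req_ref_put_and_test(rq))
 *		__blk_mq_free_request(rq);
 */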
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
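
/*
 * Illustrative sketch: while a task has a plug active, repeated calls are
 * served from the cached plug timestamp instead of hitting the clocksource,
 *
 *	blk_start_plug(&plug);
 *	t1 = blk_time_get_ns();
 *	t2 = blk_time_get_ns();		(t2 == t1, from plug->cur_ktime)
 *	blk_finish_plug(&plug);
 *
 * and the cached value is dropped when the plug is flushed, at which point
 * PF_BLOCK_TS is cleared again.
 */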
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
		sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
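
/*
 * Worked example (illustrative): the packed layout is
 *   bit 63       : reserved (e.g. BIO_ISSUE_THROTL_SKIP_LATENCY)
 *   bits 62..51  : size in sectors, truncated to 12 bits
 *   bits 50..0   : issue time in nanoseconds, truncated to 51 bits
 * so bio_issue_size() is exact only for bios up to 4095 sectors (just under
 * 2 MiB), and the 51-bit timestamp wraps roughly every 26 days.
 */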
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
static inline void blk_freeze_acquire_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}

#endif /* BLK_INTERNAL_H */