/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

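/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * the expected pairing of bio_queue_enter() with blk_queue_exit() around
 * work that must see an unfrozen, non-draining queue.
 */
static inline int blk_example_enter_do_exit(struct bio *bio)
{
	int ret = bio_queue_enter(bio);

	if (ret)
		return ret;	/* queue is draining or dying */
	/* ... hand the bio to the queue here ... */
	blk_queue_exit(bdev_get_queue(bio->bi_bdev));
	return 0;
}
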
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

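/*
 * Illustrative sketch (hypothetical helper and values, not part of this
 * interface): with a 4K virtual boundary, a previous segment that ends
 * mid-page followed by a segment that does not start at offset 0 leaves a
 * gap the hardware cannot express in a single SG entry.
 */
static inline bool blk_example_bvec_gap(void)
{
	struct queue_limits lim = { .virt_boundary_mask = 0xfff };
	struct bio_vec prev = { .bv_offset = 0, .bv_len = 0x800 };

	/* prev ends at 0x800 (mid-page); the next segment starts at 0x100. */
	return bvec_gap_to_prev(&lim, &prev, 0x100);	/* true: gap */
}
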
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be a normal read/write request and the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

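/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * how a merge path distinguishes the two DISCARD strategies above.  When
 * multi-range discard is supported, contiguity is not required; otherwise
 * the bio must start exactly where the request ends.
 */
static inline bool blk_example_discard_back_mergeable(struct request *req,
		struct bio *bio)
{
	if (blk_discard_mergable(req))
		return true;
	return blk_rq_pos(req) + blk_rq_sectors(req) == bio->bi_iter.bi_sector;
}
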
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it.  @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

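/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * callers hand the returned bio to the driver.  When a split happened, the
 * shortened remainder has already been re-submitted by the split helpers,
 * and a NULL return means the bio failed splitting and was completed.
 */
static inline struct bio *blk_example_split_for_driver(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	struct bio *split = __bio_split_to_limits(bio, lim, nr_segs);

	if (!split)
		return NULL;	/* splitting failed; bio was completed */
	/* When split != bio, the remainder was already re-submitted. */
	return split;
}
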
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool bio_is_zone_append(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool bio_is_zone_append(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
		struct folio *folio, size_t len, size_t offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

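/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * the pattern the timeout path relies on - only touch a request after
 * successfully taking a reference, and leave freeing to whoever drops the
 * last reference.
 */
static inline bool blk_example_req_ref_usage(struct request *req)
{
	if (!req_ref_inc_not_zero(req))
		return false;	/* request already completed and freed */

	/* ... safely inspect the request here ... */

	/* Whoever sees the final put is responsible for freeing it. */
	return !req_ref_put_and_test(req);
}
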
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

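/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * within a plugged, task-context submission the cached plug timestamp is
 * returned on every call, so repeated reads see the same value instead of
 * paying for a ktime_get_ns() per request.
 */
static inline bool blk_example_plug_time_is_cached(void)
{
	u64 t1 = blk_time_get_ns();
	u64 t2 = blk_time_get_ns();

	return !current->plug || !in_task() || t1 == t2;
}
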
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

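/*
 * Illustrative sketch (hypothetical helper, not part of this interface):
 * round-tripping the 1/12/51-bit layout described above.  The reserved top
 * bit is preserved, the size lands in the next 12 bits (so it is truncated
 * to at most 4095 sectors), and the low 51 bits carry the issue time.
 */
static inline bool blk_example_bio_issue_roundtrip(void)
{
	struct bio_issue issue = {
		.value = BIO_ISSUE_THROTL_SKIP_LATENCY,	/* reserved bit set */
	};

	bio_issue_init(&issue, 8);	/* 8 sectors == 4KiB */

	return (issue.value & BIO_ISSUE_RES_MASK) &&	/* reserved bit kept */
	       bio_issue_size(&issue) == 8 &&
	       bio_issue_time(&issue) <= BIO_ISSUE_TIME_MASK;
}
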
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

#endif /* BLK_INTERNAL_H */