// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_hw_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

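/*
 * Note on the values above: the permissive defaults act as identity
 * elements for the combining done in blk_stack_limits() below, e.g.
 * min_not_zero(UINT_MAX, b->max_hw_sectors) simply adopts the first
 * bottom device's limit, while the small SECTOR_SIZE granularities
 * only grow via max() as component devices are stacked.
 */
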
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                struct queue_limits *lim)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

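/*
 * Worked example (assuming 4k pages): a device reporting io_opt = 1 MiB
 * gets ra_pages = max(2 MiB / 4 KiB, VM_READAHEAD_PAGES) = 512 pages,
 * i.e. 2 MiB of read-ahead, twice the optimal I/O size.
 */
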
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!(lim->features & BLK_FEAT_ZONED)) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        /*
         * Given that active zones include open zones, the maximum number of
         * open zones cannot be larger than the maximum number of active zones.
         */
        if (lim->max_active_zones &&
            lim->max_open_zones > lim->max_active_zones)
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        /*
         * The Zone Append size is limited by the maximum I/O size and the zone
         * size given that it can't span zones.
         *
         * If no max_hw_zone_append_sectors limit is provided, the block layer
         * will emulate it, else we're also bound by the hardware limit.
         */
        lim->max_zone_append_sectors =
                min_not_zero(lim->max_hw_zone_append_sectors,
                        min(lim->chunk_sectors, lim->max_hw_sectors));
        return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
        struct blk_integrity *bi = &lim->integrity;

        if (!bi->tuple_size) {
                if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
                    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
                        pr_warn("invalid PI settings.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
                pr_warn("integrity support disabled.\n");
                return -EINVAL;
        }

        if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
            (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                pr_warn("ref tag not supported without checksum.\n");
                return -EINVAL;
        }

        if (!bi->interval_exp)
                bi->interval_exp = ilog2(lim->logical_block_size);

        return 0;
}

/*
 * Returns the maximum number of bytes guaranteed to fit in a bio.
 *
 * An atomic write is required to be an ITER_UBUF iov_iter (i.e. a single
 * vector), so we assume that at least PAGE_SIZE fits in each segment, apart
 * from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
        unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
        unsigned int length;

        length = min(max_segments, 2) * lim->logical_block_size;
        if (max_segments > 2)
                length += (max_segments - 2) * PAGE_SIZE;

        return length;
}

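/*
 * Worked example (assuming a 4k PAGE_SIZE and 512-byte logical blocks):
 * with max_segments = 4 the guarantee is 2 * 512 bytes for the first and
 * last segments plus (4 - 2) * 4096 bytes for the middle ones, i.e.
 * 9216 bytes.
 */
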
static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
        unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
                                      blk_queue_max_guaranteed_bio(lim));

        unit_limit = rounddown_pow_of_two(unit_limit);

        lim->atomic_write_max_sectors =
                min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
                    lim->max_hw_sectors);
        lim->atomic_write_unit_min =
                min(lim->atomic_write_hw_unit_min, unit_limit);
        lim->atomic_write_unit_max =
                min(lim->atomic_write_hw_unit_max, unit_limit);
        lim->atomic_write_boundary_sectors =
                lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
        unsigned int boundary_sectors;

        if (!lim->atomic_write_hw_max)
                goto unsupported;

        boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

        if (boundary_sectors) {
                /*
                 * A feature of boundary support is that it disallows merging
                 * of bios if the merged request would cross either a chunk
                 * sector or atomic write HW boundary, even though chunk
                 * sectors may be set just for performance.
                 * For simplicity, disallow atomic writes for a chunk sector
                 * which is non-zero and smaller than the atomic write HW
                 * boundary.
                 * Furthermore, chunk sectors must be a multiple of the atomic
                 * write HW boundary.  Otherwise boundary support becomes
                 * complicated.
                 * Devices which do not conform to these rules can be dealt
                 * with if and when they show up.
                 */
                if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
                        goto unsupported;

                /*
                 * The boundary size just needs to be a multiple of unit_max
                 * (and not necessarily a power-of-2), so this following check
                 * could be relaxed in future.
                 * Furthermore, if needed, unit_max could even be reduced so
                 * that it is compliant with a !power-of-2 boundary.
                 */
                if (!is_power_of_2(boundary_sectors))
                        goto unsupported;
        }

        blk_atomic_writes_update_limits(lim);
        return;

unsupported:
        lim->atomic_write_max_sectors = 0;
        lim->atomic_write_boundary_sectors = 0;
        lim->atomic_write_unit_min = 0;
        lim->atomic_write_unit_max = 0;
}

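/*
 * Illustrative sketch (not part of this file): the hardware limits a
 * driver might fill in before the validation above runs.  The values are
 * made up for a hypothetical device advertising 64k atomic writes with a
 * 64k boundary.
 */
#if 0
static void example_set_atomic_write_caps(struct queue_limits *lim)
{
        lim->atomic_write_hw_max = SZ_64K;      /* largest atomic write */
        lim->atomic_write_hw_unit_min = SZ_4K;  /* smallest write unit */
        lim->atomic_write_hw_unit_max = SZ_64K; /* largest write unit */
        lim->atomic_write_hw_boundary = SZ_64K; /* never crossed by a write */
}
#endif
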
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;
        unsigned int logical_block_sectors;
        int err;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and a
         * physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        else if (blk_validate_block_size(lim->logical_block_size)) {
                pr_warn("Invalid logical block size (%d)\n",
                        lim->logical_block_size);
                return -EINVAL;
        }
        if (lim->physical_block_size < lim->logical_block_size)
                lim->physical_block_size = lim->logical_block_size;

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
        if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                        logical_block_sectors);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account.  The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                        lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
        } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                        logical_block_sectors);

        /*
         * Random default for the maximum number of segments.  Drivers should
         * not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        if (lim->discard_granularity < lim->physical_block_size)
                lim->discard_granularity = lim->physical_block_size;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
                return -EINVAL;

        /*
         * Stacking devices may have both a virtual boundary and a max segment
         * size limit, so allow this setting now.  Long-term the two might need
         * to move out of the stacking limits, since we have immutable bvecs
         * and lower layer bio splitting is supposed to handle the two
         * correctly.
         */
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default
                 * that drivers probably should override.  Just like the I/O
                 * size we require drivers to at least handle a full page per
                 * segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
                        return -EINVAL;
        }

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits.  Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->flags &= ~BLK_FLAG_MISALIGNED;
        }

        if (!(lim->features & BLK_FEAT_WRITE_CACHE))
                lim->features &= ~BLK_FEAT_FUA;

        blk_validate_atomic_write_limits(lim);

        err = blk_validate_integrity_limits(lim);
        if (err)
                return err;
        return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

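/*
 * Worked example of the defaulting above: a driver that only sets
 * logical_block_size = 4096 ends up with physical_block_size, io_min and
 * discard_granularity at 4096, while max_hw_sectors defaults to
 * BLK_SAFE_MAX_SECTORS (255) and is rounded down to the 8-sector logical
 * block size, giving 248, which in turn caps max_sectors at 248.
 */
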
/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but max_user_discard_sectors is special and needs an explicit
         * initialization to the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:          queue to update
 * @lim:        limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim)
{
        int error;

        error = blk_validate_limits(lim);
        if (error)
                goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (q->crypto_profile && lim->integrity.tag_size) {
                pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
                error = -EINVAL;
                goto out_unlock;
        }
#endif

        q->limits = *lim;
        if (q->disk)
                blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

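/*
 * Illustrative sketch (not part of this file) of the update pattern
 * described above: queue_limits_start_update() (declared in blkdev.h)
 * takes q->limits_lock and returns a snapshot that the caller edits and
 * then commits.  The field tweaked here is an arbitrary example.
 */
#if 0
static int example_update_limits(struct request_queue *q)
{
        struct queue_limits lim = queue_limits_start_update(q);

        lim.max_user_sectors = 2 * PAGE_SECTORS;        /* arbitrary value */
        return queue_limits_commit_update(q, &lim);
}
#endif
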
/**
 * queue_limits_set - apply queue limits to queue
 * @q:          queue to update
 * @lim:        limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
                sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}

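/*
 * Worked example: with a 4096-byte granularity (8 sectors) and a partition
 * starting at sector 63, sector_div() leaves alignment = (63 % 8) << 9 =
 * 3584 bytes, so with a zero alignment_offset the next aligned boundary
 * sits (4096 + 0 - 3584) % 4096 = 512 bytes into the partition.
 */
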
static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

        /*
         * Some features need to be supported both by the stacking driver and
         * all underlying devices.  The stacking driver sets these flags before
         * stacking the limits, and this will clear the flags if any of the
         * underlying devices does not support it.
         */
        if (!(b->features & BLK_FEAT_NOWAIT))
                t->features &= ~BLK_FEAT_NOWAIT;
        if (!(b->features & BLK_FEAT_POLL))
                t->features &= ~BLK_FEAT_POLL;

        t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_user_sectors = min_not_zero(t->max_user_sectors,
                        b->max_user_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                        b->max_write_zeroes_sectors);
        t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
                        b->max_hw_zone_append_sectors);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                        b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                        b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                        b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                        b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                        b->max_segment_size);

        alignment = queue_limit_alignment_offset(b, start);

        /*
         * The bottom device has a different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {
                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->flags |= BLK_FLAG_MISALIGNED;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                        b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                        b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                        b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                        b->zone_write_granularity);
        if (!(t->features & BLK_FEAT_ZONED)) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

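/*
 * Worked example of the interval check above: a top device with a
 * 4096-byte minimum I/O and no alignment offset (top = 4096) stacked over
 * a bottom device with io_min = 65536 and a 512-byte alignment
 * (bottom = 66048): since 66048 % 4096 = 512, the intervals don't line
 * up, so the result is marked misaligned and -1 is returned.
 */
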
/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, bdev_limits(bdev),
                        get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

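/*
 * Illustrative sketch (not part of this file): how a DM/MD-style stacking
 * driver would typically combine the helpers above.  The example_* names
 * are made up.
 */
#if 0
static void example_stack_components(struct queue_limits *lim,
                struct block_device **bdevs, int nr)
{
        int i;

        blk_set_stacking_limits(lim);
        for (i = 0; i < nr; i++)
                queue_limits_stack_bdev(lim, bdevs[i], 0, "example");
}
#endif
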
/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
                struct queue_limits *b)
{
        struct blk_integrity *ti = &t->integrity;
        struct blk_integrity *bi = &b->integrity;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                return true;

        if (!ti->tuple_size) {
                /* inherit the settings from the first underlying device */
                if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
                        ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
                                (bi->flags & BLK_INTEGRITY_REF_TAG);
                        ti->csum_type = bi->csum_type;
                        ti->tuple_size = bi->tuple_size;
                        ti->pi_offset = bi->pi_offset;
                        ti->interval_exp = bi->interval_exp;
                        ti->tag_size = bi->tag_size;
                        goto done;
                }
                if (!bi->tuple_size)
                        goto done;
        }

        if (ti->tuple_size != bi->tuple_size)
                goto incompatible;
        if (ti->interval_exp != bi->interval_exp)
                goto incompatible;
        if (ti->tag_size != bi->tag_size)
                goto incompatible;
        if (ti->csum_type != bi->csum_type)
                goto incompatible;
        if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
            (bi->flags & BLK_INTEGRITY_REF_TAG))
                goto incompatible;

done:
        ti->flags |= BLK_INTEGRITY_STACKED;
        return true;

incompatible:
        memset(ti, 0, sizeof(*ti));
        return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:          the request queue for the device
 * @depth:      queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.flags & BLK_FLAG_MISALIGNED)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);