// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        /*
         * Chain the previous bio into the new one and submit it; the
         * caller only has to wait for the last bio in the chain.
         */
        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        unsigned int op;
        sector_t bs_mask;
        int alignment;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        /* The range must be aligned to the logical block size. */
        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would
                 * be misaligned, stop the discard at the previous aligned
                 * sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
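
                /*
                 * Worked example (illustrative numbers, not from the
                 * original source): with granularity = 8 and alignment = 2,
                 * a split ending at sector 27 fails the check (27 % 8 == 3),
                 * so end_sect becomes (27 - 2) / 8 * 8 + 2 = 26, the nearest
                 * earlier sector that keeps the next chunk starting on a
                 * discard-granularity boundary.
                 */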

                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
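
/*
 * Illustrative sketch (not part of the original file): because *biop carries
 * the bio chain across calls, several discontiguous ranges can be batched
 * under one plug and waited on once.  The helper name and the ranges are
 * hypothetical.
 */
static int __maybe_unused example_discard_two_ranges(struct block_device *bdev)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_NOFS, 0, &bio);
        if (!ret)
                ret = __blkdev_issue_discard(bdev, 8192, 2048, GFP_NOFS, 0,
                                             &bio);
        if (!ret && bio)
                ret = submit_bio_wait(bio);
        if (bio)
                bio_put(bio);
        blk_finish_plug(&plug);
        return ret;
}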

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:      blockdev to issue discard for
 * @sector:    start sector
 * @nr_sects:  number of sectors to discard
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @flags:     BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                /* A lack of discard support is not reported to the caller. */
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
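
/*
 * Illustrative sketch (not part of the original file): discarding a whole
 * device, e.g. from an mkfs-style helper.  The function name is
 * hypothetical; get_capacity() returns the size in 512-byte sectors,
 * matching the units blkdev_issue_discard() expects.
 */
static int __maybe_unused example_discard_whole_device(struct block_device *bdev)
{
        return blkdev_issue_discard(bdev, 0, get_capacity(bdev->bd_disk),
                                    GFP_KERNEL, 0);
}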

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:      target blockdev
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @page:      page containing data to write
 * @biop:      pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) that all carry
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct page *page, struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:      target blockdev
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @page:      page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects, gfp_t gfp_mask,
                                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
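
/*
 * Illustrative sketch (not part of the original file): replicating the
 * system zero page across a range via WRITE SAME.  The helper name is
 * hypothetical; on devices without WRITE SAME support this simply returns
 * -EOPNOTSUPP.
 */
static int __maybe_unused example_write_same_zero(struct block_device *bdev,
                                                  sector_t sector,
                                                  sector_t nr_sects)
{
        return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
                                       ZERO_PAGE(0));
}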

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_PAGES);
}
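
/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages there are 8 sectors per page, so nr_sects = 7 rounds up to 1 page
 * and nr_sects = 9 to 2 pages; very large ranges are capped at
 * BIO_MAX_PAGES pages per bio.
 */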

static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        while (nr_sects != 0) {
                bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        /* The bio is full once it accepts less than sz. */
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:      blockdev to issue
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @biop:      pointer to anchor bio
 * @flags:     controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                                         biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
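
/*
 * Illustrative sketch (not part of the original file): a caller that needs a
 * deterministic runtime can pass BLKDEV_ZERO_NOFALLBACK so that no zero
 * pages are written when the device lacks a zeroing offload.  The helper
 * name is hypothetical.
 */
static int __maybe_unused example_zeroout_offload_only(struct block_device *bdev,
                                                       sector_t sector,
                                                       sector_t nr_sects)
{
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                     &bio, BLKDEV_ZERO_NOFALLBACK);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret;
}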

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:      blockdev to write
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @flags:     controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
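
/*
 * Illustrative sketch (not part of the original file): the common case is a
 * single call that prefers the device's zeroing offload and transparently
 * falls back to writing zero pages.  The helper name is hypothetical.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
                                             sector_t sector,
                                             sector_t nr_sects)
{
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}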