// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
/*
 * Allocate a new bio; if a previous bio is passed in, chain it to the new
 * one and submit it, so callers can keep extending a chain of bios.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* Start sector and length must be aligned to the logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects = min_t(sector_t, nr_sects,
				bio_allowed_max_sectors(q));

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
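/*
 * Illustrative sketch (not part of the original file): a caller can batch
 * several discard ranges on one bio chain by reusing the anchor bio, then
 * wait for the whole chain once.  The extent values below are made up.
 */
#if 0
static int example_discard_two_extents(struct block_device *bdev)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 8192, 2048, GFP_KERNEL, 0,
					     &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
#endif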
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
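/*
 * Illustrative sketch (not part of the original file): a one-shot
 * synchronous discard of the first 1 MiB (2048 sectors of 512 bytes) of a
 * device, as an ioctl-style caller might issue it.  Values are made up.
 */
#if 0
static int example_discard_first_mib(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
}
#endif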
/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
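/*
 * Illustrative sketch (not part of the original file): fill a range with a
 * repeating logical block.  The caller supplies a page whose first logical
 * block holds the pattern; the device repeats it across the range.  The
 * sector values below are made up.
 */
#if 0
static int example_write_same_pattern(struct block_device *bdev,
				      struct page *pattern_page)
{
	/* Repeat the block in pattern_page over 8192 sectors from sector 4096. */
	return blkdev_issue_write_same(bdev, 4096, 8192, GFP_KERNEL,
				       pattern_page);
}
#endif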
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
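/*
 * Illustrative sketch (not part of the original file): request a zero-fill
 * that must use a hardware offload; with BLKDEV_ZERO_NOFALLBACK the call
 * fails with -EOPNOTSUPP instead of falling back to writing zero pages.
 * The range below is made up.
 */
#if 0
static int example_zeroout_offload_only(struct block_device *bdev)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, 0, 4096, GFP_KERNEL, &bio,
				     BLKDEV_ZERO_NOFALLBACK);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}
#endif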
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
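/*
 * Illustrative sketch (not part of the original file): synchronously zero
 * 1 MiB (2048 sectors of 512 bytes) starting at sector 0, allowing the
 * device to unmap the range if it can (flags == 0).  Values are made up.
 */
#if 0
static int example_zero_first_mib(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);
}
#endif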