// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
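
/*
 * Allocate the next bio in a chain.  If a previous bio is passed in, it is
 * chained to the new one and submitted, so callers can build arbitrarily
 * long sequences of bios one step at a time without holding them all.
 */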
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Issue in chunks of the user defined max discard setting,
		 * ensuring that bi_size doesn't overflow
		 */
		req_sects = min_t(sector_t, nr_sects,
				q->limits.max_discard_sectors);
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
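
		/*
		 * Worked example for the trim above (illustrative numbers,
		 * not from the original file): with granularity 8 and
		 * alignment 2, a chunk that would end at sector 1005 is
		 * trimmed back to end at 1002, the largest end_sect below
		 * 1005 with end_sect % granularity == alignment, so the
		 * next chunk starts on a discard-aligned boundary.
		 */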

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
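
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * discard a freed range.  example_discard_range() is a hypothetical helper;
 * it assumes @bdev is an open block device and that @sector/@nr_sects are
 * expressed in 512-byte sectors and satisfy the device's alignment rules.
 */
static inline int example_discard_range(struct block_device *bdev,
					sector_t sector, sector_t nr_sects)
{
	/* flags == 0 requests a plain, non-secure discard */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
}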

/**
 * __blkdev_issue_write_same - generate a number of bios carrying the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all write
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
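
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * that replicates one logical block's worth of data from @page across a
 * range.  Assumes the device advertises WRITE SAME support
 * (bdev_write_same() != 0); otherwise the call returns -EOPNOTSUPP.
 */
static inline int example_write_same_range(struct block_device *bdev,
					   sector_t sector, sector_t nr_sects,
					   struct page *page)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       page);
}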

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
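
/*
 * Worked example for __blkdev_sectors_to_bio_pages() (illustrative): with
 * 4 KiB pages there are 8 sectors per page, so nr_sects = 7 rounds up to
 * 1 page and nr_sects = 17 rounds up to 3 pages; very large ranges are
 * capped at BIO_MAX_PAGES.
 */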

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;	/* bio is full, chain a new one */
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
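
/*
 * Illustrative sketch, not part of the original file: the __blkdev_* variants
 * accumulate a bio chain through @biop so that a caller can batch work and
 * wait once.  example_zero_first_mib() is a hypothetical helper; it mirrors
 * the submit-and-wait pattern used by blkdev_issue_zeroout() below.
 */
static inline int example_zero_first_mib(struct block_device *bdev)
{
	struct bio *bio = NULL;
	int ret;

	/* 2048 sectors * 512 B = 1 MiB, zeroed with default flags */
	ret = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio, 0);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	return ret;
}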

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
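
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * that insists on a hardware zeroing offload and refuses the zero-page
 * fallback, so lack of support surfaces as -EOPNOTSUPP instead of slow
 * explicit writes.
 */
static inline int example_zero_offload_only(struct block_device *bdev,
					    sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}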