block/blk-lib.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
/*
 * Allocate a new bio and, if an anchor bio was passed in, chain the old
 * bio to the new one and submit the old one. The final anchor bio then
 * completes only after every bio chained to it has completed.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* The range must be aligned to the device's logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = nr_sects;
		sector_t end_sect;

		/* Bail out if the truncation to unsigned int left nothing. */
		if (!req_sects)
			goto fail;
		req_sects = min(req_sects, bio_allowed_max_sectors(q));

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
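/*
 * Example (not part of the original file): a minimal sketch of a caller
 * discarding an entire device. The helper name is hypothetical, and the
 * caller is assumed to already hold an open reference to @bdev.
 */
#if 0
static int example_discard_whole_bdev(struct block_device *bdev)
{
	/* Device size in 512B sectors; i_size_read() returns bytes. */
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	/* GFP_KERNEL is fine here since this path may sleep. */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}
#endif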
/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all carry
 *    the same page of data.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
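/*
 * Example (not part of the original file): a hedged sketch that replicates
 * one logical block of @page across a sector range via WRITE SAME. The
 * helper name is hypothetical; @page must hold at least one logical block
 * of payload, matching the bvec setup above.
 */
#if 0
static int example_write_same_range(struct block_device *bdev,
				    struct page *page, sector_t start,
				    sector_t nr_sects)
{
	/* Returns -EOPNOTSUPP if the device lacks WRITE SAME support. */
	return blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL,
				       page);
}
#endif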
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
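/*
 * Worked example (assuming 4 KiB pages, i.e. 8 sectors per page):
 * nr_sects = 7  -> DIV_ROUND_UP(7, 8)  = 1 page,
 * nr_sects = 17 -> DIV_ROUND_UP(17, 8) = 3 pages,
 * and a very large nr_sects is clamped to BIO_MAX_PAGES.
 */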
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Fill the bio with references to the shared zero page. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* A short add means the bio is full; chain a new one. */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Try the WRITE ZEROES offload first, then fall back to zero pages. */
	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
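/*
 * Example (not part of the original file): the chained-bio calling pattern
 * this helper expects. The caller owns the returned anchor bio and must
 * submit it; the function name below is hypothetical.
 */
#if 0
static int example_zeroout_chain(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, 0);
	if (ret)
		return ret;
	if (bio) {
		/* A real caller might chain more work before waiting. */
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}
#endif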
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
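/*
 * Example (not part of the original file): zero a range while keeping the
 * blocks provisioned on a thinly provisioned device, per the kernel-doc
 * above. The helper name is hypothetical.
 */
#if 0
static int example_zero_keep_provisioned(struct block_device *bdev,
					 sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}
#endif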