block/blk-lib.c
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
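
/*
 * Allocate the next bio in a chain: if @bio is non-NULL it is chained to
 * the newly allocated bio and submitted, so completions funnel into the
 * chain tail that is returned.  Passing NULL starts a new chain; the
 * caller submits (and typically waits on) the final bio it gets back.
 */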
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
                            gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(rw, bio);
        }

        return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        int alignment;

        if (!q)
                return -ENXIO;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
        if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
                return -EOPNOTSUPP;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would
                 * be misaligned, stop the discard at the previous aligned
                 * sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
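
                /*
                 * Worked example (illustrative numbers only): with a
                 * granularity of 8 sectors and an alignment offset of 2,
                 * a split that would end at sector 15 is pulled back to
                 * (15 - 2) / 8 * 8 + 2 = 10, so the next chunk begins on
                 * a discard-aligned boundary.
                 */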

                bio = next_bio(bio, type, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
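
/*
 * Usage sketch (illustrative, not part of this file): because the partly
 * built chain is handed back through @biop, a caller can string several
 * ranges together and wait only once.  "sketch_discard_two_ranges" is a
 * hypothetical helper:
 *
 *	static int sketch_discard_two_ranges(struct block_device *bdev,
 *					     sector_t s1, sector_t n1,
 *					     sector_t s2, sector_t n2)
 *	{
 *		int type = REQ_WRITE | REQ_DISCARD;
 *		struct bio *bio = NULL;
 *		int ret;
 *
 *		ret = __blkdev_issue_discard(bdev, s1, n1, GFP_KERNEL,
 *					     type, &bio);
 *		if (!ret)
 *			ret = __blkdev_issue_discard(bdev, s2, n2,
 *						     GFP_KERNEL, type, &bio);
 *		if (!ret && bio)
 *			ret = submit_bio_wait(type, bio);
 *		return ret;
 *	}
 */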

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        int type = REQ_WRITE | REQ_DISCARD;
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        if (flags & BLKDEV_DISCARD_SECURE)
                type |= REQ_SECURE;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(type, bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
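
/*
 * Usage sketch (illustrative, not part of this file): a filesystem that
 * has just freed an extent might discard it like this; the sector and
 * count values are made up:
 *
 *	int ret = blkdev_issue_discard(bdev, 2048, 4096, GFP_NOFS, 0);
 *
 *	if (ret)
 *		pr_warn("discard failed: %d\n", ret);
 */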

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = NULL;
        int ret = 0;

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
        }

        if (bio)
                ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
        return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
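
/*
 * Usage sketch (illustrative, not part of this file): replicating one
 * logical block of data across a range.  "pattern_page" is a hypothetical
 * page the caller has filled with a block's worth of data:
 *
 *	if (bdev_write_same(bdev))
 *		ret = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					      GFP_KERNEL, pattern_page);
 */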

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio = NULL;
        unsigned int sz;

        while (nr_sects != 0) {
                bio = next_bio(bio, WRITE,
                               min(nr_sects, (sector_t)BIO_MAX_PAGES),
                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;

                while (nr_sects != 0) {
                        sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
        }

        if (bio)
                return submit_bio_wait(WRITE, bio);
        return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range.  If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
            blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
                return 0;

        if (bdev_write_same(bdev) &&
            blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                    ZERO_PAGE(0)) == 0)
                return 0;

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
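
/*
 * Usage sketch (illustrative, not part of this file): zeroing a range
 * while letting the function take the cheaper discard path when the
 * device guarantees zeroes on read:
 *
 *	int ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
 *				       GFP_NOFS, true);
 */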