/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/rbtree.h>
14 #include <linux/blkdev.h>
16 static inline sector_t
blk_zone_start(struct request_queue
*q
,
19 sector_t zone_mask
= blk_queue_zone_size(q
) - 1;
21 return sector
& ~zone_mask
;
25 * Check that a zone report belongs to the partition.
26 * If yes, fix its start sector and write pointer, copy it in the
27 * zone information array and return true. Return false otherwise.
29 static bool blkdev_report_zone(struct block_device
*bdev
,
31 struct blk_zone
*zone
)
33 sector_t offset
= get_start_sect(bdev
);
35 if (rep
->start
< offset
)
39 if (rep
->start
+ rep
->len
> bdev
->bd_part
->nr_sects
)
42 if (rep
->type
== BLK_ZONE_TYPE_CONVENTIONAL
)
43 rep
->wp
= rep
->start
+ rep
->len
;
46 memcpy(zone
, rep
, sizeof(struct blk_zone
));
52 * blkdev_report_zones - Get zones information
53 * @bdev: Target block device
54 * @sector: Sector from which to report zones
55 * @zones: Array of zone structures where to return the zones information
56 * @nr_zones: Number of zone structures in the zone array
57 * @gfp_mask: Memory allocation flags (for bio_alloc)
60 * Get zone information starting from the zone containing @sector.
61 * The number of zone information reported may be less than the number
62 * requested by @nr_zones. The number of zones actually reported is
63 * returned in @nr_zones.
65 int blkdev_report_zones(struct block_device
*bdev
,
67 struct blk_zone
*zones
,
68 unsigned int *nr_zones
,
71 struct request_queue
*q
= bdev_get_queue(bdev
);
72 struct blk_zone_report_hdr
*hdr
;
73 unsigned int nrz
= *nr_zones
;
77 unsigned int nr_pages
;
80 unsigned int i
, n
, nz
;
88 if (!blk_queue_is_zoned(q
))
94 if (sector
> bdev
->bd_part
->nr_sects
) {
100 * The zone report has a header. So make room for it in the
101 * payload. Also make sure that the report fits in a single BIO
102 * that will not be split down the stack.
104 rep_bytes
= sizeof(struct blk_zone_report_hdr
) +
105 sizeof(struct blk_zone
) * nrz
;
106 rep_bytes
= (rep_bytes
+ PAGE_SIZE
- 1) & PAGE_MASK
;
107 if (rep_bytes
> (queue_max_sectors(q
) << 9))
108 rep_bytes
= queue_max_sectors(q
) << 9;
110 nr_pages
= min_t(unsigned int, BIO_MAX_PAGES
,
111 rep_bytes
>> PAGE_SHIFT
);
112 nr_pages
= min_t(unsigned int, nr_pages
,
113 queue_max_segments(q
));
115 bio
= bio_alloc(gfp_mask
, nr_pages
);
120 bio
->bi_iter
.bi_sector
= blk_zone_start(q
, sector
);
121 bio_set_op_attrs(bio
, REQ_OP_ZONE_REPORT
, 0);
123 for (i
= 0; i
< nr_pages
; i
++) {
124 page
= alloc_page(gfp_mask
);
129 if (!bio_add_page(bio
, page
, PAGE_SIZE
, 0)) {
138 ret
= submit_bio_wait(bio
);
143 * Process the report result: skip the header and go through the
144 * reported zones to fixup and fixup the zone information for
145 * partitions. At the same time, return the zone information into
151 bio_for_each_segment_all(bv
, bio
, i
) {
156 addr
= kmap_atomic(bv
->bv_page
);
158 /* Get header in the first page */
161 hdr
= (struct blk_zone_report_hdr
*) addr
;
162 nr_rep
= hdr
->nr_zones
;
163 ofst
= sizeof(struct blk_zone_report_hdr
);
166 /* Fixup and report zones */
167 while (ofst
< bv
->bv_len
&&
168 n
< nr_rep
&& nz
< nrz
) {
169 if (blkdev_report_zone(bdev
, addr
+ ofst
, &zones
[nz
]))
171 ofst
+= sizeof(struct blk_zone
);
177 if (n
>= nr_rep
|| nz
>= nrz
)
184 bio_for_each_segment_all(bv
, bio
, i
)
185 __free_page(bv
->bv_page
);
190 EXPORT_SYMBOL_GPL(blkdev_report_zones
);
193 * blkdev_reset_zones - Reset zones write pointer
194 * @bdev: Target block device
195 * @sector: Start sector of the first zone to reset
196 * @nr_sectors: Number of sectors, at least the length of one zone
197 * @gfp_mask: Memory allocation flags (for bio_alloc)
200 * Reset the write pointer of the zones contained in the range
201 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
202 * is valid, but the specified range should not contain conventional zones.
204 int blkdev_reset_zones(struct block_device
*bdev
,
205 sector_t sector
, sector_t nr_sectors
,
208 struct request_queue
*q
= bdev_get_queue(bdev
);
209 sector_t zone_sectors
;
210 sector_t end_sector
= sector
+ nr_sectors
;
217 if (!blk_queue_is_zoned(q
))
220 if (end_sector
> bdev
->bd_part
->nr_sects
)
224 /* Check alignment (handle eventual smaller last zone) */
225 zone_sectors
= blk_queue_zone_size(q
);
226 if (sector
& (zone_sectors
- 1))
229 if ((nr_sectors
& (zone_sectors
- 1)) &&
230 end_sector
!= bdev
->bd_part
->nr_sects
)
233 while (sector
< end_sector
) {
235 bio
= bio_alloc(gfp_mask
, 0);
236 bio
->bi_iter
.bi_sector
= sector
;
238 bio_set_op_attrs(bio
, REQ_OP_ZONE_RESET
, 0);
240 ret
= submit_bio_wait(bio
);
246 sector
+= zone_sectors
;
248 /* This may take a while, so be nice to others */
255 EXPORT_SYMBOL_GPL(blkdev_reset_zones
);
258 * BLKREPORTZONE ioctl processing.
259 * Called from blkdev_ioctl.
261 int blkdev_report_zones_ioctl(struct block_device
*bdev
, fmode_t mode
,
262 unsigned int cmd
, unsigned long arg
)
264 void __user
*argp
= (void __user
*)arg
;
265 struct request_queue
*q
;
266 struct blk_zone_report rep
;
267 struct blk_zone
*zones
;
273 q
= bdev_get_queue(bdev
);
277 if (!blk_queue_is_zoned(q
))
280 if (!capable(CAP_SYS_ADMIN
))
283 if (copy_from_user(&rep
, argp
, sizeof(struct blk_zone_report
)))
289 zones
= kcalloc(rep
.nr_zones
, sizeof(struct blk_zone
), GFP_KERNEL
);
293 ret
= blkdev_report_zones(bdev
, rep
.sector
,
294 zones
, &rep
.nr_zones
,
299 if (copy_to_user(argp
, &rep
, sizeof(struct blk_zone_report
))) {
305 if (copy_to_user(argp
+ sizeof(struct blk_zone_report
), zones
,
306 sizeof(struct blk_zone
) * rep
.nr_zones
))
317 * BLKRESETZONE ioctl processing.
318 * Called from blkdev_ioctl.
320 int blkdev_reset_zones_ioctl(struct block_device
*bdev
, fmode_t mode
,
321 unsigned int cmd
, unsigned long arg
)
323 void __user
*argp
= (void __user
*)arg
;
324 struct request_queue
*q
;
325 struct blk_zone_range zrange
;
330 q
= bdev_get_queue(bdev
);
334 if (!blk_queue_is_zoned(q
))
337 if (!capable(CAP_SYS_ADMIN
))
340 if (!(mode
& FMODE_WRITE
))
343 if (copy_from_user(&zrange
, argp
, sizeof(struct blk_zone_range
)))
346 return blkdev_reset_zones(bdev
, zrange
.sector
, zrange
.nr_sectors
,