Merge tag 'regmap-fix-v5.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux/fpc-iii.git] / fs / btrfs / zoned.c
blobc3884665901912a72e76c432ba14733129f03e73
1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/slab.h>
4 #include <linux/blkdev.h>
5 #include "ctree.h"
6 #include "volumes.h"
7 #include "zoned.h"
8 #include "rcu-string.h"
10 /* Maximum number of zones to report per blkdev_report_zones() call */
11 #define BTRFS_REPORT_NR_ZONES 4096
13 /* Number of superblock log zones */
14 #define BTRFS_NR_SB_LOG_ZONES 2
16 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
18 struct blk_zone *zones = data;
20 memcpy(&zones[idx], zone, sizeof(*zone));
22 return 0;
/*
 * sb_write_pointer - decide where the next superblock goes in a two-zone
 * superblock log, based on the EMPTY/FULL condition of both zones.
 *
 * @bdev:   block device the zones belong to
 * @zones:  array of BTRFS_NR_SB_LOG_ZONES (2) zones forming the log
 * @wp_ret: returns the write position in bytes
 *
 * Returns 0 on success, -ENOENT when both zones are empty (no superblock
 * written yet; *wp_ret still points at the start of zones[0]), or -EUCLEAN
 * for a zone-state combination that a correct log can never reach.
 */
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;

	/* Conventional zones are handled by the caller, never passed here */
	ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
	       zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);

	empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
	empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
	full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
	full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          x        0
	 * In use[1]        0          x        0
	 * Full[1]          1          1        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/*
		 * Compare two super blocks: read the last superblock copy in
		 * each zone and keep writing after the one with the newer
		 * generation.
		 */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			/* Last BTRFS_SUPER_INFO_SIZE bytes of the zone */
			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				/* Drop the already-read first copy on error */
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		/* Continue in the zone holding the older superblock */
		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		/* zones[0] is the active log zone */
		sector = zones[0].wp;
	} else if (full[0]) {
		/* zones[0] exhausted, zones[1] is the active log zone */
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}

	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}
103 * The following zones are reserved as the circular buffer on ZONED btrfs.
104 * - The primary superblock: zones 0 and 1
105 * - The first copy: zones 16 and 17
106 * - The second copy: zones 1024 or zone at 256GB which is minimum, and
107 * the following one
109 static inline u32 sb_zone_number(int shift, int mirror)
111 ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
113 switch (mirror) {
114 case 0: return 0;
115 case 1: return 16;
116 case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
119 return 0;
122 static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
123 struct blk_zone *zones, unsigned int *nr_zones)
125 int ret;
127 if (!*nr_zones)
128 return 0;
130 ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
131 copy_zone_info_cb, zones);
132 if (ret < 0) {
133 btrfs_err_in_rcu(device->fs_info,
134 "zoned: failed to read zone %llu on %s (devid %llu)",
135 pos, rcu_str_deref(device->name),
136 device->devid);
137 return ret;
139 *nr_zones = ret;
140 if (!ret)
141 return -EIO;
143 return 0;
/*
 * btrfs_get_dev_zone_info - build the per-device zone information cache.
 *
 * Scans all zones of a zoned @device, records which zones are sequential
 * and which are empty in bitmaps, caches the superblock log zones, and
 * validates each superblock log's write pointer. No-op for non-zoned
 * devices or when the info was already built.
 *
 * Returns 0 on success or a negative errno; on error all partially built
 * state is freed and device->zone_info stays NULL.
 */
int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	struct request_queue *queue = bdev_get_queue(bdev);
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	unsigned int zone_sectors;
	int ret;

	if (!bdev_is_zoned(bdev))
		return 0;

	/* Already built */
	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	nr_sectors = bdev_nr_sectors(bdev);
	zone_sectors = bdev_zone_sectors(bdev);
	/* Check if it's power of 2 (see is_power_of_2) */
	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->max_zone_append_size =
		(u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	/* Account for a partial trailing zone on unaligned devices */
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scratch buffer for batched zone reports */
	zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get zones type */
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			if (zones[i].cond == BLK_ZONE_COND_EMPTY)
				__set_bit(nreported, zone_info->empty_zones);
			nreported++;
		}
		/* Next report starts right after the last zone we saw */
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		/* Mirror would fall beyond the device; skip it */
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		/* Cache both log zones of this mirror in zone_info->sb_zones */
		sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		/* -ENOENT just means no superblock written yet; that's fine */
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	device->zone_info = zone_info;

	/* device->fs_info is not safe to use for printing messages */
	btrfs_info_in_rcu(NULL,
			"host-%s zoned block device %s, %u zones of %llu bytes",
			bdev_zoned_model(bdev) == BLK_ZONED_HM ? "managed" : "aware",
			rcu_str_deref(device->name), zone_info->nr_zones,
			zone_info->zone_size);

	return 0;

out:
	kfree(zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->seq_zones);
	kfree(zone_info);

	return ret;
}
293 void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
295 struct btrfs_zoned_device_info *zone_info = device->zone_info;
297 if (!zone_info)
298 return;
300 bitmap_free(zone_info->seq_zones);
301 bitmap_free(zone_info->empty_zones);
302 kfree(zone_info);
303 device->zone_info = NULL;
306 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
307 struct blk_zone *zone)
309 unsigned int nr_zones = 1;
310 int ret;
312 ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
313 if (ret != 0 || !nr_zones)
314 return ret ? ret : -EIO;
316 return 0;
/*
 * btrfs_check_zoned_mode - validate that the device set is usable for the
 * (possibly enabled) ZONED incompat feature.
 *
 * Walks all devices of the filesystem and checks that either no device is
 * zoned (regular mode), or every device is zoned with the same zone size
 * (zoned mode). On success fills fs_info->zone_size and
 * fs_info->max_zone_append_size. Returns 0 or -EINVAL.
 *
 * NOTE(review): relies on device->zone_info having been populated earlier
 * (btrfs_get_dev_zone_info) for every zoned device — confirm against the
 * mount path.
 */
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	u64 max_zone_append_size = 0;
	const bool incompat_zoned = btrfs_is_zoned(fs_info);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		/* Missing devices have no bdev to inspect */
		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/* Host-aware devices only count as zoned when ZONED is on */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info;

			zone_info = device->zone_info;
			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  device->zone_info->zone_size,
					  zone_size);
				ret = -EINVAL;
				goto out;
			}
			/* Track the smallest non-zero max append size */
			if (!max_zone_append_size ||
			    (zone_info->max_zone_append_size &&
			     zone_info->max_zone_append_size < max_zone_append_size))
				max_zone_append_size =
					zone_info->max_zone_append_size;
		}
		nr_devices++;
	}

	/* Regular filesystem on regular devices: nothing more to check */
	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->max_zone_append_size = max_zone_append_size;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}
415 int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
417 if (!btrfs_is_zoned(info))
418 return 0;
421 * Space cache writing is not COWed. Disable that to avoid write errors
422 * in sequential zones.
424 if (btrfs_test_opt(info, SPACE_CACHE)) {
425 btrfs_err(info, "zoned: space cache v1 is not supported");
426 return -EINVAL;
429 if (btrfs_test_opt(info, NODATACOW)) {
430 btrfs_err(info, "zoned: NODATACOW not supported");
431 return -EINVAL;
434 return 0;
/*
 * sb_log_location - turn a superblock zone pair into a byte location.
 *
 * @bdev:       the block device
 * @zones:      the two zones of one superblock log
 * @rw:         READ or WRITE
 * @bytenr_ret: returns the byte offset to read from / write to
 *
 * For WRITE the location is the current write pointer; a full zone that is
 * about to be reused is reset first. For READ the location is the last
 * superblock written, i.e. BTRFS_SUPER_INFO_SIZE before the write pointer
 * (wrapping to the end of zones[1] when the pointer sits at the start of
 * zones[0]).
 */
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	/* Conventional zone: fixed location at the zone start */
	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	/* -ENOENT (empty log) is handled below, other errors abort */
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		/*
		 * Writing at the start of a zone means the other zone holds
		 * the valid log; this one may need a reset before reuse.
		 */
		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(reset->cond == BLK_ZONE_COND_FULL);

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			/* Keep the cached zone state in sync with the device */
			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/* For READ, we want the previous one */
		if (wp == zones[0].start << SECTOR_SHIFT)
			wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;
	return 0;
}
484 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
485 u64 *bytenr_ret)
487 struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
488 unsigned int zone_sectors;
489 u32 sb_zone;
490 int ret;
491 u64 zone_size;
492 u8 zone_sectors_shift;
493 sector_t nr_sectors;
494 u32 nr_zones;
496 if (!bdev_is_zoned(bdev)) {
497 *bytenr_ret = btrfs_sb_offset(mirror);
498 return 0;
501 ASSERT(rw == READ || rw == WRITE);
503 zone_sectors = bdev_zone_sectors(bdev);
504 if (!is_power_of_2(zone_sectors))
505 return -EINVAL;
506 zone_size = zone_sectors << SECTOR_SHIFT;
507 zone_sectors_shift = ilog2(zone_sectors);
508 nr_sectors = bdev_nr_sectors(bdev);
509 nr_zones = nr_sectors >> zone_sectors_shift;
511 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
512 if (sb_zone + 1 >= nr_zones)
513 return -ENOENT;
515 ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
516 BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
517 zones);
518 if (ret < 0)
519 return ret;
520 if (ret != BTRFS_NR_SB_LOG_ZONES)
521 return -EIO;
523 return sb_log_location(bdev, zones, rw, bytenr_ret);
526 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
527 u64 *bytenr_ret)
529 struct btrfs_zoned_device_info *zinfo = device->zone_info;
530 u32 zone_num;
532 if (!zinfo) {
533 *bytenr_ret = btrfs_sb_offset(mirror);
534 return 0;
537 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
538 if (zone_num + 1 >= zinfo->nr_zones)
539 return -ENOENT;
541 return sb_log_location(device->bdev,
542 &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
543 rw, bytenr_ret);
546 static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo,
547 int mirror)
549 u32 zone_num;
551 if (!zinfo)
552 return false;
554 zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
555 if (zone_num + 1 >= zinfo->nr_zones)
556 return false;
558 if (!test_bit(zone_num, zinfo->seq_zones))
559 return false;
561 return true;
564 void btrfs_advance_sb_log(struct btrfs_device *device, int mirror)
566 struct btrfs_zoned_device_info *zinfo = device->zone_info;
567 struct blk_zone *zone;
569 if (!is_sb_log_zone(zinfo, mirror))
570 return;
572 zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
573 if (zone->cond != BLK_ZONE_COND_FULL) {
574 if (zone->cond == BLK_ZONE_COND_EMPTY)
575 zone->cond = BLK_ZONE_COND_IMP_OPEN;
577 zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
579 if (zone->wp == zone->start + zone->len)
580 zone->cond = BLK_ZONE_COND_FULL;
582 return;
585 zone++;
586 ASSERT(zone->cond != BLK_ZONE_COND_FULL);
587 if (zone->cond == BLK_ZONE_COND_EMPTY)
588 zone->cond = BLK_ZONE_COND_IMP_OPEN;
590 zone->wp += (BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT);
592 if (zone->wp == zone->start + zone->len)
593 zone->cond = BLK_ZONE_COND_FULL;
596 int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
598 sector_t zone_sectors;
599 sector_t nr_sectors;
600 u8 zone_sectors_shift;
601 u32 sb_zone;
602 u32 nr_zones;
604 zone_sectors = bdev_zone_sectors(bdev);
605 zone_sectors_shift = ilog2(zone_sectors);
606 nr_sectors = bdev_nr_sectors(bdev);
607 nr_zones = nr_sectors >> zone_sectors_shift;
609 sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
610 if (sb_zone + 1 >= nr_zones)
611 return -ENOENT;
613 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
614 sb_zone << zone_sectors_shift,
615 zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);