// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define NULL_ZONE_INVALID_WP	((sector_t)-1)

static inline sector_t mb_to_sects(unsigned long mb)
{
        return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
        return sect >> ilog2(dev->zone_size_sects);
}

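/*
 * Zone locking: when the device is not memory backed, command processing
 * does not sleep, so a per-zone spinlock (taken with interrupts disabled)
 * is sufficient. Memory backed devices may sleep while handling a command,
 * so a per-zone mutex is used instead.
 */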
static inline void null_init_zone_lock(struct nullb_device *dev,
                                       struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_init(&zone->spinlock);
        else
                mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
                                  struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_irq(&zone->spinlock);
        else
                mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_unlock_irq(&zone->spinlock);
        else
                mutex_unlock(&zone->mutex);
}

int null_init_zoned_dev(struct nullb_device *dev,
                        struct queue_limits *lim)
{
        sector_t dev_capacity_sects, zone_capacity_sects;
        struct nullb_zone *zone;
        sector_t sector = 0;
        unsigned int i;

        if (!is_power_of_2(dev->zone_size)) {
                pr_err("zone_size must be power-of-two\n");
                return -EINVAL;
        }
        if (dev->zone_size > dev->size) {
                pr_err("Zone size larger than device capacity\n");
                return -EINVAL;
        }

        if (!dev->zone_capacity)
                dev->zone_capacity = dev->zone_size;

        if (dev->zone_capacity > dev->zone_size) {
                pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
                       dev->zone_capacity, dev->zone_size);
                return -EINVAL;
        }

        /*
         * If a smaller zone capacity was requested, do not allow a smaller last
         * zone at the same time as such zone configuration does not correspond
         * to any real zoned device.
         */
        if (dev->zone_capacity != dev->zone_size &&
            dev->size & (dev->zone_size - 1)) {
                pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
                return -EINVAL;
        }

        zone_capacity_sects = mb_to_sects(dev->zone_capacity);
        dev_capacity_sects = mb_to_sects(dev->size);
        dev->zone_size_sects = mb_to_sects(dev->zone_size);
        dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
                >> ilog2(dev->zone_size_sects);

        dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!dev->zones)
                return -ENOMEM;

        spin_lock_init(&dev->zone_res_lock);

        if (dev->zone_nr_conv >= dev->nr_zones) {
                dev->zone_nr_conv = dev->nr_zones - 1;
                pr_info("changed the number of conventional zones to %u",
                        dev->zone_nr_conv);
        }

        dev->zone_append_max_sectors =
                min(ALIGN_DOWN(dev->zone_append_max_sectors,
                               dev->blocksize >> SECTOR_SHIFT),
                    zone_capacity_sects);

        /* Max active zones has to be < nbr of seq zones in order to be enforceable */
        if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_active = 0;
                pr_info("zone_max_active limit disabled, limit >= zone count\n");
        }

        /* Max open zones has to be <= max active zones */
        if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
                dev->zone_max_open = dev->zone_max_active;
                pr_info("changed the maximum number of open zones to %u\n",
                        dev->zone_max_open);
        } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_open = 0;
                pr_info("zone_max_open limit disabled, limit >= zone count\n");
        }

        dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
        dev->imp_close_zone_no = dev->zone_nr_conv;

        for (i = 0; i < dev->zone_nr_conv; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = sector;
                zone->len = dev->zone_size_sects;
                zone->capacity = zone->len;
                zone->wp = zone->start + zone->len;
                zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
                zone->cond = BLK_ZONE_COND_NOT_WP;

                sector += dev->zone_size_sects;
        }

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = sector;
                if (zone->start + dev->zone_size_sects > dev_capacity_sects)
                        zone->len = dev_capacity_sects - zone->start;
                else
                        zone->len = dev->zone_size_sects;
                zone->capacity = min_t(sector_t, zone->len, zone_capacity_sects);
                zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                if (dev->zone_full) {
                        zone->cond = BLK_ZONE_COND_FULL;
                        zone->wp = zone->start + zone->capacity;
                } else {
                        zone->cond = BLK_ZONE_COND_EMPTY;
                        zone->wp = zone->start;
                }

                sector += dev->zone_size_sects;
        }

        lim->features |= BLK_FEAT_ZONED;
        lim->chunk_sectors = dev->zone_size_sects;
        lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
        lim->max_open_zones = dev->zone_max_open;
        lim->max_active_zones = dev->zone_max_active;

        return 0;
}

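/*
 * Illustrative example (assuming the usual null_blk module parameters, with
 * made-up values): a 1 GB zoned device with 64 MB zones, a 62 MB zone
 * capacity and 4 conventional zones could be created with:
 *
 *   modprobe null_blk zoned=1 gb=1 zone_size=64 zone_capacity=62 zone_nr_conv=4
 *
 * Note that with zone_capacity < zone_size, the checks above require the
 * device size to be a multiple of the zone size.
 */
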
int null_register_zoned_dev(struct nullb *nullb)
{
        struct request_queue *q = nullb->q;
        struct gendisk *disk = nullb->disk;

        pr_info("%s: using %s zone append\n",
                disk->disk_name,
                queue_emulates_zone_append(q) ? "emulated" : "native");

        return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
        kvfree(dev->zones);
        dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
                      unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct nullb *nullb = disk->private_data;
        struct nullb_device *dev = nullb->dev;
        unsigned int first_zone, i;
        struct nullb_zone *zone;
        struct blk_zone blkz;
        int error;

        first_zone = null_zone_no(dev, sector);
        if (first_zone >= dev->nr_zones)
                return 0;

        nr_zones = min(nr_zones, dev->nr_zones - first_zone);
        trace_nullb_report_zones(nullb, nr_zones);

        memset(&blkz, 0, sizeof(struct blk_zone));
        zone = &dev->zones[first_zone];
        for (i = 0; i < nr_zones; i++, zone++) {
                /*
                 * Stacked DM target drivers will remap the zone information by
                 * modifying the zone information passed to the report callback.
                 * So use a local copy to avoid corruption of the device zone
                 * array.
                 */
                null_lock_zone(dev, zone);
                blkz.start = zone->start;
                blkz.len = zone->len;
                blkz.wp = zone->wp;
                blkz.type = zone->type;
                blkz.cond = zone->cond;
                blkz.capacity = zone->capacity;
                null_unlock_zone(dev, zone);

                error = cb(&blkz, i, data);
                if (error)
                        return error;
        }

        return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
                                sector_t sector, unsigned int len)
{
        struct nullb_device *dev = nullb->dev;
        struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
        unsigned int nr_sectors = len >> SECTOR_SHIFT;

        /* Read must be below the write pointer position */
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
            sector + nr_sectors <= zone->wp)
                return len;

        if (sector > zone->wp)
                return 0;

        return (zone->wp - sector) << SECTOR_SHIFT;
}

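/*
 * Close the first implicitly open zone found, scanning the sequential zones
 * in round-robin fashion starting from the zone that follows the last one
 * that was implicitly closed (dev->imp_close_zone_no).
 */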
static void null_close_imp_open_zone(struct nullb_device *dev)
{
        struct nullb_zone *zone;
        unsigned int zno, i;

        zno = dev->imp_close_zone_no;
        if (zno >= dev->nr_zones)
                zno = dev->zone_nr_conv;

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[zno];
                zno++;
                if (zno >= dev->nr_zones)
                        zno = dev->zone_nr_conv;

                if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
                        dev->nr_zones_imp_open--;
                        if (zone->wp == zone->start) {
                                zone->cond = BLK_ZONE_COND_EMPTY;
                        } else {
                                zone->cond = BLK_ZONE_COND_CLOSED;
                                dev->nr_zones_closed++;
                        }
                        dev->imp_close_zone_no = zno;
                        return;
                }
        }
}

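/*
 * Check whether activating or opening one more zone would exceed the
 * configured zone_max_active and zone_max_open limits. null_check_open()
 * may implicitly close an open zone to free up an open zone resource.
 */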
static blk_status_t null_check_active(struct nullb_device *dev)
{
        if (!dev->zone_max_active)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
            dev->nr_zones_closed < dev->zone_max_active)
                return BLK_STS_OK;

        return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
        if (!dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_imp_open) {
                if (null_check_active(dev) == BLK_STS_OK) {
                        null_close_imp_open_zone(dev);
                        return BLK_STS_OK;
                }
        }

        return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
                                              struct nullb_zone *zone)
{
        blk_status_t ret;

        switch (zone->cond) {
        case BLK_ZONE_COND_EMPTY:
                ret = null_check_active(dev);
                if (ret != BLK_STS_OK)
                        return ret;
                fallthrough;
        case BLK_ZONE_COND_CLOSED:
                return null_check_open(dev);
        default:
                /* Should never be called for other states */
                WARN_ON(1);
                return BLK_STS_IOERR;
        }
}

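/*
 * Handle a regular write or a zone append to a sequential write required
 * zone: verify that the write lands at the zone write pointer, account for
 * the implicit open transition under zone_res_lock if needed, execute the
 * write, then advance the write pointer and mark the zone full once the
 * write pointer reaches the zone capacity.
 */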
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                                    unsigned int nr_sectors, bool append)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zno = null_zone_no(dev, sector);
        struct nullb_zone *zone = &dev->zones[zno];
        blk_status_t ret;

        trace_nullb_zone_op(cmd, zno, zone->cond);

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
                if (append)
                        return BLK_STS_IOERR;
                return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        }

        null_lock_zone(dev, zone);

        /*
         * Regular writes must be at the write pointer position. Zone append
         * writes are automatically issued at the write pointer and the position
         * returned using the request sector. Note that we do not check the zone
         * condition because for FULL, READONLY and OFFLINE zones, the sector
         * check against the zone write pointer will always result in failing
         * the command.
         */
        if (append) {
                if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
                    zone->wp == NULL_ZONE_INVALID_WP) {
                        ret = BLK_STS_IOERR;
                        goto unlock_zone;
                }
                sector = zone->wp;
                blk_mq_rq_from_pdu(cmd)->__sector = sector;
        }

        if (sector != zone->wp ||
            zone->wp + nr_sectors > zone->start + zone->capacity) {
                ret = BLK_STS_IOERR;
                goto unlock_zone;
        }

        if (zone->cond == BLK_ZONE_COND_CLOSED ||
            zone->cond == BLK_ZONE_COND_EMPTY) {
                if (dev->need_zone_res_mgmt) {
                        spin_lock(&dev->zone_res_lock);

                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                goto unlock_zone;
                        }
                        if (zone->cond == BLK_ZONE_COND_CLOSED) {
                                dev->nr_zones_closed--;
                                dev->nr_zones_imp_open++;
                        } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
                                dev->nr_zones_imp_open++;
                        }

                        spin_unlock(&dev->zone_res_lock);
                }

                zone->cond = BLK_ZONE_COND_IMP_OPEN;
        }

        ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        if (ret != BLK_STS_OK)
                goto unlock_zone;

        zone->wp += nr_sectors;
        if (zone->wp == zone->start + zone->capacity) {
                if (dev->need_zone_res_mgmt) {
                        spin_lock(&dev->zone_res_lock);
                        if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
                                dev->nr_zones_exp_open--;
                        else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
                                dev->nr_zones_imp_open--;
                        spin_unlock(&dev->zone_res_lock);
                }
                zone->cond = BLK_ZONE_COND_FULL;
        }

        ret = BLK_STS_OK;

unlock_zone:
        null_unlock_zone(dev, zone);

        return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
                                   struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        switch (zone->cond) {
        case BLK_ZONE_COND_EXP_OPEN:
                /* Open operation on exp open is not an error */
                return BLK_STS_OK;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_CLOSED:
                break;
        case BLK_ZONE_COND_FULL:
        default:
                return BLK_STS_IOERR;
        }

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_EMPTY:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        break;
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        dev->nr_zones_closed--;
                        break;
                default:
                        break;
                }

                dev->nr_zones_exp_open++;

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_EXP_OPEN;

        return BLK_STS_OK;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        switch (zone->cond) {
        case BLK_ZONE_COND_CLOSED:
                /* close operation on closed is not an error */
                return BLK_STS_OK;
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_EXP_OPEN:
                break;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_FULL:
        default:
                return BLK_STS_IOERR;
        }

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                default:
                        break;
                }

                if (zone->wp > zone->start)
                        dev->nr_zones_closed++;

                spin_unlock(&dev->zone_res_lock);
        }

        if (zone->wp == zone->start)
                zone->cond = BLK_ZONE_COND_EMPTY;
        else
                zone->cond = BLK_ZONE_COND_CLOSED;

        return BLK_STS_OK;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
                                     struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_FULL:
                        /* Finish operation on full is not an error */
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_OK;
                case BLK_ZONE_COND_EMPTY:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        break;
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        dev->nr_zones_closed--;
                        break;
                default:
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_IOERR;
                }

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_FULL;
        zone->wp = zone->start + zone->len;

        return BLK_STS_OK;
}

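/*
 * Reset a sequential zone to the empty condition, releasing any open or
 * active zone resource it held and, for memory backed devices, discarding
 * the data backing the zone.
 */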
static blk_status_t null_reset_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        dev->nr_zones_closed--;
                        break;
                case BLK_ZONE_COND_EMPTY:
                case BLK_ZONE_COND_FULL:
                        break;
                default:
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_IOERR;
                }

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_EMPTY;
        zone->wp = zone->start;

        if (dev->memory_backed)
                return null_handle_discard(dev, zone->start, zone->len);

        return BLK_STS_OK;
}

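/*
 * Execute a zone management operation (reset, reset all, open, close or
 * finish) on the zone containing the target sector. REQ_OP_ZONE_RESET_ALL
 * resets every sequential zone that is not already empty, read-only or
 * offline.
 */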
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
                                   sector_t sector)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zone_no;
        struct nullb_zone *zone;
        blk_status_t ret;
        size_t i;

        if (op == REQ_OP_ZONE_RESET_ALL) {
                for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                        zone = &dev->zones[i];
                        null_lock_zone(dev, zone);
                        if (zone->cond != BLK_ZONE_COND_EMPTY &&
                            zone->cond != BLK_ZONE_COND_READONLY &&
                            zone->cond != BLK_ZONE_COND_OFFLINE) {
                                null_reset_zone(dev, zone);
                                trace_nullb_zone_op(cmd, i, zone->cond);
                        }
                        null_unlock_zone(dev, zone);
                }
                return BLK_STS_OK;
        }

        zone_no = null_zone_no(dev, sector);
        zone = &dev->zones[zone_no];

        null_lock_zone(dev, zone);

        if (zone->cond == BLK_ZONE_COND_READONLY ||
            zone->cond == BLK_ZONE_COND_OFFLINE) {
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        switch (op) {
        case REQ_OP_ZONE_RESET:
                ret = null_reset_zone(dev, zone);
                break;
        case REQ_OP_ZONE_OPEN:
                ret = null_open_zone(dev, zone);
                break;
        case REQ_OP_ZONE_CLOSE:
                ret = null_close_zone(dev, zone);
                break;
        case REQ_OP_ZONE_FINISH:
                ret = null_finish_zone(dev, zone);
                break;
        default:
                ret = BLK_STS_NOTSUPP;
                break;
        }

        if (ret == BLK_STS_OK)
                trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
        null_unlock_zone(dev, zone);

        return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
                                    sector_t sector, sector_t nr_sectors)
{
        struct nullb_device *dev;
        struct nullb_zone *zone;
        blk_status_t sts;

        switch (op) {
        case REQ_OP_WRITE:
                return null_zone_write(cmd, sector, nr_sectors, false);
        case REQ_OP_ZONE_APPEND:
                return null_zone_write(cmd, sector, nr_sectors, true);
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return null_zone_mgmt(cmd, op, sector);
        default:
                dev = cmd->nq->dev;
                zone = &dev->zones[null_zone_no(dev, sector)];
                if (zone->cond == BLK_ZONE_COND_OFFLINE)
                        return BLK_STS_IOERR;

                null_lock_zone(dev, zone);
                sts = null_process_cmd(cmd, op, sector, nr_sectors);
                null_unlock_zone(dev, zone);
                return sts;
        }
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
                               struct nullb_zone *zone, enum blk_zone_cond cond)
{
        if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
                         cond != BLK_ZONE_COND_OFFLINE))
                return;

        null_lock_zone(dev, zone);

        /*
         * If the read-only condition is requested again to zones already in
         * read-only condition, restore back normal empty condition. Do the same
         * if the offline condition is requested for offline zones. Otherwise,
         * set the specified zone condition to the zones. Finish the zones
         * beforehand to free up zone resources.
         */
        if (zone->cond == cond) {
                zone->cond = BLK_ZONE_COND_EMPTY;
                zone->wp = zone->start;
                if (dev->memory_backed)
                        null_handle_discard(dev, zone->start, zone->len);
        } else {
                if (zone->cond != BLK_ZONE_COND_READONLY &&
                    zone->cond != BLK_ZONE_COND_OFFLINE)
                        null_finish_zone(dev, zone);
                zone->cond = cond;
                zone->wp = NULL_ZONE_INVALID_WP;
        }

        null_unlock_zone(dev, zone);
}

/*
 * Identify a zone from the sector written to configfs file. Then set zone
 * condition to the zone.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
                        size_t count, enum blk_zone_cond cond)
{
        unsigned long long sector;
        unsigned int zone_no;
        int ret;

        if (!dev->zoned) {
                pr_err("null_blk device is not zoned\n");
                return -EINVAL;
        }

        if (!dev->zones) {
                pr_err("null_blk device is not yet powered\n");
                return -EINVAL;
        }

        ret = kstrtoull(page, 0, &sector);
        if (ret < 0)
                return ret;

        zone_no = null_zone_no(dev, sector);
        if (zone_no >= dev->nr_zones) {
                pr_err("Sector out of range\n");
                return -EINVAL;
        }

        if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
                pr_err("Can not change condition of conventional zones\n");
                return -EINVAL;
        }

        null_set_zone_cond(dev, &dev->zones[zone_no], cond);

        return count;
}

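/*
 * Illustrative usage (assuming a configfs-created device named "nullb0" and
 * the zone_readonly/zone_offline attributes that call this helper): writing
 * a sector number sets the condition of the zone containing that sector, and
 * writing the same sector again restores the zone to the empty condition:
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 */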