// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT		11
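/*
 * Zone sizes are a power of two, so a sector's zone index is just the
 * sector shifted down by log2 of the zone size in sectors.
 */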
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
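/*
 * Allocate and populate the device zone array: the first zone_nr_conv
 * zones are conventional, the remainder are sequential-write-required
 * zones starting out empty.
 */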
int null_zone_init(struct nullb_device *dev)
{
	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
	dev->nr_zones = dev_size >>
				(SECTOR_SHIFT + ilog2(dev->zone_size_sects));
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;
	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u",
			dev->zone_nr_conv);
	}
	for (i = 0; i < dev->zone_nr_conv; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}
	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		struct blk_zone *zone = &dev->zones[i];

		zone->start = zone->wp = sector;
		zone->len = dev->zone_size_sects;
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}
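/* Release the zone array allocated by null_zone_init(). */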
void null_zone_exit(struct nullb_device *dev)
{
	kvfree(dev->zones);
}
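/*
 * Report up to nr_zones zones, starting with the zone containing
 * @sector, by invoking the callback once per zone.
 */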
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct blk_zone zone;
	int error;
	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;
	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	for (i = 0; i < nr_zones; i++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		memcpy(&zone, &dev->zones[first_zone + i],
		       sizeof(struct blk_zone));
		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
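/*
 * Clamp the length of a read to the write pointer of the target zone:
 * data past the write pointer of a sequential zone is not readable.
 */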
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;
	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
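/*
 * Emulate a zoned write: the write must start at the write pointer,
 * a zone not explicitly open is implicitly opened, and the write
 * pointer advances; a zone written up to its end becomes FULL.
 */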
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		cmd->error = BLK_STS_IOERR;
		return BLK_STS_IOERR;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		/* Writes must be at the write pointer position */
		if (sector != zone->wp)
			return BLK_STS_IOERR;

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		zone->wp += nr_sectors;
		if (zone->wp == zone->start + zone->len)
			zone->cond = BLK_ZONE_COND_FULL;
		return BLK_STS_OK;
	case BLK_ZONE_COND_NOT_WP:
		return BLK_STS_OK;
	default:
		/* Invalid zone condition */
		return BLK_STS_IOERR;
	}
}
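/*
 * Handle zone management operations (reset, open, close, finish) by
 * updating the condition and write pointer of the target zone(s).
 */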
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int i;

	switch (op) {
	case REQ_OP_ZONE_RESET_ALL:
		for (i = 0; i < dev->nr_zones; i++) {
			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
				continue;
			zone[i].cond = BLK_ZONE_COND_EMPTY;
			zone[i].wp = zone[i].start;
		}
		break;
	case REQ_OP_ZONE_RESET:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		break;
	case REQ_OP_ZONE_OPEN:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;
		if (zone->cond == BLK_ZONE_COND_FULL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case REQ_OP_ZONE_CLOSE:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;
		if (zone->cond == BLK_ZONE_COND_FULL)
			return BLK_STS_IOERR;

		if (zone->wp == zone->start)
			zone->cond = BLK_ZONE_COND_EMPTY;
		else
			zone->cond = BLK_ZONE_COND_CLOSED;
		break;
	case REQ_OP_ZONE_FINISH:
		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
			return BLK_STS_IOERR;

		zone->cond = BLK_ZONE_COND_FULL;
		zone->wp = zone->start + zone->len;
		break;
	default:
		return BLK_STS_NOTSUPP;
	}

	return BLK_STS_OK;
}
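/*
 * Dispatch zoned commands: writes go through write pointer checking,
 * zone management operations update zone state, and everything else
 * (e.g. reads) is allowed as-is.
 */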
blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
			       sector_t sector, sector_t nr_sectors)
{
	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		return BLK_STS_OK;
	}
}