/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include <linux/blkdev.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

/**
 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone
 * @sdkp: The disk the report originated from
 * @buf: Address of the report zone descriptor
 * @zone: the destination zone structure
 *
 * All LBA-sized values are converted to 512B sector units.
 */
static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
				struct blk_zone *zone)
{
	struct scsi_device *sdp = sdkp->device;

	memset(zone, 0, sizeof(struct blk_zone));

	zone->type = buf[0] & 0x0f;
	zone->cond = (buf[1] >> 4) & 0xf;

	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
	if (zone->type != ZBC_ZONE_TYPE_CONV &&
	    zone->cond == ZBC_ZONE_COND_FULL)
		zone->wp = zone->start + zone->len;
}
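
/*
 * Note: the parsing above assumes the 64-byte ZBC zone descriptor layout:
 * zone type in the low nibble of byte 0, zone condition in the high nibble
 * of byte 1, zone length (in LBAs) in bytes 8-15, zone start LBA in bytes
 * 16-23 and write pointer LBA in bytes 24-31, all big-endian.
 */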

/**
 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: Buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 *
 * For internal use during device validation.
 */
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
			       unsigned int buflen, sector_t lba)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	memset(buf, 0, buflen);

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES lba %llu failed with %d/%d\n",
			  (unsigned long long)lba,
			  host_byte(result), driver_byte(result));
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}
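
/*
 * For reference: the REPORT ZONES CDB built above is the 16-byte ZBC IN
 * command, with the service action in byte 1, the starting LBA as a
 * big-endian 64-bit value in bytes 2-9 and the allocation length as a
 * big-endian 32-bit value in bytes 10-13. The reply starts with a 64-byte
 * header (zone list length in bytes 0-3, SAME field in byte 4, maximum LBA
 * in bytes 8-15) followed by 64-byte zone descriptors.
 */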

/**
 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
 * @cmd: The command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
 */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t lba, sector = blk_rq_pos(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);
	int ret;

	WARN_ON(nr_bytes == 0);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	ret = scsi_init_io(cmd);
	if (ret != BLKPREP_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_IN;
	cmd->cmnd[1] = ZI_REPORT_ZONES;
	lba = sectors_to_logical(sdkp->device, sector);
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
	/* Do partial report for speeding things up */
	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;

	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->sdb.length = nr_bytes;
	cmd->transfersize = sdkp->device->sector_size;

	/*
	 * The report may return fewer bytes than requested. Make sure
	 * to report completion on the entire initial request.
	 */
	rq->__data_len = nr_bytes;

	return BLKPREP_OK;
}

/**
 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
 * @scmd: The completed report zones command
 * @good_bytes: reply size in bytes
 *
 * Convert all reported zone descriptors to struct blk_zone. The conversion
 * is done in-place, directly in the request's sg buffer.
 */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					 (good_bytes - 64) / 64,
					 get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
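
/*
 * Note on the in-place conversion above: both struct blk_zone_report_hdr and
 * struct blk_zone are sized to fit within the 64-byte REPORT ZONES header
 * and zone descriptor slots, so each descriptor can simply be overwritten
 * with its converted form without moving any data.
 */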

/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}

/**
 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
 * @cmd: the command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
 */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);
	sector_t block = sectors_to_logical(sdkp->device, sector);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	if (sdkp->device->changed)
		return BLKPREP_KILL;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLKPREP_KILL;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
	put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;

	return BLKPREP_OK;
}
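
/*
 * Note: the alignment check above uses a power-of-two mask
 * (sector & (zone_sectors - 1)), which is only valid because
 * sd_zbc_check_zone_size() rejects devices whose zone size is not a
 * power of two number of LBAs.
 */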

/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done(). Process report zones reply and handle reset zone
 * and write command errors.
 */
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
		     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = cmd->request;

	switch (req_op(rq)) {
	case REQ_OP_ZONE_RESET:

		if (result &&
		    sshdr->sense_key == ILLEGAL_REQUEST &&
		    sshdr->asc == 0x24)
			/*
			 * INVALID FIELD IN CDB error: reset of a conventional
			 * zone was attempted. Nothing to worry about, so be
			 * quiet about the error.
			 */
			rq->rq_flags |= RQF_QUIET;
		break;

	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		break;

	case REQ_OP_ZONE_REPORT:

		if (!result)
			sd_zbc_report_zones_complete(cmd, good_bytes);
		break;
	}
}

/**
 * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 */
static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
					     unsigned char *buf)
{
	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Unconstrained-read check failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

	return 0;
}
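
/*
 * For reference: the fields consumed above come from the Zoned Block Device
 * Characteristics VPD page (page code B6h): URSWRZ in bit 0 of byte 4,
 * optimal number of open sequential write preferred zones in bytes 8-11,
 * optimal number of non-sequentially written sequential write preferred
 * zones in bytes 12-15, and maximum number of open sequential write
 * required zones in bytes 16-19 (all big-endian).
 */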

/**
 * sd_zbc_check_capacity - Check reported capacity.
 * @sdkp: Target disk
 * @buf: Buffer to use for commands
 *
 * ZBC drives may report only the capacity of the first conventional zones at
 * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
 * Check this here. If the disk reported only its conventional zones capacity,
 * get the total capacity by doing a report zones.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
{
	sector_t lba;
	int ret;

	if (sdkp->rc_basis != 0)
		return 0;

	/* Do a report zone to get the maximum LBA to check capacity */
	ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
	if (ret)
		return ret;

	/* The max_lba field is the capacity of this device */
	lba = get_unaligned_be64(&buf[8]);
	if (lba + 1 == sdkp->capacity)
		return 0;

	if (sdkp->first_scan)
		sd_printk(KERN_WARNING, sdkp,
			  "Changing capacity from %llu to max LBA+1 %llu\n",
			  (unsigned long long)sdkp->capacity,
			  (unsigned long long)lba + 1);
	sdkp->capacity = lba + 1;

	return 0;
}

#define SD_ZBC_BUF_SIZE 131072U
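
/*
 * With a 64-byte header and 64-byte zone descriptors, a 128 KiB reply
 * buffer holds at most (131072 - 64) / 64 = 2047 zone descriptors per
 * REPORT ZONES round trip.
 */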

/**
 * sd_zbc_check_zone_size - Check the device zone sizes
 * @sdkp: Target disk
 *
 * Check that all zones of the device are equal. The last zone can however
 * be smaller. The zone size must also be a power of two number of LBAs.
 *
 * Returns the zone size in number of blocks upon success or an error code
 * upon failure.
 */
static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
	u64 zone_blocks = 0;
	sector_t block = 0;
	unsigned char *buf;
	unsigned char *rec;
	unsigned int buf_len;
	unsigned int list_length;
	s64 ret;
	u8 same;

	/* Get a buffer */
	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Do a report zone to get the same field */
	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
	if (ret)
		goto out_free;

	same = buf[4] & 0x0f;
	if (same > 0) {
		rec = &buf[64];
		zone_blocks = get_unaligned_be64(&rec[8]);
		goto out;
	}

	/*
	 * Check the size of all zones: all zones must be of
	 * equal size, except the last zone which can be smaller
	 * than other zones.
	 */
	do {

		/* Parse REPORT ZONES header */
		list_length = get_unaligned_be32(&buf[0]) + 64;
		rec = buf + 64;
		buf_len = min(list_length, SD_ZBC_BUF_SIZE);

		/* Parse zone descriptors */
		while (rec < buf + buf_len) {
			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);

			if (zone_blocks == 0) {
				zone_blocks = this_zone_blocks;
			} else if (this_zone_blocks != zone_blocks &&
				   (block + this_zone_blocks < sdkp->capacity
				    || this_zone_blocks > zone_blocks)) {
				zone_blocks = 0;
				goto out;
			}
			block += this_zone_blocks;
			rec += 64;
		}

		if (block < sdkp->capacity) {
			ret = sd_zbc_report_zones(sdkp, buf,
						  SD_ZBC_BUF_SIZE, block);
			if (ret)
				goto out_free;
		}

	} while (block < sdkp->capacity);

out:
	if (!zone_blocks) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non constant zone "
				  "size are not supported\n");
		ret = -ENODEV;
	} else if (!is_power_of_2(zone_blocks)) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non power of 2 zone "
				  "size are not supported\n");
		ret = -ENODEV;
	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Zone size too large\n");
		ret = -ENODEV;
	} else {
		ret = zone_blocks;
	}

out_free:
	kfree(buf);

	return ret;
}

/**
 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
 * @nr_zones: Number of zones to allocate space for.
 * @numa_node: NUMA node to allocate the memory from.
 */
static inline unsigned long *
sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_KERNEL, numa_node);
}

/**
 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
 * @sdkp: disk used
 * @buf: report reply buffer
 * @buflen: length of @buf
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @seq_zones_bitmap: bitmap of sequential zones to set
 *
 * Parse reported zone descriptors in @buf to identify sequential zones and
 * set the reported zone bit in @seq_zones_bitmap accordingly.
 * Since read-only and offline zones cannot be written, do not
 * mark them as sequential in the bitmap.
 * Return the LBA after the last zone reported.
 */
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
				     unsigned int buflen, u32 zone_shift,
				     unsigned long *seq_zones_bitmap)
{
	sector_t lba, next_lba = sdkp->capacity;
	unsigned int buf_len, list_length;
	unsigned char *rec;
	u8 type, cond;

	list_length = get_unaligned_be32(&buf[0]) + 64;
	buf_len = min(list_length, buflen);
	rec = buf + 64;

	while (rec < buf + buf_len) {
		type = rec[0] & 0x0f;
		cond = (rec[1] >> 4) & 0xf;
		lba = get_unaligned_be64(&rec[16]);
		if (type != ZBC_ZONE_TYPE_CONV &&
		    cond != ZBC_ZONE_COND_READONLY &&
		    cond != ZBC_ZONE_COND_OFFLINE)
			set_bit(lba >> zone_shift, seq_zones_bitmap);
		next_lba = lba + get_unaligned_be64(&rec[8]);
		rec += 64;
	}

	return next_lba;
}
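
/*
 * Note: the bit index used above (lba >> zone_shift) is simply the zone
 * number of the zone starting at lba, which is valid because only devices
 * with a power-of-two zone size are accepted by sd_zbc_check_zone_size().
 */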

/**
 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
 * @sdkp: target disk
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @nr_zones: number of zones to set up a seq zone bitmap for
 *
 * Allocate a zone bitmap and initialize it by identifying sequential zones.
 */
static unsigned long *
sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
			      u32 nr_zones)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned long *seq_zones_bitmap;
	sector_t lba = 0;
	unsigned char *buf;
	int ret = -ENOMEM;

	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
	if (!seq_zones_bitmap)
		return ERR_PTR(-ENOMEM);

	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	while (lba < sdkp->capacity) {
		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
		if (ret)
			goto out;
		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
					   zone_shift, seq_zones_bitmap);
	}

	if (lba != sdkp->capacity) {
		/* Something went wrong */
		ret = -EIO;
	}

out:
	kfree(buf);
	if (ret) {
		kfree(seq_zones_bitmap);
		return ERR_PTR(ret);
	}
	return seq_zones_bitmap;
}

static void sd_zbc_cleanup(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;

	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;

	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
	struct request_queue *q = sdkp->disk->queue;
	u32 zone_shift = ilog2(zone_blocks);
	u32 nr_zones;
	int ret;

	/* chunk_sectors indicates the zone size */
	blk_queue_chunk_sectors(q,
			logical_to_sectors(sdkp->device, zone_blocks));
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;

	/*
	 * Initialize the device request queue information if the number
	 * of zones changed.
	 */
	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
		size_t zone_bitmap_size;

		if (nr_zones) {
			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
								   q->node);
			if (!seq_zones_wlock) {
				ret = -ENOMEM;
				goto err;
			}

			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
							zone_shift, nr_zones);
			if (IS_ERR(seq_zones_bitmap)) {
				ret = PTR_ERR(seq_zones_bitmap);
				kfree(seq_zones_wlock);
				goto err;
			}
		}

		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
			sizeof(unsigned long);
		blk_mq_freeze_queue(q);
		if (q->nr_zones != nr_zones) {
			/* READ16/WRITE16 is mandatory for ZBC disks */
			sdkp->device->use_16_for_rw = 1;
			sdkp->device->use_10_for_rw = 0;

			sdkp->zone_blocks = zone_blocks;
			sdkp->zone_shift = zone_shift;
			sdkp->nr_zones = nr_zones;
			q->nr_zones = nr_zones;
			swap(q->seq_zones_wlock, seq_zones_wlock);
			swap(q->seq_zones_bitmap, seq_zones_bitmap);
		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
				  zone_bitmap_size) != 0) {
			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
			       zone_bitmap_size);
		}
		blk_mq_unfreeze_queue(q);
		kfree(seq_zones_wlock);
		kfree(seq_zones_bitmap);
	}

	return 0;

err:
	sd_zbc_cleanup(sdkp);
	return ret;
}
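
/*
 * Design note: the zone bitmaps are swapped in above while the queue is
 * frozen so that no request in flight can observe a half-updated bitmap;
 * the previous bitmaps (held in the local variables after swap()) are
 * freed only after the queue is unfrozen.
 */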

int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
	int64_t zone_blocks;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* Get zoned block device characteristics */
	ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
			  "constrained reads devices are not supported\n");
		ret = -ENODEV;
		goto err;
	}

	/* Check capacity */
	ret = sd_zbc_check_capacity(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check zone size: only devices with a constant zone size (except
	 * an eventual last runt zone) that is a power of 2 are supported.
	 */
	zone_blocks = sd_zbc_check_zone_size(sdkp);
	ret = -EFBIG;
	if (zone_blocks != (u32)zone_blocks)
		goto err;
	ret = zone_blocks;
	if (ret < 0)
		goto err;

	/* The drive satisfies the kernel restrictions: set it up */
	ret = sd_zbc_setup(sdkp, zone_blocks);
	if (ret)
		goto err;

	return 0;

err:
	sdkp->capacity = 0;
	sd_zbc_cleanup(sdkp);

	return ret;
}

void sd_zbc_remove(struct scsi_disk *sdkp)
{
	sd_zbc_cleanup(sdkp);
}

void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->nr_zones - 1,
			  sdkp->zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->nr_zones,
			  sdkp->zone_blocks);
}