/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/blkdev.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"
/**
 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone
 * @sdkp: The disk the report originated from
 * @buf: Address of the report zone descriptor
 * @zone: the destination zone structure
 *
 * All LBA-sized values are converted to 512B sector units.
 */
static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
				struct blk_zone *zone)
{
	struct scsi_device *sdp = sdkp->device;

	memset(zone, 0, sizeof(struct blk_zone));

	zone->type = buf[0] & 0x0f;
	zone->cond = (buf[1] >> 4) & 0xf;

	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
	if (zone->type != ZBC_ZONE_TYPE_CONV &&
	    zone->cond == ZBC_ZONE_COND_FULL)
		zone->wp = zone->start + zone->len;
}
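
/*
 * Note on the on-wire format consumed above (ZBC REPORT ZONES data): each
 * zone descriptor is 64 bytes, with the zone type in bits 0-3 of byte 0,
 * the zone condition in bits 4-7 of byte 1, and big-endian 64-bit fields
 * for the zone length (bytes 8-15), zone start LBA (bytes 16-23) and
 * write pointer LBA (bytes 24-31). A full zone has no meaningful write
 * pointer, so it is set to the end of the zone here.
 */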
/**
 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: Buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 *
 * For internal use during device validation.
 */
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
			       unsigned int buflen, sector_t lba)
{
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout;
	struct scsi_sense_hdr sshdr;
	unsigned char cmd[16];
	unsigned int rep_len;
	int result;

	memset(cmd, 0, 16);
	cmd[0] = ZBC_IN;
	cmd[1] = ZI_REPORT_ZONES;
	put_unaligned_be64(lba, &cmd[2]);
	put_unaligned_be32(buflen, &cmd[10]);
	memset(buf, 0, buflen);

	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
				  buf, buflen, &sshdr,
				  timeout, SD_MAX_RETRIES, NULL);
	if (result) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES lba %llu failed with %d/%d\n",
			  (unsigned long long)lba,
			  host_byte(result), driver_byte(result));
		return -EIO;
	}

	rep_len = get_unaligned_be32(&buf[0]);
	if (rep_len < 64) {
		sd_printk(KERN_ERR, sdkp,
			  "REPORT ZONES report invalid length %u\n",
			  rep_len);
		return -EIO;
	}

	return 0;
}
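
/*
 * The REPORT ZONES reply parsed above starts with a 64-byte header: bytes
 * 0-3 hold the zone list length (the number of descriptor bytes available),
 * bits 0-3 of byte 4 hold the SAME field, and bytes 8-15 hold the maximum
 * LBA of the device. Zone descriptors follow from byte 64 onward, 64 bytes
 * each, which is why a reply shorter than 64 bytes is rejected as invalid.
 */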
/**
 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
 * @cmd: The command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
 */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t lba, sector = blk_rq_pos(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);
	int ret;

	WARN_ON(nr_bytes == 0);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	ret = scsi_init_io(cmd);
	if (ret != BLKPREP_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_IN;
	cmd->cmnd[1] = ZI_REPORT_ZONES;
	lba = sectors_to_logical(sdkp->device, sector);
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
	/* Do partial report for speeding things up */
	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;

	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->sdb.length = nr_bytes;
	cmd->transfersize = sdkp->device->sector_size;
	cmd->allowed = 0;

	/*
	 * The report may return fewer bytes than requested. Make sure
	 * to report completion on the entire initial request.
	 */
	rq->__data_len = nr_bytes;

	return BLKPREP_OK;
}
/**
 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
 * @scmd: The completed report zones command
 * @good_bytes: reply size in bytes
 *
 * Convert all reported zone descriptors to struct blk_zone. The conversion
 * is done in-place, directly in the request specified sg buffer.
 */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
					 unsigned int good_bytes)
{
	struct request *rq = scmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct sg_mapping_iter miter;
	struct blk_zone_report_hdr hdr;
	struct blk_zone zone;
	unsigned int offset, bytes = 0;
	unsigned long flags;
	u8 *buf;

	if (good_bytes < 64)
		return;

	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter) && bytes < good_bytes) {

		buf = miter.addr;
		offset = 0;

		if (bytes == 0) {
			/* Set the report header */
			hdr.nr_zones = min_t(unsigned int,
					     (good_bytes - 64) / 64,
					     get_unaligned_be32(&buf[0]) / 64);
			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
			offset += 64;
			bytes += 64;
		}

		/* Parse zone descriptors */
		while (offset < miter.length && hdr.nr_zones) {
			WARN_ON(offset > miter.length);
			buf = miter.addr + offset;
			sd_zbc_parse_report(sdkp, buf, &zone);
			memcpy(buf, &zone, sizeof(struct blk_zone));
			offset += 64;
			bytes += 64;
			hdr.nr_zones--;
		}

		if (!hdr.nr_zones)
			break;
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
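
/*
 * The in-place conversion above relies on struct blk_zone_report_hdr and
 * struct blk_zone each fitting within the 64 bytes of, respectively, the
 * REPORT ZONES header and one zone descriptor, so every converted entry
 * overwrites exactly the on-wire bytes it was parsed from.
 */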
/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
/**
 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
 * @cmd: the command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
 */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);
	sector_t block = sectors_to_logical(sdkp->device, sector);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	if (sdkp->device->changed)
		return BLKPREP_KILL;

	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
		/* Unaligned request */
		return BLKPREP_KILL;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_OUT;
	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
	put_unaligned_be64(block, &cmd->cmnd[2]);

	rq->timeout = SD_TIMEOUT;
	cmd->sc_data_direction = DMA_NONE;
	cmd->transfersize = 0;
	cmd->allowed = 0;

	return BLKPREP_OK;
}
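
/*
 * RESET WRITE POINTER is a ZBC OUT service action carrying the start LBA
 * of the target zone in the CDB and transferring no data. The sector is
 * checked against the zone size mask above because the command is only
 * valid at a zone start; resets aimed at conventional zones fail with
 * ILLEGAL REQUEST and are silenced in sd_zbc_complete().
 */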
/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done(). Process report zones reply and handle reset zone
 * and write command errors.
 */
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
		     struct scsi_sense_hdr *sshdr)
{
	int result = cmd->result;
	struct request *rq = cmd->request;

	switch (req_op(rq)) {
	case REQ_OP_ZONE_RESET:

		if (result &&
		    sshdr->sense_key == ILLEGAL_REQUEST &&
		    sshdr->asc == 0x24)
			/*
			 * INVALID FIELD IN CDB error: reset of a conventional
			 * zone was attempted. Nothing to worry about, so be
			 * quiet about the error.
			 */
			rq->rq_flags |= RQF_QUIET;
		break;

	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:

		if (result &&
		    sshdr->sense_key == ILLEGAL_REQUEST &&
		    sshdr->asc == 0x21)
			/*
			 * INVALID ADDRESS FOR WRITE error: It is unlikely that
			 * retrying write requests that failed with any kind of
			 * alignment error will result in success. So don't.
			 */
			cmd->allowed = 0;
		break;

	case REQ_OP_ZONE_REPORT:

		if (!result)
			sd_zbc_report_zones_complete(cmd, good_bytes);
		break;
	}
}
/**
 * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read the Zoned Block Device Characteristics VPD page (0xb6).
 */
static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
					     unsigned char *buf)
{
	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Unconstrained-read check failed\n");
		return -ENODEV;
	}

	if (sdkp->device->type != TYPE_ZBC) {
		/* Host-aware */
		sdkp->urswrz = 1;
		sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
		sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
		sdkp->zones_max_open = 0;
	} else {
		/* Host-managed */
		sdkp->urswrz = buf[4] & 1;
		sdkp->zones_optimal_open = 0;
		sdkp->zones_optimal_nonseq = 0;
		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
	}

	return 0;
}
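
/*
 * Layout of the Zoned Block Device Characteristics VPD page used above:
 * bit 0 of byte 4 is URSWRZ (unrestricted read in sequential write required
 * zone), bytes 8-11 and 12-15 hold the optimal numbers of open and of
 * non-sequentially written sequential write preferred zones (host-aware
 * drives), and bytes 16-19 hold the maximum number of open sequential
 * write required zones (host-managed drives, TYPE_ZBC).
 */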
/**
 * sd_zbc_check_capacity - Check reported capacity.
 * @sdkp: Target disk
 * @buf: Buffer to use for commands
 *
 * ZBC drives may report only the capacity of their first conventional zones
 * at LBA 0. This is indicated by the RC_BASIS field of the READ CAPACITY
 * reply. Check this here. If the disk reported only its conventional zones
 * capacity, get the total capacity with a report zones command.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
{
	sector_t lba;
	int ret;

	if (sdkp->rc_basis != 0)
		return 0;

	/* Do a report zone to get the maximum LBA to check capacity */
	ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
	if (ret)
		return ret;

	/* The max_lba field is the capacity of this device */
	lba = get_unaligned_be64(&buf[8]);
	if (lba + 1 == sdkp->capacity)
		return 0;

	if (sdkp->first_scan)
		sd_printk(KERN_WARNING, sdkp,
			  "Changing capacity from %llu to max LBA+1 %llu\n",
			  (unsigned long long)sdkp->capacity,
			  (unsigned long long)lba + 1);
	sdkp->capacity = lba + 1;

	return 0;
}
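
/*
 * RC_BASIS comes from the READ CAPACITY(16) data parsed in sd.c: a value of
 * 0 means the returned capacity may cover only the conventional zones of a
 * zoned drive, while 1 means it covers the whole LBA space, so only the
 * former case needs the REPORT ZONES maximum LBA fixup above.
 */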
#define SD_ZBC_BUF_SIZE 131072U
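
/*
 * 131072 bytes is 2048 64-byte entries per REPORT ZONES reply: one header
 * plus up to 2047 zone descriptors for each internal report iteration.
 */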
/**
 * sd_zbc_check_zone_size - Check the device zone sizes
 * @sdkp: Target disk
 *
 * Check that all zones of the device are equal. The last zone can however
 * be smaller. The zone size must also be a power of two number of LBAs.
 *
 * Returns the zone size in number of blocks upon success or an error code
 * upon failure.
 */
static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
	u64 zone_blocks = 0;
	sector_t block = 0;
	unsigned char *buf;
	unsigned char *rec;
	unsigned int buf_len;
	unsigned int list_length;
	s64 ret;
	u8 same;

	/* Get a buffer */
	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Do a report zone to get the same field */
	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
	if (ret)
		goto out_free;

	same = buf[4] & 0x0f;
	if (same > 0) {
		rec = &buf[64];
		zone_blocks = get_unaligned_be64(&rec[8]);
		goto out;
	}

	/*
	 * Check the size of all zones: all zones must be of
	 * equal size, except the last zone which can be smaller
	 * than other zones.
	 */
	do {

		/* Parse REPORT ZONES header */
		list_length = get_unaligned_be32(&buf[0]) + 64;
		rec = buf + 64;
		buf_len = min(list_length, SD_ZBC_BUF_SIZE);

		/* Parse zone descriptors */
		while (rec < buf + buf_len) {
			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);

			if (zone_blocks == 0) {
				zone_blocks = this_zone_blocks;
			} else if (this_zone_blocks != zone_blocks &&
				   (block + this_zone_blocks < sdkp->capacity
				    || this_zone_blocks > zone_blocks)) {
				this_zone_blocks = 0;
				goto out;
			}
			block += this_zone_blocks;
			rec += 64;
		}

		if (block < sdkp->capacity) {
			ret = sd_zbc_report_zones(sdkp, buf,
						  SD_ZBC_BUF_SIZE, block);
			if (ret)
				goto out_free;
		}

	} while (block < sdkp->capacity);

out:
	if (!zone_blocks) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non constant zone "
				  "size are not supported\n");
		ret = -ENODEV;
	} else if (!is_power_of_2(zone_blocks)) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Devices with non power of 2 zone "
				  "size are not supported\n");
		ret = -ENODEV;
	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
				  "Zone size too large\n");
		ret = -ENODEV;
	} else {
		ret = zone_blocks;
	}

out_free:
	kfree(buf);

	return ret;
}
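
/*
 * The power-of-two requirement exists because the rest of the driver and
 * the block layer locate zones with shift/mask arithmetic: sd_zbc_setup()
 * computes zone_shift = ilog2(zone_blocks) and a block's zone number is
 * then simply block >> zone_shift, with no division on the hot path.
 */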
/**
 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
 * @nr_zones: Number of zones to allocate space for.
 * @numa_node: NUMA node to allocate the memory from.
 */
static inline unsigned long *
sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
	return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
			    GFP_KERNEL, numa_node);
}
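
/*
 * One bit per zone, rounded up to whole unsigned longs by BITS_TO_LONGS().
 * Callers pass the request queue's NUMA node so the bitmaps end up on the
 * same node as the queue they are attached to.
 */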
/**
 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
 * @sdkp: disk used
 * @buf: report reply buffer
 * @buflen: length of @buf in bytes
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @seq_zones_bitmap: bitmap of sequential zones to set
 *
 * Parse reported zone descriptors in @buf to identify sequential zones and
 * set the reported zone bit in @seq_zones_bitmap accordingly.
 * Since read-only and offline zones cannot be written, do not
 * mark them as sequential in the bitmap.
 * Return the LBA after the last zone reported.
 */
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
				     unsigned int buflen, u32 zone_shift,
				     unsigned long *seq_zones_bitmap)
{
	sector_t lba, next_lba = sdkp->capacity;
	unsigned int buf_len, list_length;
	unsigned char *rec;
	u8 type, cond;

	list_length = get_unaligned_be32(&buf[0]) + 64;
	buf_len = min(list_length, buflen);
	rec = buf + 64;

	while (rec < buf + buf_len) {
		type = rec[0] & 0x0f;
		cond = (rec[1] >> 4) & 0xf;
		lba = get_unaligned_be64(&rec[16]);
		if (type != ZBC_ZONE_TYPE_CONV &&
		    cond != ZBC_ZONE_COND_READONLY &&
		    cond != ZBC_ZONE_COND_OFFLINE)
			set_bit(lba >> zone_shift, seq_zones_bitmap);
		next_lba = lba + get_unaligned_be64(&rec[8]);
		rec += 64;
	}

	return next_lba;
}
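
/*
 * The returned next_lba is the LBA immediately following the last reported
 * zone (start plus length of that zone). The caller keeps issuing REPORT
 * ZONES from that LBA until it reaches the disk capacity, so every zone is
 * seen exactly once even when the zone list does not fit in one buffer.
 */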
/**
 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
 * @sdkp: target disk
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @nr_zones: number of zones to set up a seq zone bitmap for
 *
 * Allocate a zone bitmap and initialize it by identifying sequential zones.
 */
static unsigned long *
sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
			      u32 nr_zones)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned long *seq_zones_bitmap;
	sector_t lba = 0;
	unsigned char *buf;
	int ret = -ENOMEM;

	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
	if (!seq_zones_bitmap)
		return ERR_PTR(-ENOMEM);

	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	while (lba < sdkp->capacity) {
		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
		if (ret)
			goto out;
		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
					   zone_shift, seq_zones_bitmap);
	}

	if (lba != sdkp->capacity) {
		/* Something went wrong */
		ret = -EIO;
	}

out:
	kfree(buf);
	if (ret) {
		kfree(seq_zones_bitmap);
		return ERR_PTR(ret);
	}
	return seq_zones_bitmap;
}
static void sd_zbc_cleanup(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;

	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;

	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;

	q->nr_zones = 0;
}
static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
	struct request_queue *q = sdkp->disk->queue;
	u32 zone_shift = ilog2(zone_blocks);
	u32 nr_zones;
	int ret;

	/* chunk_sectors indicates the zone size */
	blk_queue_chunk_sectors(q,
			logical_to_sectors(sdkp->device, zone_blocks));
	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;

	/*
	 * Initialize the device request queue information if the number
	 * of zones changed.
	 */
	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
		size_t zone_bitmap_size;

		if (nr_zones) {
			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
								   q->node);
			if (!seq_zones_wlock) {
				ret = -ENOMEM;
				goto err;
			}

			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
						zone_shift, nr_zones);
			if (IS_ERR(seq_zones_bitmap)) {
				ret = PTR_ERR(seq_zones_bitmap);
				kfree(seq_zones_wlock);
				goto err;
			}
		}
		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
			sizeof(unsigned long);
		blk_mq_freeze_queue(q);
		if (q->nr_zones != nr_zones) {
			/* READ16/WRITE16 is mandatory for ZBC disks */
			sdkp->device->use_16_for_rw = 1;
			sdkp->device->use_10_for_rw = 0;

			sdkp->zone_blocks = zone_blocks;
			sdkp->zone_shift = zone_shift;
			sdkp->nr_zones = nr_zones;
			q->nr_zones = nr_zones;
			swap(q->seq_zones_wlock, seq_zones_wlock);
			swap(q->seq_zones_bitmap, seq_zones_bitmap);
		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
				  zone_bitmap_size) != 0) {
			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
			       zone_bitmap_size);
		}
		blk_mq_unfreeze_queue(q);
		kfree(seq_zones_wlock);
		kfree(seq_zones_bitmap);
	}

	return 0;

err:
	sd_zbc_cleanup(sdkp);
	return ret;
}
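
/*
 * blk_mq_freeze_queue() above waits for in-flight requests to complete and
 * blocks new submissions before the zone bookkeeping (nr_zones and the two
 * bitmaps) is swapped or updated, so submission paths never observe a
 * half-updated zoned configuration during revalidation.
 */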
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
	int64_t zone_blocks;
	int ret;

	if (!sd_is_zoned(sdkp))
		/*
		 * Device managed or normal SCSI disk,
		 * no special handling required
		 */
		return 0;

	/* Get zoned block device characteristics */
	ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check for unconstrained reads: host-managed devices with
	 * constrained reads (drives failing read after write pointer)
	 * are not supported.
	 */
	if (!sdkp->urswrz) {
		if (sdkp->first_scan)
			sd_printk(KERN_NOTICE, sdkp,
			  "constrained reads devices are not supported\n");
		ret = -ENODEV;
		goto err;
	}

	/* Check capacity */
	ret = sd_zbc_check_capacity(sdkp, buf);
	if (ret)
		goto err;

	/*
	 * Check zone size: only devices with a constant zone size (except
	 * an eventual last runt zone) that is a power of 2 are supported.
	 */
	zone_blocks = sd_zbc_check_zone_size(sdkp);
	if (zone_blocks < 0) {
		ret = zone_blocks;
		goto err;
	}
	if (zone_blocks != (u32)zone_blocks) {
		ret = -EFBIG;
		goto err;
	}

	/* The drive satisfies the kernel restrictions: set it up */
	ret = sd_zbc_setup(sdkp, zone_blocks);
	if (ret)
		goto err;

	return 0;

err:
	sdkp->capacity = 0;
	sd_zbc_cleanup(sdkp);

	return ret;
}
void sd_zbc_remove(struct scsi_disk *sdkp)
{
	sd_zbc_cleanup(sdkp);
}
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
		return;

	if (sdkp->capacity & (sdkp->zone_blocks - 1))
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks + 1 runt zone\n",
			  sdkp->nr_zones - 1,
			  sdkp->zone_blocks);
	else
		sd_printk(KERN_NOTICE, sdkp,
			  "%u zones of %u logical blocks\n",
			  sdkp->nr_zones,
			  sdkp->zone_blocks);
}