/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include <linux/blkdev.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"

/**
 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone
 * @sdkp: The disk the report originated from
 * @buf: Address of the report zone descriptor
 * @zone: the destination zone structure
 *
 * All LBA sized values are converted to 512B sectors unit.
 */
static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
                                struct blk_zone *zone)
{
        struct scsi_device *sdp = sdkp->device;

        memset(zone, 0, sizeof(struct blk_zone));

        zone->type = buf[0] & 0x0f;
        zone->cond = (buf[1] >> 4) & 0xf;

        zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
        zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
        zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
        if (zone->type != ZBC_ZONE_TYPE_CONV &&
            zone->cond == ZBC_ZONE_COND_FULL)
                zone->wp = zone->start + zone->len;
}
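
/*
 * Note: the offsets used above mirror the ZBC REPORT ZONES zone descriptor
 * layout (one 64 B descriptor per zone), as decoded by sd_zbc_parse_report():
 *
 *   byte 0, bits 3:0    zone type
 *   byte 1, bits 7:4    zone condition
 *   bytes 8..15         zone length in logical blocks (big endian)
 *   bytes 16..23        zone start LBA (big endian)
 *   bytes 24..31        write pointer LBA (big endian)
 */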

/**
 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: Buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 *
 * For internal use during device validation.
 */
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
                               unsigned int buflen, sector_t lba)
{
        struct scsi_device *sdp = sdkp->device;
        const int timeout = sdp->request_queue->rq_timeout;
        struct scsi_sense_hdr sshdr;
        unsigned char cmd[16];
        unsigned int rep_len;
        int result;

        memset(cmd, 0, 16);
        cmd[0] = ZBC_IN;
        cmd[1] = ZI_REPORT_ZONES;
        put_unaligned_be64(lba, &cmd[2]);
        put_unaligned_be32(buflen, &cmd[10]);
        memset(buf, 0, buflen);

        result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
                                  buf, buflen, &sshdr,
                                  timeout, SD_MAX_RETRIES, NULL);
        if (result) {
                sd_printk(KERN_ERR, sdkp,
                          "REPORT ZONES lba %llu failed with %d/%d\n",
                          (unsigned long long)lba,
                          host_byte(result), driver_byte(result));
                return -EIO;
        }

        rep_len = get_unaligned_be32(&buf[0]);
        if (rep_len < 64) {
                sd_printk(KERN_ERR, sdkp,
                          "REPORT ZONES report invalid length %u\n",
                          rep_len);
                return -EIO;
        }

        return 0;
}
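
/*
 * The 16-byte CDB built above is, for reference:
 *
 *   cmd[0]        ZBC_IN opcode
 *   cmd[1]        ZI_REPORT_ZONES service action
 *   cmd[2..9]     zone start LBA (big endian)
 *   cmd[10..13]   allocation length, i.e. the reply buffer size (big endian)
 *
 * A successful reply starts with a 64 B header (zone list length in bytes at
 * offset 0, maximum LBA at offset 8) followed by 64 B zone descriptors.
 */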

/**
 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
 * @cmd: The command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
 */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t lba, sector = blk_rq_pos(rq);
        unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;

        WARN_ON(nr_bytes == 0);

        if (!sd_is_zoned(sdkp))
                /* Not a zoned device */
                return BLKPREP_KILL;

        ret = scsi_init_io(cmd);
        if (ret != BLKPREP_OK)
                return ret;

        cmd->cmd_len = 16;
        memset(cmd->cmnd, 0, cmd->cmd_len);
        cmd->cmnd[0] = ZBC_IN;
        cmd->cmnd[1] = ZI_REPORT_ZONES;
        lba = sectors_to_logical(sdkp->device, sector);
        put_unaligned_be64(lba, &cmd->cmnd[2]);
        put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
        /* Do partial report for speeding things up */
        cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;

        cmd->sc_data_direction = DMA_FROM_DEVICE;
        cmd->sdb.length = nr_bytes;
        cmd->transfersize = sdkp->device->sector_size;

        /*
         * Report may return less bytes than requested. Make sure
         * to report completion on the entire initial request.
         */
        rq->__data_len = nr_bytes;

        return BLKPREP_OK;
}
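
/*
 * Buffer sizing example for a REQ_OP_ZONE_REPORT request: the reply uses a
 * 64 B header plus one 64 B descriptor per zone, so reporting 32 zones in a
 * single command needs at least 64 + 32 * 64 = 2112 bytes of reply buffer.
 */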

/**
 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
 * @scmd: The completed report zones command
 * @good_bytes: reply size in bytes
 *
 * Convert all reported zone descriptors to struct blk_zone. The conversion
 * is done in-place, directly in the request specified sg buffer.
 */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
                                         unsigned int good_bytes)
{
        struct request *rq = scmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        struct sg_mapping_iter miter;
        struct blk_zone_report_hdr hdr;
        struct blk_zone zone;
        unsigned int offset, bytes = 0;
        unsigned long flags;
        u8 *buf;

        if (good_bytes < 64)
                return;

        memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));

        sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);

        local_irq_save(flags);
        while (sg_miter_next(&miter) && bytes < good_bytes) {

                buf = miter.addr;
                offset = 0;

                if (bytes == 0) {
                        /* Set the report header */
                        hdr.nr_zones = min_t(unsigned int,
                                         (good_bytes - 64) / 64,
                                         get_unaligned_be32(&buf[0]) / 64);
                        memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
                        offset += 64;
                        bytes += 64;
                }

                /* Parse zone descriptors */
                while (offset < miter.length && hdr.nr_zones) {
                        WARN_ON(offset > miter.length);
                        buf = miter.addr + offset;
                        sd_zbc_parse_report(sdkp, buf, &zone);
                        memcpy(buf, &zone, sizeof(struct blk_zone));
                        offset += 64;
                        bytes += 64;
                        hdr.nr_zones--;
                }

                if (!hdr.nr_zones)
                        break;
        }
        sg_miter_stop(&miter);
        local_irq_restore(flags);
}
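
/*
 * The in-place conversion above relies on struct blk_zone fitting in the
 * 64 B ZBC zone descriptor that it overwrites (the uapi structure is sized
 * to 64 bytes). A build-time check along the lines of
 *
 *	BUILD_BUG_ON(sizeof(struct blk_zone) > 64);
 *
 * would make that assumption explicit; it is only sketched here.
 */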

/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
        return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
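
/*
 * Example: with 4096 B logical blocks and a zone size of 65536 blocks
 * (256 MiB), sd_zbc_zone_sectors() returns 65536 * (4096 / 512) = 524288
 * 512 B sectors.
 */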

/**
 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
 * @cmd: the command to setup
 *
 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
 */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t sector = blk_rq_pos(rq);
        sector_t block = sectors_to_logical(sdkp->device, sector);

        if (!sd_is_zoned(sdkp))
                /* Not a zoned device */
                return BLKPREP_KILL;

        if (sdkp->device->changed)
                return BLKPREP_KILL;

        if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
                /* Unaligned request */
                return BLKPREP_KILL;

        cmd->cmd_len = 16;
        memset(cmd->cmnd, 0, cmd->cmd_len);
        cmd->cmnd[0] = ZBC_OUT;
        cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
        put_unaligned_be64(block, &cmd->cmnd[2]);

        rq->timeout = SD_TIMEOUT;
        cmd->sc_data_direction = DMA_NONE;
        cmd->transfersize = 0;

        return BLKPREP_OK;
}
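
/*
 * Zone alignment check example: with 524288-sector zones (256 MiB with
 * 512 B sectors), a reset at sector 1048576 (= 2 * 524288) passes the
 * "sector & (sd_zbc_zone_sectors(sdkp) - 1)" test, while a reset at sector
 * 1048580 does not and is failed with BLKPREP_KILL.
 */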

/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done(). Process report zones reply and handle reset zone
 * and write commands errors.
 */
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
                     struct scsi_sense_hdr *sshdr)
{
        int result = cmd->result;
        struct request *rq = cmd->request;

        switch (req_op(rq)) {
        case REQ_OP_ZONE_RESET:

                if (result &&
                    sshdr->sense_key == ILLEGAL_REQUEST &&
                    sshdr->asc == 0x24)
                        /*
                         * INVALID FIELD IN CDB error: reset of a conventional
                         * zone was attempted. Nothing to worry about, so be
                         * quiet about the error.
                         */
                        rq->rq_flags |= RQF_QUIET;
                break;

        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE_SAME:
                break;

        case REQ_OP_ZONE_REPORT:

                if (!result)
                        sd_zbc_report_zones_complete(cmd, good_bytes);
                break;
        }
}

/**
 * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6.
 */
static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
                                             unsigned char *buf)
{
        if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
                sd_printk(KERN_NOTICE, sdkp,
                          "Unconstrained-read check failed\n");
                return -ENODEV;
        }

        if (sdkp->device->type != TYPE_ZBC) {
                /* Host-aware */
                sdkp->urswrz = 1;
                sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
                sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
                sdkp->zones_max_open = 0;
        } else {
                /* Host-managed */
                sdkp->urswrz = buf[4] & 1;
                sdkp->zones_optimal_open = 0;
                sdkp->zones_optimal_nonseq = 0;
                sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
        }

        return 0;
}
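
/*
 * Zoned Block Device Characteristics VPD page (B6h) bytes used above:
 * bit 0 of byte 4 is the URSWRZ flag (unrestricted reads in sequential
 * write required zones), bytes 8..11 and 12..15 carry the optimal open /
 * non-sequentially-written zone counts reported by host-aware drives, and
 * bytes 16..19 give the maximum number of open zones of host-managed drives.
 */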

/**
 * sd_zbc_check_capacity - Check reported capacity.
 * @sdkp: Target disk
 * @buf: Buffer to use for commands
 *
 * A ZBC drive may report only the capacity of the first conventional zones at
 * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
 * Check this here. If the disk reported only its conventional zones capacity,
 * get the total capacity by doing a report zones.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
{
        sector_t lba;
        int ret;

        if (sdkp->rc_basis != 0)
                return 0;

        /* Do a report zone to get the maximum LBA to check capacity */
        ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
        if (ret)
                return ret;

        /* The max_lba field is the capacity of this device */
        lba = get_unaligned_be64(&buf[8]);
        if (lba + 1 == sdkp->capacity)
                return 0;

        if (sdkp->first_scan)
                sd_printk(KERN_WARNING, sdkp,
                          "Changing capacity from %llu to max LBA+1 %llu\n",
                          (unsigned long long)sdkp->capacity,
                          (unsigned long long)lba + 1);
        sdkp->capacity = lba + 1;

        return 0;
}

#define SD_ZBC_BUF_SIZE 131072U

/**
 * sd_zbc_check_zone_size - Check the device zone sizes
 * @sdkp: Target disk
 *
 * Check that all zones of the device are equal. The last zone can however
 * be smaller. The zone size must also be a power of two number of LBAs.
 *
 * Returns the zone size in number of blocks upon success or an error code
 * upon failure.
 */
static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
        u64 zone_blocks = 0;
        sector_t block = 0;
        unsigned char *buf;
        unsigned char *rec;
        unsigned int buf_len;
        unsigned int list_length;
        s64 ret;
        u8 same;

        /* Get a buffer */
        buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Do a report zone to get the same field */
        ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
        if (ret)
                goto out_free;

        same = buf[4] & 0x0f;
        if (same > 0) {
                rec = &buf[64];
                zone_blocks = get_unaligned_be64(&rec[8]);
                goto out;
        }

        /*
         * Check the size of all zones: all zones must be of
         * equal size, except the last zone which can be smaller
         * than other zones.
         */
        do {

                /* Parse REPORT ZONES header */
                list_length = get_unaligned_be32(&buf[0]) + 64;
                rec = buf + 64;
                buf_len = min(list_length, SD_ZBC_BUF_SIZE);

                /* Parse zone descriptors */
                while (rec < buf + buf_len) {
                        u64 this_zone_blocks = get_unaligned_be64(&rec[8]);

                        if (zone_blocks == 0) {
                                zone_blocks = this_zone_blocks;
                        } else if (this_zone_blocks != zone_blocks &&
                                   (block + this_zone_blocks < sdkp->capacity
                                    || this_zone_blocks > zone_blocks)) {
                                zone_blocks = 0;
                                goto out;
                        }
                        block += this_zone_blocks;
                        rec += 64;
                }

                if (block < sdkp->capacity) {
                        ret = sd_zbc_report_zones(sdkp, buf,
                                                  SD_ZBC_BUF_SIZE, block);
                        if (ret)
                                goto out_free;
                }

        } while (block < sdkp->capacity);

out:
        if (!zone_blocks) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Devices with non constant zone "
                                  "size are not supported\n");
                ret = -ENODEV;
        } else if (!is_power_of_2(zone_blocks)) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Devices with non power of 2 zone "
                                  "size are not supported\n");
                ret = -ENODEV;
        } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Zone size too large\n");
                ret = -ENODEV;
        } else {
                ret = zone_blocks;
        }

out_free:
        kfree(buf);

        return ret;
}
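
/*
 * Example of a zone size passing the checks above: a host-managed drive with
 * 256 MiB zones and 512 B logical blocks reports zones of
 * 268435456 / 512 = 524288 blocks, which is 2^19 and therefore accepted;
 * a drive reporting, say, 500000-block zones would be rejected as not a
 * power of two.
 */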

/**
 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
 * @nr_zones: Number of zones to allocate space for.
 * @numa_node: NUMA node to allocate the memory from.
 */
static inline unsigned long *
sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
{
        return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
                            GFP_KERNEL, numa_node);
}
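
/*
 * Memory cost example: on a 64-bit system, a disk with 100000 zones needs
 * BITS_TO_LONGS(100000) = 1563 longs, i.e. 12504 bytes (roughly 12 KiB),
 * for each bitmap allocated here.
 */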

/**
 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
 * @sdkp: The target disk
 * @buf: report reply buffer
 * @buflen: length of @buf
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @seq_zones_bitmap: bitmap of sequential zones to set
 *
 * Parse reported zone descriptors in @buf to identify sequential zones and
 * set the reported zone bit in @seq_zones_bitmap accordingly.
 * Since read-only and offline zones cannot be written, do not
 * mark them as sequential in the bitmap.
 * Return the LBA after the last zone reported.
 */
static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
                                     unsigned int buflen, u32 zone_shift,
                                     unsigned long *seq_zones_bitmap)
{
        sector_t lba, next_lba = sdkp->capacity;
        unsigned int buf_len, list_length;
        unsigned char *rec;
        u8 type, cond;

        list_length = get_unaligned_be32(&buf[0]) + 64;
        buf_len = min(list_length, buflen);
        rec = buf + 64;

        while (rec < buf + buf_len) {
                type = rec[0] & 0x0f;
                cond = (rec[1] >> 4) & 0xf;
                lba = get_unaligned_be64(&rec[16]);
                if (type != ZBC_ZONE_TYPE_CONV &&
                    cond != ZBC_ZONE_COND_READONLY &&
                    cond != ZBC_ZONE_COND_OFFLINE)
                        set_bit(lba >> zone_shift, seq_zones_bitmap);
                next_lba = lba + get_unaligned_be64(&rec[8]);
                rec += 64;
        }

        return next_lba;
}
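
/*
 * Bitmap indexing example: the bit number is the zone number, obtained by
 * shifting the zone start LBA right by zone_shift. With zone_shift = 19
 * (524288-block zones), a sequential zone starting at LBA 3670016 sets
 * bit 3670016 >> 19 = 7. The block layer can then test this bitmap (e.g.
 * through blk_queue_zone_is_seq()) without issuing a REPORT ZONES command.
 */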

/**
 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
 * @sdkp: target disk
 * @zone_shift: logarithm base 2 of the number of blocks in a zone
 * @nr_zones: number of zones to set up a seq zone bitmap for
 *
 * Allocate a zone bitmap and initialize it by identifying sequential zones.
 */
static unsigned long *
sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
                              u32 nr_zones)
{
        struct request_queue *q = sdkp->disk->queue;
        unsigned long *seq_zones_bitmap;
        sector_t lba = 0;
        unsigned char *buf;
        int ret = -ENOMEM;

        seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
        if (!seq_zones_bitmap)
                return ERR_PTR(-ENOMEM);

        buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                goto out;

        while (lba < sdkp->capacity) {
                ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
                if (ret)
                        goto out;
                lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
                                           zone_shift, seq_zones_bitmap);
        }

        if (lba != sdkp->capacity) {
                /* Something went wrong */
                ret = -EIO;
        }

out:
        kfree(buf);
        if (ret) {
                kfree(seq_zones_bitmap);
                return ERR_PTR(ret);
        }

        return seq_zones_bitmap;
}

static void sd_zbc_cleanup(struct scsi_disk *sdkp)
{
        struct request_queue *q = sdkp->disk->queue;

        kfree(q->seq_zones_bitmap);
        q->seq_zones_bitmap = NULL;

        kfree(q->seq_zones_wlock);
        q->seq_zones_wlock = NULL;

        q->nr_zones = 0;
}

static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
{
        struct request_queue *q = sdkp->disk->queue;
        u32 zone_shift = ilog2(zone_blocks);
        u32 nr_zones;
        int ret;

        /* chunk_sectors indicates the zone size */
        blk_queue_chunk_sectors(q,
                        logical_to_sectors(sdkp->device, zone_blocks));
        nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;

        /*
         * Initialize the device request queue information if the number
         * of zones changed.
         */
        if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
                unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
                size_t zone_bitmap_size;

                seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
                                                           q->node);
                if (!seq_zones_wlock) {
                        ret = -ENOMEM;
                        goto err;
                }

                seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
                                                zone_shift, nr_zones);
                if (IS_ERR(seq_zones_bitmap)) {
                        ret = PTR_ERR(seq_zones_bitmap);
                        kfree(seq_zones_wlock);
                        goto err;
                }

                zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
                        sizeof(unsigned long);
                blk_mq_freeze_queue(q);
                if (q->nr_zones != nr_zones) {
                        /* READ16/WRITE16 is mandatory for ZBC disks */
                        sdkp->device->use_16_for_rw = 1;
                        sdkp->device->use_10_for_rw = 0;

                        sdkp->zone_blocks = zone_blocks;
                        sdkp->zone_shift = zone_shift;
                        sdkp->nr_zones = nr_zones;
                        q->nr_zones = nr_zones;
                        swap(q->seq_zones_wlock, seq_zones_wlock);
                        swap(q->seq_zones_bitmap, seq_zones_bitmap);
                } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
                                  zone_bitmap_size) != 0) {
                        memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
                               zone_bitmap_size);
                }
                blk_mq_unfreeze_queue(q);
                kfree(seq_zones_wlock);
                kfree(seq_zones_bitmap);
        }

        return 0;

err:
        sd_zbc_cleanup(sdkp);
        return ret;
}
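
/*
 * nr_zones computation example: a capacity of 10000000 logical blocks with
 * zone_blocks = 524288 (zone_shift = 19) gives
 * round_up(10000000, 524288) >> 19 = 10485760 >> 19 = 20 zones,
 * i.e. 19 full zones plus one final runt zone.
 */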

int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
        s64 zone_blocks;
        int ret;

        if (!sd_is_zoned(sdkp))
                /*
                 * Device managed or normal SCSI disk,
                 * no special handling required
                 */
                return 0;

        /* Get zoned block device characteristics */
        ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
        if (ret)
                goto err;

        /*
         * Check for unconstrained reads: host-managed devices with
         * constrained reads (drives failing read after write pointer)
         * are not supported.
         */
        if (!sdkp->urswrz) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                          "constrained reads devices are not supported\n");
                ret = -ENODEV;
                goto err;
        }

        /* Check capacity */
        ret = sd_zbc_check_capacity(sdkp, buf);
        if (ret)
                goto err;

        /*
         * Check zone size: only devices with a constant zone size (except
         * an eventual last runt zone) that is a power of 2 are supported.
         */
        zone_blocks = sd_zbc_check_zone_size(sdkp);
        ret = -EFBIG;
        if (zone_blocks != (u32)zone_blocks)
                goto err;
        ret = zone_blocks;
        if (ret < 0)
                goto err;

        /* The drive satisfies the kernel restrictions: set it up */
        ret = sd_zbc_setup(sdkp, zone_blocks);
        if (ret)
                goto err;

        return 0;

err:
        sdkp->capacity = 0;
        sd_zbc_cleanup(sdkp);

        return ret;
}

void sd_zbc_remove(struct scsi_disk *sdkp)
{
        sd_zbc_cleanup(sdkp);
}

void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
        if (!sd_is_zoned(sdkp) || !sdkp->capacity)
                return;

        if (sdkp->capacity & (sdkp->zone_blocks - 1))
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks + 1 runt zone\n",
                          sdkp->nr_zones - 1,
                          sdkp->zone_blocks);
        else
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks\n",
                          sdkp->nr_zones,
                          sdkp->zone_blocks);
}