// SPDX-License-Identifier: GPL-2.0-only
/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 */
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include "sd.h"
static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
{
        if (zone->type == ZBC_ZONE_TYPE_CONV)
                return 0;

        switch (zone->cond) {
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_EXP_OPEN:
        case BLK_ZONE_COND_CLOSED:
                return zone->wp - zone->start;
        case BLK_ZONE_COND_FULL:
                return zone->len;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_OFFLINE:
        case BLK_ZONE_COND_READONLY:
        default:
                /*
                 * Offline and read-only zones do not have a valid
                 * write pointer. Use 0 as for an empty zone.
                 */
                return 0;
        }
}
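
/*
 * sd_zbc_parse_report() below consumes one 64-byte zone descriptor from a
 * REPORT ZONES reply. Per ZBC/ZAC, the fields used are:
 *   byte 0,  bits 3:0 - zone type
 *   byte 1,  bits 7:4 - zone condition
 *   bytes 8  .. 15    - zone length in logical blocks (big-endian)
 *   bytes 16 .. 23    - zone start LBA (big-endian)
 *   bytes 24 .. 31    - write pointer LBA (big-endian)
 * LBA-sized fields are converted to 512B sector units for the block layer
 * using logical_to_sectors().
 */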
static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
                               unsigned int idx, report_zones_cb cb, void *data)
{
        struct scsi_device *sdp = sdkp->device;
        struct blk_zone zone = { 0 };
        int ret;

        zone.type = buf[0] & 0x0f;
        zone.cond = (buf[1] >> 4) & 0xf;
        if (buf[1] & 0x01)
                zone.reset = 1;
        if (buf[1] & 0x02)
                zone.non_seq = 1;

        zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
        zone.capacity = zone.len;
        zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
        zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
        if (zone.type != ZBC_ZONE_TYPE_CONV &&
            zone.cond == ZBC_ZONE_COND_FULL)
                zone.wp = zone.start + zone.len;

        ret = cb(&zone, idx, data);
        if (ret)
                return ret;

        if (sdkp->rev_wp_offset)
                sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);

        return 0;
}
/**
 * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
 * @sdkp: The target disk
 * @buf: vmalloc-ed buffer to use for the reply
 * @buflen: the buffer size
 * @lba: Start LBA of the report
 * @partial: Do partial report
 *
 * For internal use during device validation.
 * Using partial=true can significantly speed up execution of a report zones
 * command because the disk does not have to count all possible report matching
 * zones and will only report the count of zones fitting in the command reply
 * buffer.
 */
static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
                                  unsigned int buflen, sector_t lba,
                                  bool partial)
{
        struct scsi_device *sdp = sdkp->device;
        const int timeout = sdp->request_queue->rq_timeout;
        struct scsi_sense_hdr sshdr;
        unsigned char cmd[16];
        unsigned int rep_len;
        int result;

        memset(cmd, 0, 16);
        cmd[0] = ZBC_IN;
        cmd[1] = ZI_REPORT_ZONES;
        put_unaligned_be64(lba, &cmd[2]);
        put_unaligned_be32(buflen, &cmd[10]);
        if (partial)
                cmd[14] = ZBC_REPORT_ZONE_PARTIAL;

        result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
                                  buf, buflen, &sshdr,
                                  timeout, SD_MAX_RETRIES, NULL);
        if (result) {
                sd_printk(KERN_ERR, sdkp,
                          "REPORT ZONES start lba %llu failed\n", lba);
                sd_print_result(sdkp, "REPORT ZONES", result);
                if (driver_byte(result) == DRIVER_SENSE &&
                    scsi_sense_valid(&sshdr))
                        sd_print_sense_hdr(sdkp, &sshdr);
                return -EIO;
        }

        rep_len = get_unaligned_be32(&buf[0]);
        if (rep_len < 64) {
                sd_printk(KERN_ERR, sdkp,
                          "REPORT ZONES report invalid length %u\n",
                          rep_len);
                return -EIO;
        }

        return 0;
}
/**
 * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
 * @sdkp: The target disk
 * @nr_zones: Maximum number of zones to report
 * @buflen: Size of the buffer allocated
 *
 * Try to allocate a reply buffer for the number of requested zones.
 * The size of the buffer allocated may be smaller than requested to
 * satisfy the device constraint (max_hw_sectors, max_segments, etc).
 *
 * Return the address of the allocated buffer and update @buflen with
 * the size of the allocated buffer.
 */
static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
                                        unsigned int nr_zones, size_t *buflen)
{
        struct request_queue *q = sdkp->disk->queue;
        size_t bufsize;
        void *buf;

        /*
         * Report zone buffer size should be at most 64B times the number of
         * zones requested plus the 64B reply header, but should be at least
         * SECTOR_SIZE for ATA devices.
         * Make sure that this size does not exceed the hardware capabilities.
         * Furthermore, since the report zone command cannot be split, make
         * sure that the allocated buffer can always be mapped by limiting the
         * number of pages allocated to the HBA max segments limit.
         */
        nr_zones = min(nr_zones, sdkp->nr_zones);
        bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
        bufsize = min_t(size_t, bufsize,
                        queue_max_hw_sectors(q) << SECTOR_SHIFT);
        bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

        while (bufsize >= SECTOR_SIZE) {
                buf = __vmalloc(bufsize,
                                GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
                if (buf) {
                        *buflen = bufsize;
                        return buf;
                }
                /* Halve the request on allocation failure and retry */
                bufsize >>= 1;
        }

        return NULL;
}
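
/*
 * Sizing example for the allocation above: a report of 7 zones needs
 * (7 + 1) * 64 = 512 bytes, exactly one sector, while 8192 zones ask for
 * roundup(8193 * 64, 512) = 524800 bytes before being capped by the
 * queue max_hw_sectors and max_segments limits.
 */
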
/**
 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
 * @sdkp: The target disk
 */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
        return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct scsi_disk *sdkp = scsi_disk(disk);
        sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
        unsigned int nr, i;
        unsigned char *buf;
        size_t offset, buflen = 0;
        int zone_idx = 0;
        int ret;

        if (!sd_is_zoned(sdkp))
                /* Not a zoned device */
                return -EOPNOTSUPP;

        if (!capacity)
                /* Device gone or invalid */
                return -ENODEV;

        buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
        if (!buf)
                return -ENOMEM;

        while (zone_idx < nr_zones && sector < capacity) {
                ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
                                sectors_to_logical(sdkp->device, sector), true);
                if (ret)
                        goto out;

                offset = 0;
                nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64);
                if (!nr)
                        break;

                for (i = 0; i < nr && zone_idx < nr_zones; i++) {
                        offset += 64;
                        ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
                                                  cb, data);
                        if (ret)
                                goto out;
                        zone_idx++;
                }

                sector += sd_zbc_zone_sectors(sdkp) * i;
        }

        ret = zone_idx;
out:
        kvfree(buf);
        return ret;
}
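
/*
 * The loop above can advance by "i" zones at a time because zoned SCSI
 * disks expose a constant zone size (verified when the disk zones are
 * revalidated); only the last zone may be a smaller runt zone, and it is
 * also the last zone reported.
 */
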
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t sector = blk_rq_pos(rq);

        if (!sd_is_zoned(sdkp))
                /* Not a zoned device */
                return BLK_STS_IOERR;

        if (sdkp->device->changed)
                return BLK_STS_IOERR;

        if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
                /* Unaligned request */
                return BLK_STS_IOERR;

        return BLK_STS_OK;
}
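
/*
 * Write pointer offset cache states: SD_ZBC_INVALID_WP_OFST marks a stale
 * entry that must be refreshed from the device, SD_ZBC_UPDATING_WP_OFST
 * marks an entry whose refresh by zone_wp_offset_work is already in
 * flight, and any other value is a valid offset, in 512B sectors, from
 * the zone start.
 */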
#define SD_ZBC_INVALID_WP_OFST        (~0u)
#define SD_ZBC_UPDATING_WP_OFST        (SD_ZBC_INVALID_WP_OFST - 1)
static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
                                      void *data)
{
        struct scsi_disk *sdkp = data;

        lockdep_assert_held(&sdkp->zones_wp_offset_lock);

        sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);

        return 0;
}
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
        struct scsi_disk *sdkp;
        unsigned int zno;
        int ret;

        sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);

        spin_lock_bh(&sdkp->zones_wp_offset_lock);
        for (zno = 0; zno < sdkp->nr_zones; zno++) {
                if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
                        continue;

                /*
                 * The report zones command sleeps, so drop the lock while
                 * it is in flight and retake it to update the cached entry.
                 */
                spin_unlock_bh(&sdkp->zones_wp_offset_lock);
                ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
                                             SD_BUF_SIZE,
                                             zno * sdkp->zone_blocks, true);
                spin_lock_bh(&sdkp->zones_wp_offset_lock);
                if (!ret)
                        sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
                                            zno, sd_zbc_update_wp_offset_cb,
                                            sdkp);
        }
        spin_unlock_bh(&sdkp->zones_wp_offset_lock);

        scsi_device_put(sdkp->device);
}
/**
 * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
 * @cmd: the command to setup
 * @lba: the LBA to patch
 * @nr_blocks: the number of LBAs to be written
 *
 * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
 * sd_zbc_prepare_zone_append() handles the necessary zone write locking and
 * patching of the lba for an emulated ZONE_APPEND command.
 *
 * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
 * schedule a REPORT ZONES command and return BLK_STS_IOERR.
 */
blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
                                        unsigned int nr_blocks)
{
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int wp_offset, zno = blk_rq_zone_no(rq);
        blk_status_t ret;

        ret = sd_zbc_cmnd_checks(cmd);
        if (ret != BLK_STS_OK)
                return ret;

        if (!blk_rq_zone_is_seq(rq))
                return BLK_STS_IOERR;

        /* Unlock of the write lock will happen in sd_zbc_complete() */
        if (!blk_req_zone_write_trylock(rq))
                return BLK_STS_ZONE_RESOURCE;

        spin_lock_bh(&sdkp->zones_wp_offset_lock);
        wp_offset = sdkp->zones_wp_offset[zno];
        switch (wp_offset) {
        case SD_ZBC_INVALID_WP_OFST:
                /*
                 * We are about to schedule work to update a zone write pointer
                 * offset, which will cause the zone append command to be
                 * requeued. So make sure that the scsi device does not go away
                 * while the work is being processed.
                 */
                if (scsi_device_get(sdkp->device)) {
                        ret = BLK_STS_IOERR;
                        break;
                }
                sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
                schedule_work(&sdkp->zone_wp_offset_work);
                fallthrough;
        case SD_ZBC_UPDATING_WP_OFST:
                ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
                wp_offset = sectors_to_logical(sdkp->device, wp_offset);
                if (wp_offset + nr_blocks > sdkp->zone_blocks) {
                        ret = BLK_STS_IOERR;
                        break;
                }

                *lba += wp_offset;
        }
        spin_unlock_bh(&sdkp->zones_wp_offset_lock);
        if (ret)
                blk_req_zone_write_unlock(rq);
        return ret;
}
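
/*
 * With the LBA patched as above, an emulated zone append goes to the
 * device as a plain write at the cached write pointer. On completion,
 * sd_zbc_zone_wp_update() adds the zone write pointer offset to
 * rq->__sector so that the block layer sees the sector actually written,
 * as a native ZONE APPEND command would report it.
 */
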
/**
 * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
 *                        can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
 * @cmd: the command to setup
 * @op: Operation to be performed
 * @all: All zones control
 *
 * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests.
 */
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
                                         unsigned char op, bool all)
{
        struct request *rq = cmd->request;
        sector_t sector = blk_rq_pos(rq);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t block = sectors_to_logical(sdkp->device, sector);
        blk_status_t ret;

        ret = sd_zbc_cmnd_checks(cmd);
        if (ret != BLK_STS_OK)
                return ret;

        cmd->cmd_len = 16;
        memset(cmd->cmnd, 0, cmd->cmd_len);
        cmd->cmnd[0] = ZBC_OUT;
        cmd->cmnd[1] = op;
        if (all)
                cmd->cmnd[14] = 0x1;
        else
                put_unaligned_be64(block, &cmd->cmnd[2]);

        rq->timeout = SD_TIMEOUT;
        cmd->sc_data_direction = DMA_NONE;
        cmd->transfersize = 0;
        cmd->allowed = 0;

        return BLK_STS_OK;
}
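
/*
 * The resulting ZBC OUT CDB carries the service action (reset write
 * pointer, open, close or finish zone) in byte 1, the target zone start
 * LBA in bytes 2..9, and the ALL bit in byte 14 when the operation
 * applies to every zone of the device.
 */
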
static bool sd_zbc_need_zone_wp_update(struct request *rq)
{
        switch (req_op(rq)) {
        case REQ_OP_ZONE_APPEND:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
                return true;
        case REQ_OP_WRITE:
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE_SAME:
                return blk_rq_zone_is_seq(rq);
        default:
                return false;
        }
}
/**
 * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 *
 * Called from sd_zbc_complete() to handle the update of the cached zone write
 * pointer value in case an update is needed.
 */
static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
                                          unsigned int good_bytes)
{
        int result = cmd->result;
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int zno = blk_rq_zone_no(rq);
        enum req_opf op = req_op(rq);

        /*
         * If we got an error for a command that needs updating the write
         * pointer offset cache, we must mark the zone wp offset entry as
         * invalid to force an update from disk the next time a zone append
         * command is issued.
         */
        spin_lock_bh(&sdkp->zones_wp_offset_lock);

        if (result && op != REQ_OP_ZONE_RESET_ALL) {
                if (op == REQ_OP_ZONE_APPEND) {
                        /* Force complete completion (no retry) */
                        good_bytes = 0;
                        scsi_set_resid(cmd, blk_rq_bytes(rq));
                }

                /*
                 * Force an update of the zone write pointer offset on
                 * the next zone append access.
                 */
                if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
                        sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
                goto unlock_wp_offset;
        }

        switch (op) {
        case REQ_OP_ZONE_APPEND:
                rq->__sector += sdkp->zones_wp_offset[zno];
                fallthrough;
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE_SAME:
        case REQ_OP_WRITE:
                if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
                        sdkp->zones_wp_offset[zno] +=
                                                good_bytes >> SECTOR_SHIFT;
                break;
        case REQ_OP_ZONE_RESET:
                sdkp->zones_wp_offset[zno] = 0;
                break;
        case REQ_OP_ZONE_FINISH:
                sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
                break;
        case REQ_OP_ZONE_RESET_ALL:
                memset(sdkp->zones_wp_offset, 0,
                       sdkp->nr_zones * sizeof(unsigned int));
                break;
        default:
                break;
        }

unlock_wp_offset:
        spin_unlock_bh(&sdkp->zones_wp_offset_lock);

        return good_bytes;
}
/**
 * sd_zbc_complete - ZBC command post processing.
 * @cmd: Completed command
 * @good_bytes: Command reply bytes
 * @sshdr: command sense header
 *
 * Called from sd_done() to handle zone commands errors and updates to the
 * device queue zone write pointer offset cache.
 */
unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
                             struct scsi_sense_hdr *sshdr)
{
        int result = cmd->result;
        struct request *rq = cmd->request;

        if (op_is_zone_mgmt(req_op(rq)) &&
            result &&
            sshdr->sense_key == ILLEGAL_REQUEST &&
            sshdr->asc == 0x24) {
                /*
                 * INVALID FIELD IN CDB error: a zone management command was
                 * attempted on a conventional zone. Nothing to worry about,
                 * so be quiet about the error.
                 */
                rq->rq_flags |= RQF_QUIET;
        } else if (sd_zbc_need_zone_wp_update(rq))
                good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);

        if (req_op(rq) == REQ_OP_ZONE_APPEND)
                blk_req_zone_write_unlock(rq);

        return good_bytes;
}
/**
 * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
 * @sdkp: Target disk
 * @buf: Buffer where to store the VPD page data
 *
 * Read VPD page B6, get information and check that reads are unconstrained.
 */
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
                                              unsigned char *buf)
{
        if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
                sd_printk(KERN_NOTICE, sdkp,
                          "Read zoned characteristics VPD page failed\n");
                return -ENODEV;
        }

        if (sdkp->device->type != TYPE_ZBC) {
                /* Host-aware */
                sdkp->urswrz = 1;
                sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
                sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
                sdkp->zones_max_open = 0;
        } else {
                /* Host-managed */
                sdkp->urswrz = buf[4] & 1;
                sdkp->zones_optimal_open = 0;
                sdkp->zones_optimal_nonseq = 0;
                sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
        }

        /*
         * Check for unconstrained reads: host-managed devices with
         * constrained reads (drives failing read after write pointer)
         * are not supported.
         */
        if (!sdkp->urswrz) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "constrained reads devices are not supported\n");
                return -ENODEV;
        }

        return 0;
}
/**
 * sd_zbc_check_capacity - Check the device capacity
 * @sdkp: Target disk
 * @buf: command buffer
 * @zblocks: zone size in number of blocks
 *
 * Get the device zone size and check that the device capacity as reported
 * by READ CAPACITY matches the max_lba value (plus one) of the report zones
 * command reply for devices with RC_BASIS == 0.
 *
 * Returns 0 upon success or an error code upon failure.
 */
static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
                                 u32 *zblocks)
{
        u64 zone_blocks;
        sector_t max_lba;
        unsigned char *rec;
        int ret;

        /* Do a report zone to get max_lba and the size of the first zone */
        ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false);
        if (ret)
                return ret;

        if (sdkp->rc_basis == 0) {
                /* The max_lba field is the capacity of this device */
                max_lba = get_unaligned_be64(&buf[8]);
                if (sdkp->capacity != max_lba + 1) {
                        if (sdkp->first_scan)
                                sd_printk(KERN_WARNING, sdkp,
                                        "Changing capacity from %llu to max LBA+1 %llu\n",
                                        (unsigned long long)sdkp->capacity,
                                        (unsigned long long)max_lba + 1);
                        sdkp->capacity = max_lba + 1;
                }
        }

        /* Get the size of the first reported zone */
        rec = buf + 64;
        zone_blocks = get_unaligned_be64(&rec[8]);
        if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Zone size too large\n");
                return -EFBIG;
        }

        *zblocks = zone_blocks;

        return 0;
}
static void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
        if (!sd_is_zoned(sdkp) || !sdkp->capacity)
                return;

        if (sdkp->capacity & (sdkp->zone_blocks - 1))
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks + 1 runt zone\n",
                          sdkp->nr_zones - 1,
                          sdkp->zone_blocks);
        else
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks\n",
                          sdkp->nr_zones,
                          sdkp->zone_blocks);
}
static int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
        sdkp->zones_wp_offset = NULL;
        spin_lock_init(&sdkp->zones_wp_offset_lock);
        sdkp->rev_wp_offset = NULL;
        mutex_init(&sdkp->rev_mutex);
        INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
        sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
        if (!sdkp->zone_wp_update_buf)
                return -ENOMEM;

        return 0;
}
void sd_zbc_release_disk(struct scsi_disk *sdkp)
{
        kvfree(sdkp->zones_wp_offset);
        sdkp->zones_wp_offset = NULL;
        kfree(sdkp->zone_wp_update_buf);
        sdkp->zone_wp_update_buf = NULL;
}
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
        struct scsi_disk *sdkp = scsi_disk(disk);

        swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}
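
/*
 * Write pointer offsets are revalidated using double buffering:
 * sd_zbc_revalidate_zones() allocates sdkp->rev_wp_offset,
 * sd_zbc_parse_report() fills it while blk_revalidate_disk_zones() walks
 * the zone report, and the callback above swaps it with the live
 * zones_wp_offset array once the new report is complete.
 */
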
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
        struct gendisk *disk = sdkp->disk;
        struct request_queue *q = disk->queue;
        u32 zone_blocks = sdkp->rev_zone_blocks;
        unsigned int nr_zones = sdkp->rev_nr_zones;
        u32 max_append;
        int ret = 0;

        /*
         * For all zoned disks, initialize zone append emulation data if not
         * already done. This is necessary also for host-aware disks used as
         * regular disks due to the presence of partitions as these partitions
         * may be deleted and the disk zoned model changed back from
         * BLK_ZONED_NONE to BLK_ZONED_HA.
         */
        if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
                ret = sd_zbc_init_disk(sdkp);
                if (ret)
                        return ret;
        }

        /*
         * There is nothing to do for regular disks, including host-aware disks
         * that have partitions.
         */
        if (!blk_queue_is_zoned(q))
                return 0;

        /*
         * Make sure revalidate zones are serialized to ensure exclusive
         * updates of the scsi disk data.
         */
        mutex_lock(&sdkp->rev_mutex);

        if (sdkp->zone_blocks == zone_blocks &&
            sdkp->nr_zones == nr_zones &&
            disk->queue->nr_zones == nr_zones)
                goto unlock;

        sdkp->zone_blocks = zone_blocks;
        sdkp->nr_zones = nr_zones;
        sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
        if (!sdkp->rev_wp_offset) {
                ret = -ENOMEM;
                goto unlock;
        }

        ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);

        kvfree(sdkp->rev_wp_offset);
        sdkp->rev_wp_offset = NULL;

        if (ret) {
                sdkp->zone_blocks = 0;
                sdkp->nr_zones = 0;
                sdkp->capacity = 0;
                goto unlock;
        }

        max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
                           q->limits.max_segments << (PAGE_SHIFT - 9));
        max_append = min_t(u32, max_append, queue_max_hw_sectors(q));

        blk_queue_max_zone_append_sectors(q, max_append);

        sd_zbc_print_zones(sdkp);

unlock:
        mutex_unlock(&sdkp->rev_mutex);

        return ret;
}
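
/*
 * Example of the max_append clamping above: with 4K pages and a queue
 * limited to 128 segments, the segment bound is 128 << 3 = 1024 sectors
 * (512KB), so the zone append limit of a 256MB zone comes from the queue
 * limits rather than from the zone size.
 */
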
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
        struct gendisk *disk = sdkp->disk;
        struct request_queue *q = disk->queue;
        unsigned int nr_zones;
        u32 zone_blocks = 0;
        int ret;

        if (!sd_is_zoned(sdkp))
                /*
                 * Device managed or normal SCSI disk,
                 * no special handling required
                 */
                return 0;

        /* Check zoned block device characteristics (unconstrained reads) */
        ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
        if (ret)
                goto err;

        /* Check the device capacity reported by report zones */
        ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
        if (ret)
                goto err;

        /* The drive satisfies the kernel restrictions: set it up */
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
        if (sdkp->zones_max_open == U32_MAX)
                blk_queue_max_open_zones(q, 0);
        else
                blk_queue_max_open_zones(q, sdkp->zones_max_open);
        blk_queue_max_active_zones(q, 0);
        nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);

        /* READ16/WRITE16 is mandatory for ZBC disks */
        sdkp->device->use_16_for_rw = 1;
        sdkp->device->use_10_for_rw = 0;

        sdkp->rev_nr_zones = nr_zones;
        sdkp->rev_zone_blocks = zone_blocks;

        return 0;

err:
        sdkp->capacity = 0;

        return ret;
}
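
/*
 * sd_zbc_read_zones() only validates the device and stashes the zone
 * geometry in rev_nr_zones and rev_zone_blocks; the zone bitmaps and the
 * write pointer offset cache are (re)built later, when disk revalidation
 * calls sd_zbc_revalidate_zones() with the disk capacity set.
 */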
;