// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <linux/pr.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/io_uring/cmd.h>
#include <uapi/linux/blkdev.h>
#include "blk.h"
19 static int blkpg_do_ioctl(struct block_device
*bdev
,
20 struct blkpg_partition __user
*upart
, int op
)
22 struct gendisk
*disk
= bdev
->bd_disk
;
23 struct blkpg_partition p
;
24 sector_t start
, length
, capacity
, end
;
26 if (!capable(CAP_SYS_ADMIN
))
28 if (copy_from_user(&p
, upart
, sizeof(struct blkpg_partition
)))
30 if (bdev_is_partition(bdev
))
36 if (op
== BLKPG_DEL_PARTITION
)
37 return bdev_del_partition(disk
, p
.pno
);
39 if (p
.start
< 0 || p
.length
<= 0 || LLONG_MAX
- p
.length
< p
.start
)
41 /* Check that the partition is aligned to the block size */
42 if (!IS_ALIGNED(p
.start
| p
.length
, bdev_logical_block_size(bdev
)))
45 start
= p
.start
>> SECTOR_SHIFT
;
46 length
= p
.length
>> SECTOR_SHIFT
;
47 capacity
= get_capacity(disk
);
49 if (check_add_overflow(start
, length
, &end
))
52 if (start
>= capacity
|| end
> capacity
)
56 case BLKPG_ADD_PARTITION
:
57 return bdev_add_partition(disk
, p
.pno
, start
, length
);
58 case BLKPG_RESIZE_PARTITION
:
59 return bdev_resize_partition(disk
, p
.pno
, start
, length
);
65 static int blkpg_ioctl(struct block_device
*bdev
,
66 struct blkpg_ioctl_arg __user
*arg
)
68 struct blkpg_partition __user
*udata
;
71 if (get_user(op
, &arg
->op
) || get_user(udata
, &arg
->data
))
74 return blkpg_do_ioctl(bdev
, udata
, op
);
78 struct compat_blkpg_ioctl_arg
{
85 static int compat_blkpg_ioctl(struct block_device
*bdev
,
86 struct compat_blkpg_ioctl_arg __user
*arg
)
91 if (get_user(op
, &arg
->op
) || get_user(udata
, &arg
->data
))
94 return blkpg_do_ioctl(bdev
, compat_ptr(udata
), op
);
/*
 * Check that [start, start + len) is a valid range from the block device's
 * perspective, including verifying that it can be correctly translated into
 * logical block addresses.
 */
103 static int blk_validate_byte_range(struct block_device
*bdev
,
104 uint64_t start
, uint64_t len
)
106 unsigned int bs_mask
= bdev_logical_block_size(bdev
) - 1;
109 if ((start
| len
) & bs_mask
)
113 if (check_add_overflow(start
, len
, &end
) || end
> bdev_nr_bytes(bdev
))
119 static int blk_ioctl_discard(struct block_device
*bdev
, blk_mode_t mode
,
122 uint64_t range
[2], start
, len
;
123 struct bio
*prev
= NULL
, *bio
;
124 sector_t sector
, nr_sects
;
125 struct blk_plug plug
;
128 if (copy_from_user(range
, (void __user
*)arg
, sizeof(range
)))
133 if (!bdev_max_discard_sectors(bdev
))
136 if (!(mode
& BLK_OPEN_WRITE
))
138 if (bdev_read_only(bdev
))
140 err
= blk_validate_byte_range(bdev
, start
, len
);
144 filemap_invalidate_lock(bdev
->bd_mapping
);
145 err
= truncate_bdev_range(bdev
, mode
, start
, start
+ len
- 1);
149 sector
= start
>> SECTOR_SHIFT
;
150 nr_sects
= len
>> SECTOR_SHIFT
;
152 blk_start_plug(&plug
);
154 if (fatal_signal_pending(current
)) {
156 bio_await_chain(prev
);
160 bio
= blk_alloc_discard_bio(bdev
, §or
, &nr_sects
,
164 prev
= bio_chain_and_submit(prev
, bio
);
167 err
= submit_bio_wait(prev
);
168 if (err
== -EOPNOTSUPP
)
173 blk_finish_plug(&plug
);
175 filemap_invalidate_unlock(bdev
->bd_mapping
);
179 static int blk_ioctl_secure_erase(struct block_device
*bdev
, blk_mode_t mode
,
182 uint64_t start
, len
, end
;
186 if (!(mode
& BLK_OPEN_WRITE
))
188 if (!bdev_max_secure_erase_sectors(bdev
))
190 if (copy_from_user(range
, argp
, sizeof(range
)))
195 if ((start
& 511) || (len
& 511))
197 if (check_add_overflow(start
, len
, &end
) ||
198 end
> bdev_nr_bytes(bdev
))
201 filemap_invalidate_lock(bdev
->bd_mapping
);
202 err
= truncate_bdev_range(bdev
, mode
, start
, end
- 1);
204 err
= blkdev_issue_secure_erase(bdev
, start
>> 9, len
>> 9,
206 filemap_invalidate_unlock(bdev
->bd_mapping
);
211 static int blk_ioctl_zeroout(struct block_device
*bdev
, blk_mode_t mode
,
215 uint64_t start
, end
, len
;
218 if (!(mode
& BLK_OPEN_WRITE
))
221 if (copy_from_user(range
, (void __user
*)arg
, sizeof(range
)))
226 end
= start
+ len
- 1;
232 if (end
>= (uint64_t)bdev_nr_bytes(bdev
))
237 /* Invalidate the page cache, including dirty pages */
238 filemap_invalidate_lock(bdev
->bd_mapping
);
239 err
= truncate_bdev_range(bdev
, mode
, start
, end
);
243 err
= blkdev_issue_zeroout(bdev
, start
>> 9, len
>> 9, GFP_KERNEL
,
244 BLKDEV_ZERO_NOUNMAP
| BLKDEV_ZERO_KILLABLE
);
247 filemap_invalidate_unlock(bdev
->bd_mapping
);
251 static int put_ushort(unsigned short __user
*argp
, unsigned short val
)
253 return put_user(val
, argp
);
256 static int put_int(int __user
*argp
, int val
)
258 return put_user(val
, argp
);
261 static int put_uint(unsigned int __user
*argp
, unsigned int val
)
263 return put_user(val
, argp
);
266 static int put_long(long __user
*argp
, long val
)
268 return put_user(val
, argp
);
271 static int put_ulong(unsigned long __user
*argp
, unsigned long val
)
273 return put_user(val
, argp
);
276 static int put_u64(u64 __user
*argp
, u64 val
)
278 return put_user(val
, argp
);
282 static int compat_put_long(compat_long_t __user
*argp
, long val
)
284 return put_user(val
, argp
);
287 static int compat_put_ulong(compat_ulong_t __user
*argp
, compat_ulong_t val
)
289 return put_user(val
, argp
);
/*
 * This is the equivalent of compat_ptr_ioctl(), to be used by block
 * drivers that implement only commands that are completely compatible
 * between 32-bit and 64-bit user space
 */
299 int blkdev_compat_ptr_ioctl(struct block_device
*bdev
, blk_mode_t mode
,
300 unsigned cmd
, unsigned long arg
)
302 struct gendisk
*disk
= bdev
->bd_disk
;
304 if (disk
->fops
->ioctl
)
305 return disk
->fops
->ioctl(bdev
, mode
, cmd
,
306 (unsigned long)compat_ptr(arg
));
310 EXPORT_SYMBOL(blkdev_compat_ptr_ioctl
);
313 static bool blkdev_pr_allowed(struct block_device
*bdev
, blk_mode_t mode
)
315 /* no sense to make reservations for partitions */
316 if (bdev_is_partition(bdev
))
319 if (capable(CAP_SYS_ADMIN
))
322 * Only allow unprivileged reservations if the file descriptor is open
325 return mode
& BLK_OPEN_WRITE
;
328 static int blkdev_pr_register(struct block_device
*bdev
, blk_mode_t mode
,
329 struct pr_registration __user
*arg
)
331 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
332 struct pr_registration reg
;
334 if (!blkdev_pr_allowed(bdev
, mode
))
336 if (!ops
|| !ops
->pr_register
)
338 if (copy_from_user(®
, arg
, sizeof(reg
)))
341 if (reg
.flags
& ~PR_FL_IGNORE_KEY
)
343 return ops
->pr_register(bdev
, reg
.old_key
, reg
.new_key
, reg
.flags
);
346 static int blkdev_pr_reserve(struct block_device
*bdev
, blk_mode_t mode
,
347 struct pr_reservation __user
*arg
)
349 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
350 struct pr_reservation rsv
;
352 if (!blkdev_pr_allowed(bdev
, mode
))
354 if (!ops
|| !ops
->pr_reserve
)
356 if (copy_from_user(&rsv
, arg
, sizeof(rsv
)))
359 if (rsv
.flags
& ~PR_FL_IGNORE_KEY
)
361 return ops
->pr_reserve(bdev
, rsv
.key
, rsv
.type
, rsv
.flags
);
364 static int blkdev_pr_release(struct block_device
*bdev
, blk_mode_t mode
,
365 struct pr_reservation __user
*arg
)
367 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
368 struct pr_reservation rsv
;
370 if (!blkdev_pr_allowed(bdev
, mode
))
372 if (!ops
|| !ops
->pr_release
)
374 if (copy_from_user(&rsv
, arg
, sizeof(rsv
)))
379 return ops
->pr_release(bdev
, rsv
.key
, rsv
.type
);
382 static int blkdev_pr_preempt(struct block_device
*bdev
, blk_mode_t mode
,
383 struct pr_preempt __user
*arg
, bool abort
)
385 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
388 if (!blkdev_pr_allowed(bdev
, mode
))
390 if (!ops
|| !ops
->pr_preempt
)
392 if (copy_from_user(&p
, arg
, sizeof(p
)))
397 return ops
->pr_preempt(bdev
, p
.old_key
, p
.new_key
, p
.type
, abort
);
400 static int blkdev_pr_clear(struct block_device
*bdev
, blk_mode_t mode
,
401 struct pr_clear __user
*arg
)
403 const struct pr_ops
*ops
= bdev
->bd_disk
->fops
->pr_ops
;
406 if (!blkdev_pr_allowed(bdev
, mode
))
408 if (!ops
|| !ops
->pr_clear
)
410 if (copy_from_user(&c
, arg
, sizeof(c
)))
415 return ops
->pr_clear(bdev
, c
.key
);
418 static int blkdev_flushbuf(struct block_device
*bdev
, unsigned cmd
,
421 if (!capable(CAP_SYS_ADMIN
))
424 mutex_lock(&bdev
->bd_holder_lock
);
425 if (bdev
->bd_holder_ops
&& bdev
->bd_holder_ops
->sync
)
426 bdev
->bd_holder_ops
->sync(bdev
);
428 mutex_unlock(&bdev
->bd_holder_lock
);
432 invalidate_bdev(bdev
);
436 static int blkdev_roset(struct block_device
*bdev
, unsigned cmd
,
441 if (!capable(CAP_SYS_ADMIN
))
444 if (get_user(n
, (int __user
*)arg
))
446 if (bdev
->bd_disk
->fops
->set_read_only
) {
447 ret
= bdev
->bd_disk
->fops
->set_read_only(bdev
, n
);
452 bdev_set_flag(bdev
, BD_READ_ONLY
);
454 bdev_clear_flag(bdev
, BD_READ_ONLY
);
458 static int blkdev_getgeo(struct block_device
*bdev
,
459 struct hd_geometry __user
*argp
)
461 struct gendisk
*disk
= bdev
->bd_disk
;
462 struct hd_geometry geo
;
467 if (!disk
->fops
->getgeo
)
471 * We need to set the startsect first, the driver may
472 * want to override it.
474 memset(&geo
, 0, sizeof(geo
));
475 geo
.start
= get_start_sect(bdev
);
476 ret
= disk
->fops
->getgeo(bdev
, &geo
);
479 if (copy_to_user(argp
, &geo
, sizeof(geo
)))
485 struct compat_hd_geometry
{
487 unsigned char sectors
;
488 unsigned short cylinders
;
492 static int compat_hdio_getgeo(struct block_device
*bdev
,
493 struct compat_hd_geometry __user
*ugeo
)
495 struct gendisk
*disk
= bdev
->bd_disk
;
496 struct hd_geometry geo
;
501 if (!disk
->fops
->getgeo
)
504 memset(&geo
, 0, sizeof(geo
));
506 * We need to set the startsect first, the driver may
507 * want to override it.
509 geo
.start
= get_start_sect(bdev
);
510 ret
= disk
->fops
->getgeo(bdev
, &geo
);
514 ret
= copy_to_user(ugeo
, &geo
, 4);
515 ret
|= put_user(geo
.start
, &ugeo
->start
);
523 /* set the logical block size */
524 static int blkdev_bszset(struct file
*file
, blk_mode_t mode
,
527 // this one might be file_inode(file)->i_rdev - a rare valid
528 // use of file_inode() for those.
529 dev_t dev
= I_BDEV(file
->f_mapping
->host
)->bd_dev
;
530 struct file
*excl_file
;
533 if (!capable(CAP_SYS_ADMIN
))
537 if (get_user(n
, argp
))
540 if (mode
& BLK_OPEN_EXCL
)
541 return set_blocksize(file
, n
);
543 excl_file
= bdev_file_open_by_dev(dev
, mode
, &dev
, NULL
);
544 if (IS_ERR(excl_file
))
546 ret
= set_blocksize(excl_file
, n
);
/*
 * Common commands that are handled the same way on native and compat
 * user space. Note the separate arg/argp parameters that are needed
 * to deal with the compat_ptr() conversion.
 */
556 static int blkdev_common_ioctl(struct block_device
*bdev
, blk_mode_t mode
,
557 unsigned int cmd
, unsigned long arg
,
560 unsigned int max_sectors
;
564 return blkdev_flushbuf(bdev
, cmd
, arg
);
566 return blkdev_roset(bdev
, cmd
, arg
);
568 return blk_ioctl_discard(bdev
, mode
, arg
);
570 return blk_ioctl_secure_erase(bdev
, mode
, argp
);
572 return blk_ioctl_zeroout(bdev
, mode
, arg
);
574 return put_u64(argp
, bdev
->bd_disk
->diskseq
);
576 return blkdev_report_zones_ioctl(bdev
, cmd
, arg
);
581 return blkdev_zone_mgmt_ioctl(bdev
, mode
, cmd
, arg
);
583 return put_uint(argp
, bdev_zone_sectors(bdev
));
585 return put_uint(argp
, bdev_nr_zones(bdev
));
587 return put_int(argp
, bdev_read_only(bdev
) != 0);
588 case BLKSSZGET
: /* get block device logical block size */
589 return put_int(argp
, bdev_logical_block_size(bdev
));
590 case BLKPBSZGET
: /* get block device physical block size */
591 return put_uint(argp
, bdev_physical_block_size(bdev
));
593 return put_uint(argp
, bdev_io_min(bdev
));
595 return put_uint(argp
, bdev_io_opt(bdev
));
597 return put_int(argp
, bdev_alignment_offset(bdev
));
598 case BLKDISCARDZEROES
:
599 return put_uint(argp
, 0);
601 max_sectors
= min_t(unsigned int, USHRT_MAX
,
602 queue_max_sectors(bdev_get_queue(bdev
)));
603 return put_ushort(argp
, max_sectors
);
605 return put_ushort(argp
, !bdev_nonrot(bdev
));
608 if(!capable(CAP_SYS_ADMIN
))
610 bdev
->bd_disk
->bdi
->ra_pages
= (arg
* 512) / PAGE_SIZE
;
613 if (!capable(CAP_SYS_ADMIN
))
615 if (bdev_is_partition(bdev
))
617 return disk_scan_partitions(bdev
->bd_disk
,
618 mode
| BLK_OPEN_STRICT_SCAN
);
621 case BLKTRACETEARDOWN
:
622 return blk_trace_ioctl(bdev
, cmd
, argp
);
623 case IOC_PR_REGISTER
:
624 return blkdev_pr_register(bdev
, mode
, argp
);
626 return blkdev_pr_reserve(bdev
, mode
, argp
);
628 return blkdev_pr_release(bdev
, mode
, argp
);
630 return blkdev_pr_preempt(bdev
, mode
, argp
, false);
631 case IOC_PR_PREEMPT_ABORT
:
632 return blkdev_pr_preempt(bdev
, mode
, argp
, true);
634 return blkdev_pr_clear(bdev
, mode
, argp
);
/*
 * Always keep this in sync with compat_blkdev_ioctl()
 * to handle all incompatible commands in both functions.
 *
 * New commands must be compatible and go into blkdev_common_ioctl
 */
646 long blkdev_ioctl(struct file
*file
, unsigned cmd
, unsigned long arg
)
648 struct block_device
*bdev
= I_BDEV(file
->f_mapping
->host
);
649 void __user
*argp
= (void __user
*)arg
;
650 blk_mode_t mode
= file_to_blk_mode(file
);
654 /* These need separate implementations for the data structure */
656 return blkdev_getgeo(bdev
, argp
);
658 return blkpg_ioctl(bdev
, argp
);
660 /* Compat mode returns 32-bit data instead of 'long' */
665 return put_long(argp
,
666 (bdev
->bd_disk
->bdi
->ra_pages
* PAGE_SIZE
) / 512);
668 if (bdev_nr_sectors(bdev
) > ~0UL)
670 return put_ulong(argp
, bdev_nr_sectors(bdev
));
672 /* The data is compatible, but the command number is different */
673 case BLKBSZGET
: /* get block device soft block size (cf. BLKSSZGET) */
674 return put_int(argp
, block_size(bdev
));
676 return blkdev_bszset(file
, mode
, argp
);
678 return put_u64(argp
, bdev_nr_bytes(bdev
));
680 /* Incompatible alignment on i386 */
682 return blk_trace_ioctl(bdev
, cmd
, argp
);
687 ret
= blkdev_common_ioctl(bdev
, mode
, cmd
, arg
, argp
);
688 if (ret
!= -ENOIOCTLCMD
)
691 if (!bdev
->bd_disk
->fops
->ioctl
)
693 return bdev
->bd_disk
->fops
->ioctl(bdev
, mode
, cmd
, arg
);
698 #define BLKBSZGET_32 _IOR(0x12, 112, int)
699 #define BLKBSZSET_32 _IOW(0x12, 113, int)
700 #define BLKGETSIZE64_32 _IOR(0x12, 114, int)
/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
705 long compat_blkdev_ioctl(struct file
*file
, unsigned cmd
, unsigned long arg
)
708 void __user
*argp
= compat_ptr(arg
);
709 struct block_device
*bdev
= I_BDEV(file
->f_mapping
->host
);
710 struct gendisk
*disk
= bdev
->bd_disk
;
711 blk_mode_t mode
= file_to_blk_mode(file
);
714 /* These need separate implementations for the data structure */
716 return compat_hdio_getgeo(bdev
, argp
);
718 return compat_blkpg_ioctl(bdev
, argp
);
720 /* Compat mode returns 32-bit data instead of 'long' */
725 return compat_put_long(argp
,
726 (bdev
->bd_disk
->bdi
->ra_pages
* PAGE_SIZE
) / 512);
728 if (bdev_nr_sectors(bdev
) > ~(compat_ulong_t
)0)
730 return compat_put_ulong(argp
, bdev_nr_sectors(bdev
));
732 /* The data is compatible, but the command number is different */
733 case BLKBSZGET_32
: /* get the logical block size (cf. BLKSSZGET) */
734 return put_int(argp
, bdev_logical_block_size(bdev
));
736 return blkdev_bszset(file
, mode
, argp
);
737 case BLKGETSIZE64_32
:
738 return put_u64(argp
, bdev_nr_bytes(bdev
));
740 /* Incompatible alignment on i386 */
741 case BLKTRACESETUP32
:
742 return blk_trace_ioctl(bdev
, cmd
, argp
);
747 ret
= blkdev_common_ioctl(bdev
, mode
, cmd
, arg
, argp
);
748 if (ret
== -ENOIOCTLCMD
&& disk
->fops
->compat_ioctl
)
749 ret
= disk
->fops
->compat_ioctl(bdev
, mode
, cmd
, arg
);
760 static void blk_cmd_complete(struct io_uring_cmd
*cmd
, unsigned int issue_flags
)
762 struct blk_iou_cmd
*bic
= io_uring_cmd_to_pdu(cmd
, struct blk_iou_cmd
);
764 if (bic
->res
== -EAGAIN
&& bic
->nowait
)
765 io_uring_cmd_issue_blocking(cmd
);
767 io_uring_cmd_done(cmd
, bic
->res
, 0, issue_flags
);
770 static void bio_cmd_bio_end_io(struct bio
*bio
)
772 struct io_uring_cmd
*cmd
= bio
->bi_private
;
773 struct blk_iou_cmd
*bic
= io_uring_cmd_to_pdu(cmd
, struct blk_iou_cmd
);
775 if (unlikely(bio
->bi_status
) && !bic
->res
)
776 bic
->res
= blk_status_to_errno(bio
->bi_status
);
778 io_uring_cmd_do_in_task_lazy(cmd
, blk_cmd_complete
);
782 static int blkdev_cmd_discard(struct io_uring_cmd
*cmd
,
783 struct block_device
*bdev
,
784 uint64_t start
, uint64_t len
, bool nowait
)
786 struct blk_iou_cmd
*bic
= io_uring_cmd_to_pdu(cmd
, struct blk_iou_cmd
);
787 gfp_t gfp
= nowait
? GFP_NOWAIT
: GFP_KERNEL
;
788 sector_t sector
= start
>> SECTOR_SHIFT
;
789 sector_t nr_sects
= len
>> SECTOR_SHIFT
;
790 struct bio
*prev
= NULL
, *bio
;
793 if (!bdev_max_discard_sectors(bdev
))
795 if (!(file_to_blk_mode(cmd
->file
) & BLK_OPEN_WRITE
))
797 if (bdev_read_only(bdev
))
799 err
= blk_validate_byte_range(bdev
, start
, len
);
803 err
= filemap_invalidate_pages(bdev
->bd_mapping
, start
,
804 start
+ len
- 1, nowait
);
809 bio
= blk_alloc_discard_bio(bdev
, §or
, &nr_sects
, gfp
);
814 * Don't allow multi-bio non-blocking submissions as
815 * subsequent bios may fail but we won't get a direct
816 * indication of that. Normally, the caller should
817 * retry from a blocking context.
819 if (unlikely(nr_sects
)) {
823 bio
->bi_opf
|= REQ_NOWAIT
;
826 prev
= bio_chain_and_submit(prev
, bio
);
830 if (unlikely(nr_sects
))
833 prev
->bi_private
= cmd
;
834 prev
->bi_end_io
= bio_cmd_bio_end_io
;
839 int blkdev_uring_cmd(struct io_uring_cmd
*cmd
, unsigned int issue_flags
)
841 struct block_device
*bdev
= I_BDEV(cmd
->file
->f_mapping
->host
);
842 struct blk_iou_cmd
*bic
= io_uring_cmd_to_pdu(cmd
, struct blk_iou_cmd
);
843 const struct io_uring_sqe
*sqe
= cmd
->sqe
;
844 u32 cmd_op
= cmd
->cmd_op
;
847 if (unlikely(sqe
->ioprio
|| sqe
->__pad1
|| sqe
->len
||
848 sqe
->rw_flags
|| sqe
->file_index
))
852 bic
->nowait
= issue_flags
& IO_URING_F_NONBLOCK
;
854 start
= READ_ONCE(sqe
->addr
);
855 len
= READ_ONCE(sqe
->addr3
);
858 case BLOCK_URING_CMD_DISCARD
:
859 return blkdev_cmd_discard(cmd
, bdev
, start
, len
, bic
->nowait
);