// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Makes full use of io_uring passthrough commands for communicating with
 * the ublk userspace daemon (ublksrvd), which handles the actual IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring/cmd.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS		(1U << MINORBITS)

/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC	_IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)

/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
		| UBLK_F_URING_CMD_COMP_IN_TASK \
		| UBLK_F_NEED_GET_DATA \
		| UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_UNPRIVILEGED_DEV \
		| UBLK_F_CMD_IOCTL_ENCODE \
		| UBLK_F_USER_RECOVERY_FAIL_IO)

#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_USER_RECOVERY_FAIL_IO)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL                                \
	(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
	 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
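
/*
 * Per-request driver data; it lives in the blk-mq request pdu
 * (ublk_add_tag_set() sets tag_set.cmd_size to sizeof(struct ublk_rq_data)).
 */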
struct ublk_rq_data {
	struct llist_node node;

	struct kref ref;
};

struct ublk_uring_cmd_pdu {
	struct ublk_queue *ubq;
	u16 tag;
};

/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by ublk driver, and waited
 * for incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * ublk server.
 */
#define UBLK_IO_FLAG_ACTIVE	0x01

/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet.
 *
 * Basically exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used for
 * cleanup of the io command.
 */
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02

/*
 * IO command is aborted, so this flag is set in case of
 * !UBLK_IO_FLAG_ACTIVE.
 *
 * After this flag is observed, any pending or new incoming request
 * associated with this io command will be failed immediately.
 */
#define UBLK_IO_FLAG_ABORTED 0x04

/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command needs to fetch
 * the data buffer address from ublksrv.
 *
 * Then, bio data could be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
 */
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08

/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED	0x80000000

struct ublk_io {
	/* userspace buffer address from io cmd */
	__u64	addr;
	unsigned int flags;
	int res;

	struct io_uring_cmd *cmd;
};

struct ublk_queue {
	int q_id;
	int q_depth;

	unsigned long flags;
	struct task_struct	*ubq_daemon;
	char *io_cmd_buf;

	struct llist_head	io_cmds;

	unsigned long io_addr;	/* mapped vm address */
	unsigned int max_io_sz;
	bool force_abort;
	bool timeout;
	bool canceling;
	bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
	unsigned short nr_io_ready;	/* how many ios setup */
	spinlock_t		cancel_lock;
	struct ublk_device *dev;
	struct ublk_io ios[];
};
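
/*
 * Per-device state: the gendisk/tag_set side seen by the block layer, plus
 * the character device (cdev_dev) that the userspace daemon drives.
 */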
struct ublk_device {
	struct gendisk		*ub_disk;

	char	*__queues;

	unsigned int	queue_size;
	struct ublksrv_ctrl_dev_info	dev_info;

	struct blk_mq_tag_set	tag_set;

	struct cdev		cdev;
	struct device		cdev_dev;

#define UB_STATE_OPEN		0
#define UB_STATE_USED		1
#define UB_STATE_DELETED	2
	unsigned long		state;
	int			ub_number;

	struct mutex		mutex;

	spinlock_t		lock;
	struct mm_struct	*mm;

	struct ublk_params	params;

	struct completion	completion;
	unsigned int		nr_queues_ready;
	unsigned int		nr_privileged_daemon;

	struct work_struct	nosrv_work;
};

/* header of ublk_params */
struct ublk_params_header {
	__u32	len;
	__u32	types;
};
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);

static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
						   int tag);

static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
	return ub->dev_info.flags & UBLK_F_USER_COPY;
}

static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
	return ub->dev_info.flags & UBLK_F_ZONED;
}

static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_ZONED;
}
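
/* Note: zoned operation builds on UBLK_F_USER_COPY; see ublk_ctrl_add_dev(). */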
#ifdef CONFIG_BLK_DEV_ZONED

struct ublk_zoned_report_desc {
	__u64 sector;
	__u32 operation;
	__u32 nr_zones;
};

static DEFINE_XARRAY(ublk_zoned_report_descs);

static int ublk_zoned_insert_report_desc(const struct request *req,
		struct ublk_zoned_report_desc *desc)
{
	return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
			    desc, GFP_KERNEL);
}

static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
		const struct request *req)
{
	return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
}

static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
		const struct request *req)
{
	return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
}
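
/*
 * REPORT_ZONES is issued as a driver-private request (REQ_OP_DRV_IN), so its
 * parameters cannot travel in the regular io descriptor; the helpers above
 * stash them in an xarray keyed by the request pointer instead.
 */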
static int ublk_get_nr_zones(const struct ublk_device *ub)
{
	const struct ublk_param_basic *p = &ub->params.basic;

	/* Zone size is a power of 2 */
	return p->dev_sectors >> ilog2(p->chunk_sectors);
}

static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
	return blk_revalidate_disk_zones(ub->ub_disk);
}

static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
{
	const struct ublk_param_zoned *p = &ub->params.zoned;
	int nr_zones;

	if (!ublk_dev_is_zoned(ub))
		return -EINVAL;

	if (!p->max_zone_append_sectors)
		return -EINVAL;

	nr_zones = ublk_get_nr_zones(ub);

	if (p->max_active_zones > nr_zones)
		return -EINVAL;

	if (p->max_open_zones > nr_zones)
		return -EINVAL;

	return 0;
}

static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
	ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
286 /* Based on virtblk_alloc_report_buffer */
287 static void *ublk_alloc_report_buffer(struct ublk_device
*ublk
,
288 unsigned int nr_zones
, size_t *buflen
)
290 struct request_queue
*q
= ublk
->ub_disk
->queue
;
294 nr_zones
= min_t(unsigned int, nr_zones
,
295 ublk
->ub_disk
->nr_zones
);
297 bufsize
= nr_zones
* sizeof(struct blk_zone
);
299 min_t(size_t, bufsize
, queue_max_hw_sectors(q
) << SECTOR_SHIFT
);
301 while (bufsize
>= sizeof(struct blk_zone
)) {
302 buf
= kvmalloc(bufsize
, GFP_KERNEL
| __GFP_NORETRY
);
314 static int ublk_report_zones(struct gendisk
*disk
, sector_t sector
,
315 unsigned int nr_zones
, report_zones_cb cb
, void *data
)
317 struct ublk_device
*ub
= disk
->private_data
;
318 unsigned int zone_size_sectors
= disk
->queue
->limits
.chunk_sectors
;
319 unsigned int first_zone
= sector
>> ilog2(zone_size_sectors
);
320 unsigned int done_zones
= 0;
321 unsigned int max_zones_per_request
;
323 struct blk_zone
*buffer
;
324 size_t buffer_length
;
326 nr_zones
= min_t(unsigned int, ub
->ub_disk
->nr_zones
- first_zone
,
329 buffer
= ublk_alloc_report_buffer(ub
, nr_zones
, &buffer_length
);
333 max_zones_per_request
= buffer_length
/ sizeof(struct blk_zone
);
335 while (done_zones
< nr_zones
) {
336 unsigned int remaining_zones
= nr_zones
- done_zones
;
337 unsigned int zones_in_request
=
338 min_t(unsigned int, remaining_zones
, max_zones_per_request
);
340 struct ublk_zoned_report_desc desc
;
343 memset(buffer
, 0, buffer_length
);
345 req
= blk_mq_alloc_request(disk
->queue
, REQ_OP_DRV_IN
, 0);
351 desc
.operation
= UBLK_IO_OP_REPORT_ZONES
;
352 desc
.sector
= sector
;
353 desc
.nr_zones
= zones_in_request
;
354 ret
= ublk_zoned_insert_report_desc(req
, &desc
);
358 ret
= blk_rq_map_kern(disk
->queue
, req
, buffer
, buffer_length
,
363 status
= blk_execute_rq(req
, 0);
364 ret
= blk_status_to_errno(status
);
366 ublk_zoned_erase_report_desc(req
);
368 blk_mq_free_request(req
);
372 for (unsigned int i
= 0; i
< zones_in_request
; i
++) {
373 struct blk_zone
*zone
= buffer
+ i
;
375 /* A zero length zone means no more zones in this response */
379 ret
= cb(zone
, i
, data
);
384 sector
+= zone_size_sectors
;
396 static blk_status_t
ublk_setup_iod_zoned(struct ublk_queue
*ubq
,
399 struct ublksrv_io_desc
*iod
= ublk_get_iod(ubq
, req
->tag
);
400 struct ublk_io
*io
= &ubq
->ios
[req
->tag
];
401 struct ublk_zoned_report_desc
*desc
;
404 switch (req_op(req
)) {
405 case REQ_OP_ZONE_OPEN
:
406 ublk_op
= UBLK_IO_OP_ZONE_OPEN
;
408 case REQ_OP_ZONE_CLOSE
:
409 ublk_op
= UBLK_IO_OP_ZONE_CLOSE
;
411 case REQ_OP_ZONE_FINISH
:
412 ublk_op
= UBLK_IO_OP_ZONE_FINISH
;
414 case REQ_OP_ZONE_RESET
:
415 ublk_op
= UBLK_IO_OP_ZONE_RESET
;
417 case REQ_OP_ZONE_APPEND
:
418 ublk_op
= UBLK_IO_OP_ZONE_APPEND
;
420 case REQ_OP_ZONE_RESET_ALL
:
421 ublk_op
= UBLK_IO_OP_ZONE_RESET_ALL
;
424 desc
= ublk_zoned_get_report_desc(req
);
426 return BLK_STS_IOERR
;
427 ublk_op
= desc
->operation
;
429 case UBLK_IO_OP_REPORT_ZONES
:
430 iod
->op_flags
= ublk_op
| ublk_req_build_flags(req
);
431 iod
->nr_zones
= desc
->nr_zones
;
432 iod
->start_sector
= desc
->sector
;
435 return BLK_STS_IOERR
;
438 /* We do not support drv_out */
439 return BLK_STS_NOTSUPP
;
441 return BLK_STS_IOERR
;
444 iod
->op_flags
= ublk_op
| ublk_req_build_flags(req
);
445 iod
->nr_sectors
= blk_rq_sectors(req
);
446 iod
->start_sector
= blk_rq_pos(req
);
447 iod
->addr
= io
->addr
;
#else

#define ublk_report_zones	(NULL)

static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
{
	return -EOPNOTSUPP;
}

static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
}

static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
	return 0;
}

static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
					 struct request *req)
{
	return BLK_STS_NOTSUPP;
}

#endif

static inline void __ublk_complete_rq(struct request *req);
static void ublk_complete_rq(struct kref *ref);

static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
	.name = "ublk-char",
};

static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */

static DEFINE_MUTEX(ublk_ctl_mutex);

/*
 * Max ublk devices allowed to add
 *
 * It can be extended to one per-user limit in future or even controlled
 * by cgroup.
 */
#define UBLK_MAX_UBLKS		UBLK_MINORS
static unsigned int ublks_max = 64;
static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */

static struct miscdevice ublk_misc;
static inline unsigned ublk_pos_to_hwq(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
		UBLK_QID_BITS_MASK;
}

static inline unsigned ublk_pos_to_buf_off(loff_t pos)
{
	return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
}

static inline unsigned ublk_pos_to_tag(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
		UBLK_TAG_BITS_MASK;
}
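
/*
 * With UBLK_F_USER_COPY the daemon moves request data via pread()/pwrite()
 * on /dev/ublkcN, and the file offset encodes the target; roughly:
 *
 *	pos = UBLKSRV_IO_BUF_OFFSET +
 *	      (q_id << UBLK_QID_OFF) + (tag << UBLK_TAG_OFF) + buf_off
 *
 * The three helpers above simply undo that packing.
 */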
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
	const struct ublk_param_basic *p = &ub->params.basic;

	if (p->attrs & UBLK_ATTR_READ_ONLY)
		set_disk_ro(ub->ub_disk, true);

	set_capacity(ub->ub_disk, p->dev_sectors);
}
static int ublk_validate_params(const struct ublk_device *ub)
{
	/* basic param is the only one which must be set */
	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
		const struct ublk_param_basic *p = &ub->params.basic;

		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
			return -EINVAL;

		if (p->logical_bs_shift > p->physical_bs_shift)
			return -EINVAL;

		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
			return -EINVAL;

		if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
			return -EINVAL;
	} else
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *p = &ub->params.discard;

		/* So far, only support single segment discard */
		if (p->max_discard_sectors && p->max_discard_segments != 1)
			return -EINVAL;

		if (!p->discard_granularity)
			return -EINVAL;
	}

	/* dev_t is read-only */
	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
		return ublk_dev_param_zoned_validate(ub);
	else if (ublk_dev_is_zoned(ub))
		return -EINVAL;

	return 0;
}

static void ublk_apply_params(struct ublk_device *ub)
{
	ublk_dev_param_basic_apply(ub);

	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
		ublk_dev_param_zoned_apply(ub);
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_USER_COPY;
}

static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
	/*
	 * read()/write() is involved in user copy, so request reference
	 * has to be grabbed
	 */
	return ublk_support_user_copy(ubq);
}

static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_init(&data->ref);
	}
}

static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		return kref_get_unless_zero(&data->ref);
	}

	return true;
}

static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_put(&data->ref, ublk_complete_rq);
	} else {
		__ublk_complete_rq(req);
	}
}

static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_NEED_GET_DATA;
}
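
/*
 * UBLK_F_NEED_GET_DATA flow for a WRITE: the request is first completed to
 * the daemon with UBLK_IO_RES_NEED_GET_DATA, the daemon replies with
 * UBLK_IO_NEED_GET_DATA carrying a buffer address, and only then is the bio
 * data copied and the request handed over with UBLK_IO_RES_OK; see
 * __ublk_rq_task_work().
 */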
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
		return ub;
	return NULL;
}

/* Called in slow path only, keep it noinline for trace purpose */
static noinline void ublk_put_device(struct ublk_device *ub)
{
	put_device(&ub->cdev_dev);
}

static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
		int qid)
{
	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
}

static inline bool ublk_rq_has_data(const struct request *rq)
{
	return bio_has_data(rq->bio);
}

static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
		int tag)
{
	return (struct ublksrv_io_desc *)
		&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
}

static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
	return ublk_get_queue(ub, q_id)->io_cmd_buf;
}

static inline int __ublk_queue_cmd_buf_size(int depth)
{
	return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}

static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	return __ublk_queue_cmd_buf_size(ubq->q_depth);
}

static int ublk_max_cmd_buf_size(void)
{
	return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
}
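
/*
 * The daemon mmap()s each queue's io descriptor array at offset
 * UBLKSRV_CMD_BUF_OFFSET + q_id * ublk_max_cmd_buf_size(); ublk_ch_mmap()
 * below performs the matching decode.
 */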
/*
 * Should I/O outstanding to the ublk server when it exits be reissued?
 * If not, outstanding I/O will get errors.
 */
static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
{
	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
	       (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
}

/*
 * Should I/O issued while there is no ublk server be queued? If not, I/O
 * issued while there is no ublk server will get errors.
 */
static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
{
	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
	       !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}

/*
 * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
 * of the device flags for smaller cache footprint - better for fast
 * paths.
 */
static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
	       !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}

/*
 * Should ublk devices be stopped (i.e. no recovery possible) when the
 * ublk server exits? If not, devices can be used again by a future
 * incarnation of a ublk server via the start_recovery/end_recovery
 * commands.
 */
static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
	return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}

static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
	return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
	       ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
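
/*
 * Rough summary of the "no server" policies selected by the flags above:
 * without UBLK_F_USER_RECOVERY the device is stopped; with it, I/O is
 * queued (device quiesced) until recovery, optionally reissuing
 * outstanding I/O (UBLK_F_USER_RECOVERY_REISSUE) or failing I/O while no
 * server is around (UBLK_F_USER_RECOVERY_FAIL_IO).
 */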
static void ublk_free_disk(struct gendisk *disk)
{
	struct ublk_device *ub = disk->private_data;

	clear_bit(UB_STATE_USED, &ub->state);
	ublk_put_device(ub);
}

static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
		unsigned int *owner_gid)
{
	kuid_t uid;
	kgid_t gid;

	current_uid_gid(&uid, &gid);

	*owner_uid = from_kuid(&init_user_ns, uid);
	*owner_gid = from_kgid(&init_user_ns, gid);
}

static int ublk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct ublk_device *ub = disk->private_data;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	/*
	 * If it is an unprivileged device, only the owner can open
	 * the disk. Otherwise it could be a trap set by an
	 * evil user who grants this disk's privileges to other
	 * users deliberately.
	 *
	 * This way is reasonable too, given that anyone can create
	 * an unprivileged device without needing anyone else's grant.
	 */
	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
		unsigned int curr_uid, curr_gid;

		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);

		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
		    ub->dev_info.owner_gid)
			return -EPERM;
	}

	return 0;
}

static const struct block_device_operations ub_fops = {
	.owner =	THIS_MODULE,
	.open =		ublk_open,
	.free_disk =	ublk_free_disk,
	.report_zones =	ublk_report_zones,
};
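
/*
 * Everything below implements the non-zero-copy data path: request pages
 * are pinned and memcpy()ed to/from the daemon's buffer, either at map/unmap
 * time or via pread()/pwrite() when UBLK_F_USER_COPY is enabled.
 */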
793 #define UBLK_MAX_PIN_PAGES 32
795 struct ublk_io_iter
{
796 struct page
*pages
[UBLK_MAX_PIN_PAGES
];
798 struct bvec_iter iter
;
801 /* return how many pages are copied */
802 static void ublk_copy_io_pages(struct ublk_io_iter
*data
,
803 size_t total
, size_t pg_off
, int dir
)
808 while (done
< total
) {
809 struct bio_vec bv
= bio_iter_iovec(data
->bio
, data
->iter
);
810 unsigned int bytes
= min3(bv
.bv_len
, (unsigned)total
- done
,
811 (unsigned)(PAGE_SIZE
- pg_off
));
812 void *bv_buf
= bvec_kmap_local(&bv
);
813 void *pg_buf
= kmap_local_page(data
->pages
[pg_idx
]);
815 if (dir
== ITER_DEST
)
816 memcpy(pg_buf
+ pg_off
, bv_buf
, bytes
);
818 memcpy(bv_buf
, pg_buf
+ pg_off
, bytes
);
820 kunmap_local(pg_buf
);
821 kunmap_local(bv_buf
);
823 /* advance page array */
825 if (pg_off
== PAGE_SIZE
) {
833 bio_advance_iter_single(data
->bio
, &data
->iter
, bytes
);
834 if (!data
->iter
.bi_size
) {
835 data
->bio
= data
->bio
->bi_next
;
836 if (data
->bio
== NULL
)
838 data
->iter
= data
->bio
->bi_iter
;
843 static bool ublk_advance_io_iter(const struct request
*req
,
844 struct ublk_io_iter
*iter
, unsigned int offset
)
846 struct bio
*bio
= req
->bio
;
849 if (bio
->bi_iter
.bi_size
> offset
) {
851 iter
->iter
= bio
->bi_iter
;
852 bio_advance_iter(iter
->bio
, &iter
->iter
, offset
);
855 offset
-= bio
->bi_iter
.bi_size
;
861 * Copy data between request pages and io_iter, and 'offset'
862 * is the start point of linear offset of request.
864 static size_t ublk_copy_user_pages(const struct request
*req
,
865 unsigned offset
, struct iov_iter
*uiter
, int dir
)
867 struct ublk_io_iter iter
;
870 if (!ublk_advance_io_iter(req
, &iter
, offset
))
873 while (iov_iter_count(uiter
) && iter
.bio
) {
879 len
= iov_iter_get_pages2(uiter
, iter
.pages
,
880 iov_iter_count(uiter
),
881 UBLK_MAX_PIN_PAGES
, &off
);
885 ublk_copy_io_pages(&iter
, len
, off
, dir
);
886 nr_pages
= DIV_ROUND_UP(len
+ off
, PAGE_SIZE
);
887 for (i
= 0; i
< nr_pages
; i
++) {
888 if (dir
== ITER_DEST
)
889 set_page_dirty(iter
.pages
[i
]);
890 put_page(iter
.pages
[i
]);
898 static inline bool ublk_need_map_req(const struct request
*req
)
900 return ublk_rq_has_data(req
) && req_op(req
) == REQ_OP_WRITE
;
903 static inline bool ublk_need_unmap_req(const struct request
*req
)
905 return ublk_rq_has_data(req
) &&
906 (req_op(req
) == REQ_OP_READ
|| req_op(req
) == REQ_OP_DRV_IN
);
909 static int ublk_map_io(const struct ublk_queue
*ubq
, const struct request
*req
,
912 const unsigned int rq_bytes
= blk_rq_bytes(req
);
914 if (ublk_support_user_copy(ubq
))
918 * no zero copy, we delay copy WRITE request data into ublksrv
919 * context and the big benefit is that pinning pages in current
920 * context is pretty fast, see ublk_pin_user_pages
922 if (ublk_need_map_req(req
)) {
923 struct iov_iter iter
;
924 const int dir
= ITER_DEST
;
926 import_ubuf(dir
, u64_to_user_ptr(io
->addr
), rq_bytes
, &iter
);
927 return ublk_copy_user_pages(req
, 0, &iter
, dir
);
932 static int ublk_unmap_io(const struct ublk_queue
*ubq
,
933 const struct request
*req
,
936 const unsigned int rq_bytes
= blk_rq_bytes(req
);
938 if (ublk_support_user_copy(ubq
))
941 if (ublk_need_unmap_req(req
)) {
942 struct iov_iter iter
;
943 const int dir
= ITER_SOURCE
;
945 WARN_ON_ONCE(io
->res
> rq_bytes
);
947 import_ubuf(dir
, u64_to_user_ptr(io
->addr
), io
->res
, &iter
);
948 return ublk_copy_user_pages(req
, 0, &iter
, dir
);
953 static inline unsigned int ublk_req_build_flags(struct request
*req
)
957 if (req
->cmd_flags
& REQ_FAILFAST_DEV
)
958 flags
|= UBLK_IO_F_FAILFAST_DEV
;
960 if (req
->cmd_flags
& REQ_FAILFAST_TRANSPORT
)
961 flags
|= UBLK_IO_F_FAILFAST_TRANSPORT
;
963 if (req
->cmd_flags
& REQ_FAILFAST_DRIVER
)
964 flags
|= UBLK_IO_F_FAILFAST_DRIVER
;
966 if (req
->cmd_flags
& REQ_META
)
967 flags
|= UBLK_IO_F_META
;
969 if (req
->cmd_flags
& REQ_FUA
)
970 flags
|= UBLK_IO_F_FUA
;
972 if (req
->cmd_flags
& REQ_NOUNMAP
)
973 flags
|= UBLK_IO_F_NOUNMAP
;
975 if (req
->cmd_flags
& REQ_SWAP
)
976 flags
|= UBLK_IO_F_SWAP
;
static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
	struct ublk_io *io = &ubq->ios[req->tag];
	enum req_op op = req_op(req);
	u32 ublk_op;

	if (!ublk_queue_is_zoned(ubq) &&
	    (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_READ:
		ublk_op = UBLK_IO_OP_READ;
		break;
	case REQ_OP_WRITE:
		ublk_op = UBLK_IO_OP_WRITE;
		break;
	case REQ_OP_FLUSH:
		ublk_op = UBLK_IO_OP_FLUSH;
		break;
	case REQ_OP_DISCARD:
		ublk_op = UBLK_IO_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
		break;
	default:
		if (ublk_queue_is_zoned(ubq))
			return ublk_setup_iod_zoned(ubq, req);
		return BLK_STS_IOERR;
	}

	/* need to translate since kernel may change */
	iod->op_flags = ublk_op | ublk_req_build_flags(req);
	iod->nr_sectors = blk_rq_sectors(req);
	iod->start_sector = blk_rq_pos(req);
	iod->addr = io->addr;

	return BLK_STS_OK;
}
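
/*
 * The descriptor filled above is what the daemon reads from the mmap()ed
 * per-queue command buffer once the tag's fetch command completes.
 */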
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}

static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
	return ubq->ubq_daemon->flags & PF_EXITING;
}
1034 /* todo: handle partial completion */
1035 static inline void __ublk_complete_rq(struct request
*req
)
1037 struct ublk_queue
*ubq
= req
->mq_hctx
->driver_data
;
1038 struct ublk_io
*io
= &ubq
->ios
[req
->tag
];
1039 unsigned int unmapped_bytes
;
1040 blk_status_t res
= BLK_STS_OK
;
1042 /* called from ublk_abort_queue() code path */
1043 if (io
->flags
& UBLK_IO_FLAG_ABORTED
) {
1044 res
= BLK_STS_IOERR
;
1048 /* failed read IO if nothing is read */
1049 if (!io
->res
&& req_op(req
) == REQ_OP_READ
)
1053 res
= errno_to_blk_status(io
->res
);
1058 * FLUSH, DISCARD or WRITE_ZEROES usually won't return bytes returned, so end them
1061 * Both the two needn't unmap.
1063 if (req_op(req
) != REQ_OP_READ
&& req_op(req
) != REQ_OP_WRITE
&&
1064 req_op(req
) != REQ_OP_DRV_IN
)
1067 /* for READ request, writing data in iod->addr to rq buffers */
1068 unmapped_bytes
= ublk_unmap_io(ubq
, req
, io
);
1071 * Extremely impossible since we got data filled in just before
1073 * Re-read simply for this unlikely case.
1075 if (unlikely(unmapped_bytes
< io
->res
))
1076 io
->res
= unmapped_bytes
;
1078 if (blk_update_request(req
, BLK_STS_OK
, io
->res
))
1079 blk_mq_requeue_request(req
, true);
1081 __blk_mq_end_request(req
, BLK_STS_OK
);
1085 blk_mq_end_request(req
, res
);
1088 static void ublk_complete_rq(struct kref
*ref
)
1090 struct ublk_rq_data
*data
= container_of(ref
, struct ublk_rq_data
,
1092 struct request
*req
= blk_mq_rq_from_pdu(data
);
1094 __ublk_complete_rq(req
);
1098 * Since __ublk_rq_task_work always fails requests immediately during
1099 * exiting, __ublk_fail_req() is only called from abort context during
1100 * exiting. So lock is unnecessary.
1102 * Also aborting may not be started yet, keep in mind that one failed
1103 * request may be issued by block layer again.
1105 static void __ublk_fail_req(struct ublk_queue
*ubq
, struct ublk_io
*io
,
1106 struct request
*req
)
1108 WARN_ON_ONCE(io
->flags
& UBLK_IO_FLAG_ACTIVE
);
1110 if (ublk_nosrv_should_reissue_outstanding(ubq
->dev
))
1111 blk_mq_requeue_request(req
, false);
1113 ublk_put_req_ref(ubq
, req
);
1116 static void ubq_complete_io_cmd(struct ublk_io
*io
, int res
,
1117 unsigned issue_flags
)
1119 /* mark this cmd owned by ublksrv */
1120 io
->flags
|= UBLK_IO_FLAG_OWNED_BY_SRV
;
1123 * clear ACTIVE since we are done with this sqe/cmd slot
1124 * We can only accept io cmd in case of being not active.
1126 io
->flags
&= ~UBLK_IO_FLAG_ACTIVE
;
1128 /* tell ublksrv one io request is coming */
1129 io_uring_cmd_done(io
->cmd
, res
, 0, issue_flags
);
1132 #define UBLK_REQUEUE_DELAY_MS 3
1134 static inline void __ublk_abort_rq(struct ublk_queue
*ubq
,
1137 /* We cannot process this rq so just requeue it. */
1138 if (ublk_nosrv_dev_should_queue_io(ubq
->dev
))
1139 blk_mq_requeue_request(rq
, false);
1141 blk_mq_end_request(rq
, BLK_STS_IOERR
);
1144 static inline void __ublk_rq_task_work(struct request
*req
,
1145 unsigned issue_flags
)
1147 struct ublk_queue
*ubq
= req
->mq_hctx
->driver_data
;
1149 struct ublk_io
*io
= &ubq
->ios
[tag
];
1150 unsigned int mapped_bytes
;
1152 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1153 __func__
, io
->cmd
->cmd_op
, ubq
->q_id
, req
->tag
, io
->flags
,
1154 ublk_get_iod(ubq
, req
->tag
)->addr
);
1157 * Task is exiting if either:
1159 * (1) current != ubq_daemon.
1160 * io_uring_cmd_complete_in_task() tries to run task_work
1161 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1163 * (2) current->flags & PF_EXITING.
1165 if (unlikely(current
!= ubq
->ubq_daemon
|| current
->flags
& PF_EXITING
)) {
1166 __ublk_abort_rq(ubq
, req
);
1170 if (ublk_need_get_data(ubq
) && ublk_need_map_req(req
)) {
1172 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
1173 * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1176 if (!(io
->flags
& UBLK_IO_FLAG_NEED_GET_DATA
)) {
1177 io
->flags
|= UBLK_IO_FLAG_NEED_GET_DATA
;
1178 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1179 __func__
, io
->cmd
->cmd_op
, ubq
->q_id
,
1180 req
->tag
, io
->flags
);
1181 ubq_complete_io_cmd(io
, UBLK_IO_RES_NEED_GET_DATA
, issue_flags
);
1185 * We have handled UBLK_IO_NEED_GET_DATA command,
1186 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1189 io
->flags
&= ~UBLK_IO_FLAG_NEED_GET_DATA
;
1190 /* update iod->addr because ublksrv may have passed a new io buffer */
1191 ublk_get_iod(ubq
, req
->tag
)->addr
= io
->addr
;
1192 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1193 __func__
, io
->cmd
->cmd_op
, ubq
->q_id
, req
->tag
, io
->flags
,
1194 ublk_get_iod(ubq
, req
->tag
)->addr
);
1197 mapped_bytes
= ublk_map_io(ubq
, req
, io
);
1199 /* partially mapped, update io descriptor */
1200 if (unlikely(mapped_bytes
!= blk_rq_bytes(req
))) {
1202 * Nothing mapped, retry until we succeed.
1204 * We may never succeed in mapping any bytes here because
1205 * of OOM. TODO: reserve one buffer with single page pinned
1206 * for providing forward progress guarantee.
1208 if (unlikely(!mapped_bytes
)) {
1209 blk_mq_requeue_request(req
, false);
1210 blk_mq_delay_kick_requeue_list(req
->q
,
1211 UBLK_REQUEUE_DELAY_MS
);
1215 ublk_get_iod(ubq
, req
->tag
)->nr_sectors
=
1219 ublk_init_req_ref(ubq
, req
);
1220 ubq_complete_io_cmd(io
, UBLK_IO_RES_OK
, issue_flags
);
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
		unsigned issue_flags)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	io_cmds = llist_reverse_order(io_cmds);
	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}

static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
	struct ublk_queue *ubq = pdu->ubq;

	ublk_forward_io_cmds(ubq, issue_flags);
}

static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);

	if (llist_add(&data->node, &ubq->io_cmds)) {
		struct ublk_io *io = &ubq->ios[rq->tag];

		io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
	}
}
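
/*
 * Only the llist_add() that turns the list non-empty schedules task work;
 * commands added while it is pending are drained in the same
 * ublk_forward_io_cmds() pass.
 */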
1253 static enum blk_eh_timer_return
ublk_timeout(struct request
*rq
)
1255 struct ublk_queue
*ubq
= rq
->mq_hctx
->driver_data
;
1256 unsigned int nr_inflight
= 0;
1259 if (ubq
->flags
& UBLK_F_UNPRIVILEGED_DEV
) {
1260 if (!ubq
->timeout
) {
1261 send_sig(SIGKILL
, ubq
->ubq_daemon
, 0);
1262 ubq
->timeout
= true;
1268 if (!ubq_daemon_is_dying(ubq
))
1269 return BLK_EH_RESET_TIMER
;
1271 for (i
= 0; i
< ubq
->q_depth
; i
++) {
1272 struct ublk_io
*io
= &ubq
->ios
[i
];
1274 if (!(io
->flags
& UBLK_IO_FLAG_ACTIVE
))
1278 /* cancelable uring_cmd can't help us if all commands are in-flight */
1279 if (nr_inflight
== ubq
->q_depth
) {
1280 struct ublk_device
*ub
= ubq
->dev
;
1282 if (ublk_abort_requests(ub
, ubq
)) {
1283 schedule_work(&ub
->nosrv_work
);
1288 return BLK_EH_RESET_TIMER
;
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct ublk_queue *ubq = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t res;

	if (unlikely(ubq->fail_io)) {
		return BLK_STS_TARGET;
	}

	/* fill iod to slot in io cmd buffer */
	res = ublk_setup_iod(ubq, rq);
	if (unlikely(res != BLK_STS_OK))
		return BLK_STS_IOERR;

	/*
	 * With recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call
	 * io_uring_cmd_complete_in_task() here, so as to avoid UAF on
	 * the io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
	if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
		return BLK_STS_IOERR;

	if (unlikely(ubq->canceling)) {
		__ublk_abort_rq(ubq, rq);
		return BLK_STS_OK;
	}

	blk_mq_start_request(bd->rq);
	ublk_queue_cmd(ubq, rq);

	return BLK_STS_OK;
}

static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
		unsigned int hctx_idx)
{
	struct ublk_device *ub = driver_data;
	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);

	hctx->driver_data = ubq;
	return 0;
}

static const struct blk_mq_ops ublk_mq_ops = {
	.queue_rq       = ublk_queue_rq,
	.init_hctx	= ublk_init_hctx,
	.timeout	= ublk_timeout,
};
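
/*
 * Char device side: the daemon opens /dev/ublkcN, mmap()s the per-queue
 * descriptor buffers and drives IO through io_uring passthrough commands.
 */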
1346 static int ublk_ch_open(struct inode
*inode
, struct file
*filp
)
1348 struct ublk_device
*ub
= container_of(inode
->i_cdev
,
1349 struct ublk_device
, cdev
);
1351 if (test_and_set_bit(UB_STATE_OPEN
, &ub
->state
))
1353 filp
->private_data
= ub
;
1357 static int ublk_ch_release(struct inode
*inode
, struct file
*filp
)
1359 struct ublk_device
*ub
= filp
->private_data
;
1361 clear_bit(UB_STATE_OPEN
, &ub
->state
);
1365 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1366 static int ublk_ch_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
1368 struct ublk_device
*ub
= filp
->private_data
;
1369 size_t sz
= vma
->vm_end
- vma
->vm_start
;
1370 unsigned max_sz
= ublk_max_cmd_buf_size();
1371 unsigned long pfn
, end
, phys_off
= vma
->vm_pgoff
<< PAGE_SHIFT
;
1374 spin_lock(&ub
->lock
);
1376 ub
->mm
= current
->mm
;
1377 if (current
->mm
!= ub
->mm
)
1379 spin_unlock(&ub
->lock
);
1384 if (vma
->vm_flags
& VM_WRITE
)
1387 end
= UBLKSRV_CMD_BUF_OFFSET
+ ub
->dev_info
.nr_hw_queues
* max_sz
;
1388 if (phys_off
< UBLKSRV_CMD_BUF_OFFSET
|| phys_off
>= end
)
1391 q_id
= (phys_off
- UBLKSRV_CMD_BUF_OFFSET
) / max_sz
;
1392 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1393 __func__
, q_id
, current
->pid
, vma
->vm_start
,
1394 phys_off
, (unsigned long)sz
);
1396 if (sz
!= ublk_queue_cmd_buf_size(ub
, q_id
))
1399 pfn
= virt_to_phys(ublk_queue_cmd_buf(ub
, q_id
)) >> PAGE_SHIFT
;
1400 return remap_pfn_range(vma
, vma
->vm_start
, pfn
, sz
, vma
->vm_page_prot
);
1403 static void ublk_commit_completion(struct ublk_device
*ub
,
1404 const struct ublksrv_io_cmd
*ub_cmd
)
1406 u32 qid
= ub_cmd
->q_id
, tag
= ub_cmd
->tag
;
1407 struct ublk_queue
*ubq
= ublk_get_queue(ub
, qid
);
1408 struct ublk_io
*io
= &ubq
->ios
[tag
];
1409 struct request
*req
;
1411 /* now this cmd slot is owned by nbd driver */
1412 io
->flags
&= ~UBLK_IO_FLAG_OWNED_BY_SRV
;
1413 io
->res
= ub_cmd
->result
;
1415 /* find the io request and complete */
1416 req
= blk_mq_tag_to_rq(ub
->tag_set
.tags
[qid
], tag
);
1417 if (WARN_ON_ONCE(unlikely(!req
)))
1420 if (req_op(req
) == REQ_OP_ZONE_APPEND
)
1421 req
->__sector
= ub_cmd
->zone_append_lba
;
1423 if (likely(!blk_should_fake_timeout(req
->q
)))
1424 ublk_put_req_ref(ubq
, req
);
1428 * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
1429 * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
1430 * context, so everything is serialized.
1432 static void ublk_abort_queue(struct ublk_device
*ub
, struct ublk_queue
*ubq
)
1436 for (i
= 0; i
< ubq
->q_depth
; i
++) {
1437 struct ublk_io
*io
= &ubq
->ios
[i
];
1439 if (!(io
->flags
& UBLK_IO_FLAG_ACTIVE
)) {
1443 * Either we fail the request or ublk_rq_task_work_fn
1446 rq
= blk_mq_tag_to_rq(ub
->tag_set
.tags
[ubq
->q_id
], i
);
1447 if (rq
&& blk_mq_request_started(rq
)) {
1448 io
->flags
|= UBLK_IO_FLAG_ABORTED
;
1449 __ublk_fail_req(ubq
, io
, rq
);
1455 static bool ublk_abort_requests(struct ublk_device
*ub
, struct ublk_queue
*ubq
)
1457 struct gendisk
*disk
;
1459 spin_lock(&ubq
->cancel_lock
);
1460 if (ubq
->canceling
) {
1461 spin_unlock(&ubq
->cancel_lock
);
1464 ubq
->canceling
= true;
1465 spin_unlock(&ubq
->cancel_lock
);
1467 spin_lock(&ub
->lock
);
1470 get_device(disk_to_dev(disk
));
1471 spin_unlock(&ub
->lock
);
1473 /* Our disk has been dead */
1477 /* Now we are serialized with ublk_queue_rq() */
1478 blk_mq_quiesce_queue(disk
->queue
);
1479 /* abort queue is for making forward progress */
1480 ublk_abort_queue(ub
, ubq
);
1481 blk_mq_unquiesce_queue(disk
->queue
);
1482 put_device(disk_to_dev(disk
));
1487 static void ublk_cancel_cmd(struct ublk_queue
*ubq
, struct ublk_io
*io
,
1488 unsigned int issue_flags
)
1492 if (!(io
->flags
& UBLK_IO_FLAG_ACTIVE
))
1495 spin_lock(&ubq
->cancel_lock
);
1496 done
= !!(io
->flags
& UBLK_IO_FLAG_CANCELED
);
1498 io
->flags
|= UBLK_IO_FLAG_CANCELED
;
1499 spin_unlock(&ubq
->cancel_lock
);
1502 io_uring_cmd_done(io
->cmd
, UBLK_IO_RES_ABORT
, 0, issue_flags
);
1506 * The ublk char device won't be closed when calling cancel fn, so both
1507 * ublk device and queue are guaranteed to be live
1509 static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd
*cmd
,
1510 unsigned int issue_flags
)
1512 struct ublk_uring_cmd_pdu
*pdu
= ublk_get_uring_cmd_pdu(cmd
);
1513 struct ublk_queue
*ubq
= pdu
->ubq
;
1514 struct task_struct
*task
;
1515 struct ublk_device
*ub
;
1519 if (WARN_ON_ONCE(!ubq
))
1522 if (WARN_ON_ONCE(pdu
->tag
>= ubq
->q_depth
))
1525 task
= io_uring_cmd_get_task(cmd
);
1526 if (WARN_ON_ONCE(task
&& task
!= ubq
->ubq_daemon
))
1530 need_schedule
= ublk_abort_requests(ub
, ubq
);
1532 io
= &ubq
->ios
[pdu
->tag
];
1533 WARN_ON_ONCE(io
->cmd
!= cmd
);
1534 ublk_cancel_cmd(ubq
, io
, issue_flags
);
1536 if (need_schedule
) {
1537 schedule_work(&ub
->nosrv_work
);
1541 static inline bool ublk_queue_ready(struct ublk_queue
*ubq
)
1543 return ubq
->nr_io_ready
== ubq
->q_depth
;
1546 static void ublk_cancel_queue(struct ublk_queue
*ubq
)
1550 for (i
= 0; i
< ubq
->q_depth
; i
++)
1551 ublk_cancel_cmd(ubq
, &ubq
->ios
[i
], IO_URING_F_UNLOCKED
);
1554 /* Cancel all pending commands, must be called after del_gendisk() returns */
1555 static void ublk_cancel_dev(struct ublk_device
*ub
)
1559 for (i
= 0; i
< ub
->dev_info
.nr_hw_queues
; i
++)
1560 ublk_cancel_queue(ublk_get_queue(ub
, i
));
1563 static bool ublk_check_inflight_rq(struct request
*rq
, void *data
)
1567 if (blk_mq_request_started(rq
)) {
1574 static void ublk_wait_tagset_rqs_idle(struct ublk_device
*ub
)
1578 WARN_ON_ONCE(!blk_queue_quiesced(ub
->ub_disk
->queue
));
1581 blk_mq_tagset_busy_iter(&ub
->tag_set
,
1582 ublk_check_inflight_rq
, &idle
);
1585 msleep(UBLK_REQUEUE_DELAY_MS
);
1589 static void __ublk_quiesce_dev(struct ublk_device
*ub
)
1591 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1592 __func__
, ub
->dev_info
.dev_id
,
1593 ub
->dev_info
.state
== UBLK_S_DEV_LIVE
?
1594 "LIVE" : "QUIESCED");
1595 blk_mq_quiesce_queue(ub
->ub_disk
->queue
);
1596 ublk_wait_tagset_rqs_idle(ub
);
1597 ub
->dev_info
.state
= UBLK_S_DEV_QUIESCED
;
1600 static void ublk_unquiesce_dev(struct ublk_device
*ub
)
1604 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1605 __func__
, ub
->dev_info
.dev_id
,
1606 ub
->dev_info
.state
== UBLK_S_DEV_LIVE
?
1607 "LIVE" : "QUIESCED");
1608 /* quiesce_work has run. We let requeued rqs be aborted
1609 * before running fallback_wq. "force_abort" must be seen
1610 * after request queue is unqiuesced. Then del_gendisk()
1613 for (i
= 0; i
< ub
->dev_info
.nr_hw_queues
; i
++)
1614 ublk_get_queue(ub
, i
)->force_abort
= true;
1616 blk_mq_unquiesce_queue(ub
->ub_disk
->queue
);
1617 /* We may have requeued some rqs in ublk_quiesce_queue() */
1618 blk_mq_kick_requeue_list(ub
->ub_disk
->queue
);
1621 static void ublk_stop_dev(struct ublk_device
*ub
)
1623 struct gendisk
*disk
;
1625 mutex_lock(&ub
->mutex
);
1626 if (ub
->dev_info
.state
== UBLK_S_DEV_DEAD
)
1628 if (ublk_nosrv_dev_should_queue_io(ub
)) {
1629 if (ub
->dev_info
.state
== UBLK_S_DEV_LIVE
)
1630 __ublk_quiesce_dev(ub
);
1631 ublk_unquiesce_dev(ub
);
1633 del_gendisk(ub
->ub_disk
);
1635 /* Sync with ublk_abort_queue() by holding the lock */
1636 spin_lock(&ub
->lock
);
1638 ub
->dev_info
.state
= UBLK_S_DEV_DEAD
;
1639 ub
->dev_info
.ublksrv_pid
= -1;
1641 spin_unlock(&ub
->lock
);
1644 mutex_unlock(&ub
->mutex
);
1645 ublk_cancel_dev(ub
);
1648 static void ublk_nosrv_work(struct work_struct
*work
)
1650 struct ublk_device
*ub
=
1651 container_of(work
, struct ublk_device
, nosrv_work
);
1654 if (ublk_nosrv_should_stop_dev(ub
)) {
1659 mutex_lock(&ub
->mutex
);
1660 if (ub
->dev_info
.state
!= UBLK_S_DEV_LIVE
)
1663 if (ublk_nosrv_dev_should_queue_io(ub
)) {
1664 __ublk_quiesce_dev(ub
);
1666 blk_mq_quiesce_queue(ub
->ub_disk
->queue
);
1667 ub
->dev_info
.state
= UBLK_S_DEV_FAIL_IO
;
1668 for (i
= 0; i
< ub
->dev_info
.nr_hw_queues
; i
++) {
1669 ublk_get_queue(ub
, i
)->fail_io
= true;
1671 blk_mq_unquiesce_queue(ub
->ub_disk
->queue
);
1675 mutex_unlock(&ub
->mutex
);
1676 ublk_cancel_dev(ub
);
1679 /* device can only be started after all IOs are ready */
1680 static void ublk_mark_io_ready(struct ublk_device
*ub
, struct ublk_queue
*ubq
)
1682 mutex_lock(&ub
->mutex
);
1684 if (ublk_queue_ready(ubq
)) {
1685 ubq
->ubq_daemon
= current
;
1686 get_task_struct(ubq
->ubq_daemon
);
1687 ub
->nr_queues_ready
++;
1689 if (capable(CAP_SYS_ADMIN
))
1690 ub
->nr_privileged_daemon
++;
1692 if (ub
->nr_queues_ready
== ub
->dev_info
.nr_hw_queues
)
1693 complete_all(&ub
->completion
);
1694 mutex_unlock(&ub
->mutex
);
1697 static void ublk_handle_need_get_data(struct ublk_device
*ub
, int q_id
,
1700 struct ublk_queue
*ubq
= ublk_get_queue(ub
, q_id
);
1701 struct request
*req
= blk_mq_tag_to_rq(ub
->tag_set
.tags
[q_id
], tag
);
1703 ublk_queue_cmd(ubq
, req
);
static inline int ublk_check_cmd_op(u32 cmd_op)
{
	u32 ioc_type = _IOC_TYPE(cmd_op);

	if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
		return -EOPNOTSUPP;

	if (ioc_type != 'u' && ioc_type != 0)
		return -EOPNOTSUPP;

	return 0;
}
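
/*
 * With UBLK_F_CMD_IOCTL_ENCODE the opcodes are regular _IO*('u', ...)
 * values; the legacy plain opcodes (ioc_type == 0) are only accepted when
 * CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
 */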
static inline void ublk_fill_io_cmd(struct ublk_io *io,
		struct io_uring_cmd *cmd, unsigned long buf_addr)
{
	io->cmd = cmd;
	io->flags |= UBLK_IO_FLAG_ACTIVE;
	io->addr = buf_addr;
}
1727 static inline void ublk_prep_cancel(struct io_uring_cmd
*cmd
,
1728 unsigned int issue_flags
,
1729 struct ublk_queue
*ubq
, unsigned int tag
)
1731 struct ublk_uring_cmd_pdu
*pdu
= ublk_get_uring_cmd_pdu(cmd
);
1734 * Safe to refer to @ubq since ublk_queue won't be died until its
1735 * commands are completed
1739 io_uring_cmd_mark_cancelable(cmd
, issue_flags
);
1742 static int __ublk_ch_uring_cmd(struct io_uring_cmd
*cmd
,
1743 unsigned int issue_flags
,
1744 const struct ublksrv_io_cmd
*ub_cmd
)
1746 struct ublk_device
*ub
= cmd
->file
->private_data
;
1747 struct ublk_queue
*ubq
;
1749 u32 cmd_op
= cmd
->cmd_op
;
1750 unsigned tag
= ub_cmd
->tag
;
1752 struct request
*req
;
1754 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1755 __func__
, cmd
->cmd_op
, ub_cmd
->q_id
, tag
,
1758 if (ub_cmd
->q_id
>= ub
->dev_info
.nr_hw_queues
)
1761 ubq
= ublk_get_queue(ub
, ub_cmd
->q_id
);
1762 if (!ubq
|| ub_cmd
->q_id
!= ubq
->q_id
)
1765 if (ubq
->ubq_daemon
&& ubq
->ubq_daemon
!= current
)
1768 if (tag
>= ubq
->q_depth
)
1771 io
= &ubq
->ios
[tag
];
1773 /* there is pending io cmd, something must be wrong */
1774 if (io
->flags
& UBLK_IO_FLAG_ACTIVE
) {
1780 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1781 * iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA.
1783 if ((!!(io
->flags
& UBLK_IO_FLAG_NEED_GET_DATA
))
1784 ^ (_IOC_NR(cmd_op
) == UBLK_IO_NEED_GET_DATA
))
1787 ret
= ublk_check_cmd_op(cmd_op
);
1792 switch (_IOC_NR(cmd_op
)) {
1793 case UBLK_IO_FETCH_REQ
:
1794 /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1795 if (ublk_queue_ready(ubq
)) {
1800 * The io is being handled by server, so COMMIT_RQ is expected
1801 * instead of FETCH_REQ
1803 if (io
->flags
& UBLK_IO_FLAG_OWNED_BY_SRV
)
1806 if (!ublk_support_user_copy(ubq
)) {
1808 * FETCH_RQ has to provide IO buffer if NEED GET
1809 * DATA is not enabled
1811 if (!ub_cmd
->addr
&& !ublk_need_get_data(ubq
))
1813 } else if (ub_cmd
->addr
) {
1814 /* User copy requires addr to be unset */
1819 ublk_fill_io_cmd(io
, cmd
, ub_cmd
->addr
);
1820 ublk_mark_io_ready(ub
, ubq
);
1822 case UBLK_IO_COMMIT_AND_FETCH_REQ
:
1823 req
= blk_mq_tag_to_rq(ub
->tag_set
.tags
[ub_cmd
->q_id
], tag
);
1825 if (!(io
->flags
& UBLK_IO_FLAG_OWNED_BY_SRV
))
1828 if (!ublk_support_user_copy(ubq
)) {
1830 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
1831 * NEED GET DATA is not enabled or it is Read IO.
1833 if (!ub_cmd
->addr
&& (!ublk_need_get_data(ubq
) ||
1834 req_op(req
) == REQ_OP_READ
))
1836 } else if (req_op(req
) != REQ_OP_ZONE_APPEND
&& ub_cmd
->addr
) {
1838 * User copy requires addr to be unset when command is
1845 ublk_fill_io_cmd(io
, cmd
, ub_cmd
->addr
);
1846 ublk_commit_completion(ub
, ub_cmd
);
1848 case UBLK_IO_NEED_GET_DATA
:
1849 if (!(io
->flags
& UBLK_IO_FLAG_OWNED_BY_SRV
))
1851 ublk_fill_io_cmd(io
, cmd
, ub_cmd
->addr
);
1852 ublk_handle_need_get_data(ub
, ub_cmd
->q_id
, ub_cmd
->tag
);
1857 ublk_prep_cancel(cmd
, issue_flags
, ubq
, tag
);
1858 return -EIOCBQUEUED
;
1861 io_uring_cmd_done(cmd
, ret
, 0, issue_flags
);
1862 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1863 __func__
, cmd_op
, tag
, ret
, io
->flags
);
1864 return -EIOCBQUEUED
;
1867 static inline struct request
*__ublk_check_and_get_req(struct ublk_device
*ub
,
1868 struct ublk_queue
*ubq
, int tag
, size_t offset
)
1870 struct request
*req
;
1872 if (!ublk_need_req_ref(ubq
))
1875 req
= blk_mq_tag_to_rq(ub
->tag_set
.tags
[ubq
->q_id
], tag
);
1879 if (!ublk_get_req_ref(ubq
, req
))
1882 if (unlikely(!blk_mq_request_started(req
) || req
->tag
!= tag
))
1885 if (!ublk_rq_has_data(req
))
1888 if (offset
> blk_rq_bytes(req
))
1893 ublk_put_req_ref(ubq
, req
);
1897 static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd
*cmd
,
1898 unsigned int issue_flags
)
1901 * Not necessary for async retry, but let's keep it simple and always
1902 * copy the values to avoid any potential reuse.
1904 const struct ublksrv_io_cmd
*ub_src
= io_uring_sqe_cmd(cmd
->sqe
);
1905 const struct ublksrv_io_cmd ub_cmd
= {
1906 .q_id
= READ_ONCE(ub_src
->q_id
),
1907 .tag
= READ_ONCE(ub_src
->tag
),
1908 .result
= READ_ONCE(ub_src
->result
),
1909 .addr
= READ_ONCE(ub_src
->addr
)
1912 WARN_ON_ONCE(issue_flags
& IO_URING_F_UNLOCKED
);
1914 return __ublk_ch_uring_cmd(cmd
, issue_flags
, &ub_cmd
);
1917 static void ublk_ch_uring_cmd_cb(struct io_uring_cmd
*cmd
,
1918 unsigned int issue_flags
)
1920 ublk_ch_uring_cmd_local(cmd
, issue_flags
);
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
		ublk_uring_cmd_cancel_fn(cmd, issue_flags);
		return 0;
	}

	/* well-implemented server won't run into unlocked */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
		return -EIOCBQUEUED;
	}

	return ublk_ch_uring_cmd_local(cmd, issue_flags);
}
1939 static inline bool ublk_check_ubuf_dir(const struct request
*req
,
1942 /* copy ubuf to request pages */
1943 if ((req_op(req
) == REQ_OP_READ
|| req_op(req
) == REQ_OP_DRV_IN
) &&
1944 ubuf_dir
== ITER_SOURCE
)
1947 /* copy request pages to ubuf */
1948 if ((req_op(req
) == REQ_OP_WRITE
||
1949 req_op(req
) == REQ_OP_ZONE_APPEND
) &&
1950 ubuf_dir
== ITER_DEST
)
1956 static struct request
*ublk_check_and_get_req(struct kiocb
*iocb
,
1957 struct iov_iter
*iter
, size_t *off
, int dir
)
1959 struct ublk_device
*ub
= iocb
->ki_filp
->private_data
;
1960 struct ublk_queue
*ubq
;
1961 struct request
*req
;
1966 return ERR_PTR(-EACCES
);
1968 if (!user_backed_iter(iter
))
1969 return ERR_PTR(-EACCES
);
1971 if (ub
->dev_info
.state
== UBLK_S_DEV_DEAD
)
1972 return ERR_PTR(-EACCES
);
1974 tag
= ublk_pos_to_tag(iocb
->ki_pos
);
1975 q_id
= ublk_pos_to_hwq(iocb
->ki_pos
);
1976 buf_off
= ublk_pos_to_buf_off(iocb
->ki_pos
);
1978 if (q_id
>= ub
->dev_info
.nr_hw_queues
)
1979 return ERR_PTR(-EINVAL
);
1981 ubq
= ublk_get_queue(ub
, q_id
);
1983 return ERR_PTR(-EINVAL
);
1985 if (tag
>= ubq
->q_depth
)
1986 return ERR_PTR(-EINVAL
);
1988 req
= __ublk_check_and_get_req(ub
, ubq
, tag
, buf_off
);
1990 return ERR_PTR(-EINVAL
);
1992 if (!req
->mq_hctx
|| !req
->mq_hctx
->driver_data
)
1995 if (!ublk_check_ubuf_dir(req
, dir
))
2001 ublk_put_req_ref(ubq
, req
);
2002 return ERR_PTR(-EACCES
);
2005 static ssize_t
ublk_ch_read_iter(struct kiocb
*iocb
, struct iov_iter
*to
)
2007 struct ublk_queue
*ubq
;
2008 struct request
*req
;
2012 req
= ublk_check_and_get_req(iocb
, to
, &buf_off
, ITER_DEST
);
2014 return PTR_ERR(req
);
2016 ret
= ublk_copy_user_pages(req
, buf_off
, to
, ITER_DEST
);
2017 ubq
= req
->mq_hctx
->driver_data
;
2018 ublk_put_req_ref(ubq
, req
);
2023 static ssize_t
ublk_ch_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
2025 struct ublk_queue
*ubq
;
2026 struct request
*req
;
2030 req
= ublk_check_and_get_req(iocb
, from
, &buf_off
, ITER_SOURCE
);
2032 return PTR_ERR(req
);
2034 ret
= ublk_copy_user_pages(req
, buf_off
, from
, ITER_SOURCE
);
2035 ubq
= req
->mq_hctx
->driver_data
;
2036 ublk_put_req_ref(ubq
, req
);
static const struct file_operations ublk_ch_fops = {
	.owner = THIS_MODULE,
	.open = ublk_ch_open,
	.release = ublk_ch_release,
	.read_iter = ublk_ch_read_iter,
	.write_iter = ublk_ch_write_iter,
	.uring_cmd = ublk_ch_uring_cmd,
	.mmap = ublk_ch_mmap,
};
2051 static void ublk_deinit_queue(struct ublk_device
*ub
, int q_id
)
2053 int size
= ublk_queue_cmd_buf_size(ub
, q_id
);
2054 struct ublk_queue
*ubq
= ublk_get_queue(ub
, q_id
);
2056 if (ubq
->ubq_daemon
)
2057 put_task_struct(ubq
->ubq_daemon
);
2058 if (ubq
->io_cmd_buf
)
2059 free_pages((unsigned long)ubq
->io_cmd_buf
, get_order(size
));
2062 static int ublk_init_queue(struct ublk_device
*ub
, int q_id
)
2064 struct ublk_queue
*ubq
= ublk_get_queue(ub
, q_id
);
2065 gfp_t gfp_flags
= GFP_KERNEL
| __GFP_ZERO
;
2069 spin_lock_init(&ubq
->cancel_lock
);
2070 ubq
->flags
= ub
->dev_info
.flags
;
2072 ubq
->q_depth
= ub
->dev_info
.queue_depth
;
2073 size
= ublk_queue_cmd_buf_size(ub
, q_id
);
2075 ptr
= (void *) __get_free_pages(gfp_flags
, get_order(size
));
2079 ubq
->io_cmd_buf
= ptr
;
2084 static void ublk_deinit_queues(struct ublk_device
*ub
)
2086 int nr_queues
= ub
->dev_info
.nr_hw_queues
;
2092 for (i
= 0; i
< nr_queues
; i
++)
2093 ublk_deinit_queue(ub
, i
);
2094 kfree(ub
->__queues
);
2097 static int ublk_init_queues(struct ublk_device
*ub
)
2099 int nr_queues
= ub
->dev_info
.nr_hw_queues
;
2100 int depth
= ub
->dev_info
.queue_depth
;
2101 int ubq_size
= sizeof(struct ublk_queue
) + depth
* sizeof(struct ublk_io
);
2102 int i
, ret
= -ENOMEM
;
2104 ub
->queue_size
= ubq_size
;
2105 ub
->__queues
= kcalloc(nr_queues
, ubq_size
, GFP_KERNEL
);
2109 for (i
= 0; i
< nr_queues
; i
++) {
2110 if (ublk_init_queue(ub
, i
))
2114 init_completion(&ub
->completion
);
2118 ublk_deinit_queues(ub
);
2122 static int ublk_alloc_dev_number(struct ublk_device
*ub
, int idx
)
2127 spin_lock(&ublk_idr_lock
);
2128 /* allocate id, if @id >= 0, we're requesting that specific id */
2130 err
= idr_alloc(&ublk_index_idr
, ub
, i
, i
+ 1, GFP_NOWAIT
);
2134 err
= idr_alloc(&ublk_index_idr
, ub
, 0, UBLK_MAX_UBLKS
,
2137 spin_unlock(&ublk_idr_lock
);
2140 ub
->ub_number
= err
;
2145 static void ublk_free_dev_number(struct ublk_device
*ub
)
2147 spin_lock(&ublk_idr_lock
);
2148 idr_remove(&ublk_index_idr
, ub
->ub_number
);
2149 wake_up_all(&ublk_idr_wq
);
2150 spin_unlock(&ublk_idr_lock
);
2153 static void ublk_cdev_rel(struct device
*dev
)
2155 struct ublk_device
*ub
= container_of(dev
, struct ublk_device
, cdev_dev
);
2157 blk_mq_free_tag_set(&ub
->tag_set
);
2158 ublk_deinit_queues(ub
);
2159 ublk_free_dev_number(ub
);
2160 mutex_destroy(&ub
->mutex
);
2164 static int ublk_add_chdev(struct ublk_device
*ub
)
2166 struct device
*dev
= &ub
->cdev_dev
;
2167 int minor
= ub
->ub_number
;
2170 dev
->parent
= ublk_misc
.this_device
;
2171 dev
->devt
= MKDEV(MAJOR(ublk_chr_devt
), minor
);
2172 dev
->class = &ublk_chr_class
;
2173 dev
->release
= ublk_cdev_rel
;
2174 device_initialize(dev
);
2176 ret
= dev_set_name(dev
, "ublkc%d", minor
);
2180 cdev_init(&ub
->cdev
, &ublk_ch_fops
);
2181 ret
= cdev_device_add(&ub
->cdev
, dev
);
2192 /* align max io buffer size with PAGE_SIZE */
2193 static void ublk_align_max_io_size(struct ublk_device
*ub
)
2195 unsigned int max_io_bytes
= ub
->dev_info
.max_io_buf_bytes
;
2197 ub
->dev_info
.max_io_buf_bytes
=
2198 round_down(max_io_bytes
, PAGE_SIZE
);
2201 static int ublk_add_tag_set(struct ublk_device
*ub
)
2203 ub
->tag_set
.ops
= &ublk_mq_ops
;
2204 ub
->tag_set
.nr_hw_queues
= ub
->dev_info
.nr_hw_queues
;
2205 ub
->tag_set
.queue_depth
= ub
->dev_info
.queue_depth
;
2206 ub
->tag_set
.numa_node
= NUMA_NO_NODE
;
2207 ub
->tag_set
.cmd_size
= sizeof(struct ublk_rq_data
);
2208 ub
->tag_set
.flags
= BLK_MQ_F_SHOULD_MERGE
;
2209 ub
->tag_set
.driver_data
= ub
;
2210 return blk_mq_alloc_tag_set(&ub
->tag_set
);
2213 static void ublk_remove(struct ublk_device
*ub
)
2216 cancel_work_sync(&ub
->nosrv_work
);
2217 cdev_device_del(&ub
->cdev
, &ub
->cdev_dev
);
2218 ublk_put_device(ub
);
2222 static struct ublk_device
*ublk_get_device_from_id(int idx
)
2224 struct ublk_device
*ub
= NULL
;
2229 spin_lock(&ublk_idr_lock
);
2230 ub
= idr_find(&ublk_index_idr
, idx
);
2232 ub
= ublk_get_device(ub
);
2233 spin_unlock(&ublk_idr_lock
);
2238 static int ublk_ctrl_start_dev(struct ublk_device
*ub
, struct io_uring_cmd
*cmd
)
2240 const struct ublksrv_ctrl_cmd
*header
= io_uring_sqe_cmd(cmd
->sqe
);
2241 const struct ublk_param_basic
*p
= &ub
->params
.basic
;
2242 int ublksrv_pid
= (int)header
->data
[0];
2243 struct queue_limits lim
= {
2244 .logical_block_size
= 1 << p
->logical_bs_shift
,
2245 .physical_block_size
= 1 << p
->physical_bs_shift
,
2246 .io_min
= 1 << p
->io_min_shift
,
2247 .io_opt
= 1 << p
->io_opt_shift
,
2248 .max_hw_sectors
= p
->max_sectors
,
2249 .chunk_sectors
= p
->chunk_sectors
,
2250 .virt_boundary_mask
= p
->virt_boundary_mask
,
2251 .max_segments
= USHRT_MAX
,
2252 .max_segment_size
= UINT_MAX
,
2255 struct gendisk
*disk
;
2258 if (ublksrv_pid
<= 0)
2260 if (!(ub
->params
.types
& UBLK_PARAM_TYPE_BASIC
))
2263 if (ub
->params
.types
& UBLK_PARAM_TYPE_DISCARD
) {
2264 const struct ublk_param_discard
*pd
= &ub
->params
.discard
;
2266 lim
.discard_alignment
= pd
->discard_alignment
;
2267 lim
.discard_granularity
= pd
->discard_granularity
;
2268 lim
.max_hw_discard_sectors
= pd
->max_discard_sectors
;
2269 lim
.max_write_zeroes_sectors
= pd
->max_write_zeroes_sectors
;
2270 lim
.max_discard_segments
= pd
->max_discard_segments
;
2273 if (ub
->params
.types
& UBLK_PARAM_TYPE_ZONED
) {
2274 const struct ublk_param_zoned
*p
= &ub
->params
.zoned
;
2276 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED
))
2279 lim
.features
|= BLK_FEAT_ZONED
;
2280 lim
.max_active_zones
= p
->max_active_zones
;
2281 lim
.max_open_zones
= p
->max_open_zones
;
2282 lim
.max_hw_zone_append_sectors
= p
->max_zone_append_sectors
;
2285 if (ub
->params
.basic
.attrs
& UBLK_ATTR_VOLATILE_CACHE
) {
2286 lim
.features
|= BLK_FEAT_WRITE_CACHE
;
2287 if (ub
->params
.basic
.attrs
& UBLK_ATTR_FUA
)
2288 lim
.features
|= BLK_FEAT_FUA
;
2291 if (ub
->params
.basic
.attrs
& UBLK_ATTR_ROTATIONAL
)
2292 lim
.features
|= BLK_FEAT_ROTATIONAL
;
2294 if (wait_for_completion_interruptible(&ub
->completion
) != 0)
2297 mutex_lock(&ub
->mutex
);
2298 if (ub
->dev_info
.state
== UBLK_S_DEV_LIVE
||
2299 test_bit(UB_STATE_USED
, &ub
->state
)) {
2304 disk
= blk_mq_alloc_disk(&ub
->tag_set
, &lim
, NULL
);
2306 ret
= PTR_ERR(disk
);
2309 sprintf(disk
->disk_name
, "ublkb%d", ub
->ub_number
);
2310 disk
->fops
= &ub_fops
;
2311 disk
->private_data
= ub
;
2313 ub
->dev_info
.ublksrv_pid
= ublksrv_pid
;
2316 ublk_apply_params(ub
);
2318 /* don't probe partitions if any one ubq daemon is un-trusted */
2319 if (ub
->nr_privileged_daemon
!= ub
->nr_queues_ready
)
2320 set_bit(GD_SUPPRESS_PART_SCAN
, &disk
->state
);
2322 ublk_get_device(ub
);
2323 ub
->dev_info
.state
= UBLK_S_DEV_LIVE
;
2325 if (ublk_dev_is_zoned(ub
)) {
2326 ret
= ublk_revalidate_disk_zones(ub
);
2331 ret
= add_disk(disk
);
2335 set_bit(UB_STATE_USED
, &ub
->state
);
2339 ub
->dev_info
.state
= UBLK_S_DEV_DEAD
;
2340 ublk_put_device(ub
);
2345 mutex_unlock(&ub
->mutex
);
2349 static int ublk_ctrl_get_queue_affinity(struct ublk_device
*ub
,
2350 struct io_uring_cmd
*cmd
)
2352 const struct ublksrv_ctrl_cmd
*header
= io_uring_sqe_cmd(cmd
->sqe
);
2353 void __user
*argp
= (void __user
*)(unsigned long)header
->addr
;
2354 cpumask_var_t cpumask
;
2355 unsigned long queue
;
2356 unsigned int retlen
;
2360 if (header
->len
* BITS_PER_BYTE
< nr_cpu_ids
)
2362 if (header
->len
& (sizeof(unsigned long)-1))
2367 queue
= header
->data
[0];
2368 if (queue
>= ub
->dev_info
.nr_hw_queues
)
2371 if (!zalloc_cpumask_var(&cpumask
, GFP_KERNEL
))
2374 for_each_possible_cpu(i
) {
2375 if (ub
->tag_set
.map
[HCTX_TYPE_DEFAULT
].mq_map
[i
] == queue
)
2376 cpumask_set_cpu(i
, cpumask
);
2380 retlen
= min_t(unsigned short, header
->len
, cpumask_size());
2381 if (copy_to_user(argp
, cpumask
, retlen
))
2382 goto out_free_cpumask
;
2383 if (retlen
!= header
->len
&&
2384 clear_user(argp
+ retlen
, header
->len
- retlen
))
2385 goto out_free_cpumask
;
2389 free_cpumask_var(cpumask
);
2393 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info
*info
)
2395 pr_devel("%s: dev id %d flags %llx\n", __func__
,
2396 info
->dev_id
, info
->flags
);
2397 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2398 info
->nr_hw_queues
, info
->queue_depth
);
static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublksrv_ctrl_dev_info info;
	struct ublk_device *ub;
	int ret = -EINVAL;

	if (header->len < sizeof(info) || !header->addr)
		return -EINVAL;
	if (header->queue_id != (u16)-1) {
		pr_warn("%s: queue_id is wrong %x\n",
			__func__, header->queue_id);
		return -EINVAL;
	}

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	if (capable(CAP_SYS_ADMIN))
		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
		return -EPERM;

	/* forbid nonsense combinations of recovery flags */
	switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
	case 0:
	case UBLK_F_USER_RECOVERY:
	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
		break;
	default:
		pr_warn("%s: invalid recovery flags %llx\n", __func__,
			info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
		return -EINVAL;
	}

	/*
	 * An unprivileged device can't be trusted, but RECOVERY and
	 * RECOVERY_REISSUE still may hang error handling, so we can't
	 * support recovery features for unprivileged ublk now.
	 *
	 * TODO: provide forward progress for the RECOVERY handler, so that
	 * unprivileged devices can benefit from it.
	 */
	if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
		info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
				UBLK_F_USER_RECOVERY);

		/*
		 * For USER_COPY, we depend on userspace to fill the request
		 * buffer by pwrite() to the ublk char device, which can't be
		 * used for an unprivileged device.
		 */
		if (info.flags & UBLK_F_USER_COPY)
			return -EINVAL;
	}

	/* the created device is always owned by current user */
	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);

	if (header->dev_id != info.dev_id) {
		pr_warn("%s: dev id not match %u %u\n",
			__func__, header->dev_id, info.dev_id);
		return -EINVAL;
	}

	if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
		pr_warn("%s: dev id is too large. Max supported is %d\n",
			__func__, UBLK_MAX_UBLKS - 1);
		return -EINVAL;
	}

	ublk_dump_dev_info(&info);

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	ret = -EACCES;
	if (ublks_added >= ublks_max)
		goto out_unlock;

	ret = -ENOMEM;
	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
	if (!ub)
		goto out_unlock;
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->lock);
	INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
		goto out_free_ub;

	memcpy(&ub->dev_info, &info, sizeof(info));

	/* update device id */
	ub->dev_info.dev_id = ub->ub_number;

	/*
	 * 64bit flags will be copied back to userspace as the feature
	 * negotiation result, so we have to clear flags which the driver
	 * doesn't support yet, then userspace can get the correct flags
	 * (features) to handle.
	 */
	ub->dev_info.flags &= UBLK_F_ALL;
	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
		UBLK_F_URING_CMD_COMP_IN_TASK;

	/* GET_DATA isn't needed any more with USER_COPY */
	if (ublk_dev_is_user_copy(ub))
		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;

	/* Zoned storage support requires the user copy feature */
	if (ublk_dev_is_zoned(ub) &&
	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
		ret = -EINVAL;
		goto out_free_dev_number;
	}

	/* We are not ready to support zero copy */
	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;

	ub->dev_info.nr_hw_queues = min_t(unsigned int,
			ub->dev_info.nr_hw_queues, nr_cpu_ids);
	ublk_align_max_io_size(ub);

	ret = ublk_init_queues(ub);
	if (ret)
		goto out_free_dev_number;

	ret = ublk_add_tag_set(ub);
	if (ret)
		goto out_deinit_queues;

	ret = -EFAULT;
	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
		goto out_free_tag_set;

	/*
	 * Add the char dev so that the ublksrv daemon can be set up.
	 * ublk_add_chdev() will clean up everything if it fails.
	 */
	ret = ublk_add_chdev(ub);
	goto out_unlock;

out_free_tag_set:
	blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
	ublk_deinit_queues(ub);
out_free_dev_number:
	ublk_free_dev_number(ub);
out_free_ub:
	mutex_destroy(&ub->mutex);
	kfree(ub);
out_unlock:
	mutex_unlock(&ublk_ctl_mutex);
	return ret;
}
static inline bool ublk_idr_freed(int id)
{
	void *ptr;

	spin_lock(&ublk_idr_lock);
	ptr = idr_find(&ublk_index_idr, id);
	spin_unlock(&ublk_idr_lock);

	return ptr == NULL;
}
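
/*
 * DEL_DEV/DEL_DEV_ASYNC: tear the device down exactly once; the synchronous
 * variant also waits until the idr slot is released so the device number can
 * be reused right after the command returns.
 */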
static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
{
	struct ublk_device *ub = *p_ub;
	int idx = ub->ub_number;
	int ret;

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
		ublk_remove(ub);
		set_bit(UB_STATE_DELETED, &ub->state);
	}

	/* Mark the reference as consumed */
	*p_ub = NULL;
	ublk_put_device(ub);
	mutex_unlock(&ublk_ctl_mutex);

	/*
	 * Wait until the idr entry is removed, then it can be reused after
	 * the DEL_DEV command is returned.
	 *
	 * If we return because of a user interrupt, a future delete command
	 * may come:
	 *
	 * - the device number isn't freed, this device won't or needn't
	 *   be deleted again, since UB_STATE_DELETED is set, and the device
	 *   will be released after the last reference is dropped
	 *
	 * - the device number is freed already, we will not find this
	 *   device via ublk_get_device_from_id()
	 */
	if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
		return -EINTR;
	return 0;
}
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);

	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
			header->data[0], header->addr, header->len);
}
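
/* STOP_DEV: tear down the disk and make sure no nosrv work is still pending */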
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->nosrv_work);
	return 0;
}
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;

	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
		return -EFAULT;

	return 0;
}
/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);

	if (ub->ub_disk) {
		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
	} else {
		ub->params.devt.disk_major = 0;
		ub->params.devt.disk_minor = 0;
	}

	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}
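
/*
 * GET_PARAMS: copy up to ublk_params_header.len bytes of the current device
 * parameters back to userspace, with the read-only DEVT part refreshed under
 * ub->mutex.
 */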
static int ublk_ctrl_get_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	mutex_lock(&ub->mutex);
	ublk_ctrl_fill_params_devt(ub);
	if (copy_to_user(argp, &ub->params, ph.len))
		ret = -EFAULT;
	else
		ret = 0;
	mutex_unlock(&ub->mutex);

	return ret;
}
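
/*
 * SET_PARAMS: accept new parameters from userspace while the device isn't
 * live, keep only the parameter types this driver knows about, and drop the
 * whole set if validation fails.
 */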
static int ublk_ctrl_set_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len || !ph.types)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	/* parameters can only be changed when device isn't live */
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
		ret = -EACCES;
	} else if (copy_from_user(&ub->params, argp, ph.len)) {
		ret = -EFAULT;
	} else {
		/* clear all we don't support yet */
		ub->params.types &= UBLK_PARAM_TYPE_ALL;
		ret = ublk_validate_params(ub);
		if (ret)
			ub->params.types = 0;
	}
	mutex_unlock(&ub->mutex);

	return ret;
}
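
/*
 * Reset one queue's per-io state after its daemon has died, so that a fresh
 * daemon can re-issue FETCH_REQ commands during user recovery.
 */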
static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));

	/* All old ioucmds have to be completed */
	ubq->nr_io_ready = 0;
	/* old daemon is PF_EXITING, put it now */
	put_task_struct(ubq->ubq_daemon);
	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
	ubq->ubq_daemon = NULL;
	ubq->timeout = false;
	ubq->canceling = false;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		/* forget everything now and be ready for new FETCH_REQ */
		io->flags = 0;
		io->cmd = NULL;
		io->addr = 0;
	}
}
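
/*
 * START_USER_RECOVERY: only allowed once the old daemon is completely gone;
 * reinit every queue and re-arm ub->completion so that END_USER_RECOVERY can
 * wait for the new daemon's FETCH_REQs.
 */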
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ret = -EINVAL;
	int i;

	mutex_lock(&ub->mutex);
	if (ublk_nosrv_should_stop_dev(ub))
		goto out_unlock;
	if (!ub->nr_queues_ready)
		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * and one of the following holds
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 *
	 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
	 *     quiesced, but all I/O is being immediately errored
	 */
	if (test_bit(UB_STATE_OPEN, &ub->state) ||
	    !ublk_dev_in_recoverable_state(ub)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
	init_completion(&ub->completion);
	ret = 0;
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
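
/*
 * END_USER_RECOVERY: wait until the new daemon has fetched every tag, then
 * switch the device back to UBLK_S_DEV_LIVE and restart queued or failed I/O.
 */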
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ublksrv_pid = (int)header->data[0];
	int ret = -EINVAL;
	int i;

	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until new ubq_daemon sending all FETCH_REQ */
	if (wait_for_completion_interruptible(&ub->completion))
		return -EINTR;

	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);

	mutex_lock(&ub->mutex);
	if (ublk_nosrv_should_stop_dev(ub))
		goto out_unlock;

	if (!ublk_dev_in_recoverable_state(ub)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	ub->dev_info.ublksrv_pid = ublksrv_pid;
	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
			__func__, ublksrv_pid, header->dev_id);

	if (ublk_nosrv_dev_should_queue_io(ub)) {
		ub->dev_info.state = UBLK_S_DEV_LIVE;
		blk_mq_unquiesce_queue(ub->ub_disk->queue);
		pr_devel("%s: queue unquiesced, dev id %d.\n",
				__func__, header->dev_id);
		blk_mq_kick_requeue_list(ub->ub_disk->queue);
	} else {
		blk_mq_quiesce_queue(ub->ub_disk->queue);
		ub->dev_info.state = UBLK_S_DEV_LIVE;
		for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
			ublk_get_queue(ub, i)->fail_io = false;
		blk_mq_unquiesce_queue(ub->ub_disk->queue);
	}

	ret = 0;
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
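
/*
 * GET_FEATURES: report the feature flags this driver build supports, so
 * userspace can negotiate features before creating a device.
 */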
static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;

	if (header->len != UBLK_FEATURES_LEN || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
		return -EFAULT;

	return 0;
}
/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
static int ublk_char_dev_permission(struct ublk_device *ub,
		const char *dev_path, int mask)
{
	int err;
	struct path path;
	struct kstat stat;

	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	if (err)
		goto exit;

	err = -EPERM;
	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
		goto exit;

	err = inode_permission(&nop_mnt_idmap,
			d_backing_inode(path.dentry), mask);
exit:
	path_put(&path);
	return err;
}
static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	char *dev_path = NULL;
	int ret = 0;
	int mask;

	if (!unprivileged) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know if the specified device was created as unprivileged
		 * or not.
		 */
		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	}

	/*
	 * User has to provide the char device path for unprivileged ublk.
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
		return -EINVAL;

	if (header->len < header->dev_path_len)
		return -EINVAL;

	dev_path = memdup_user_nul(argp, header->dev_path_len);
	if (IS_ERR(dev_path))
		return PTR_ERR(dev_path);

	ret = -EINVAL;
	switch (_IOC_NR(cmd->cmd_op)) {
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
	case UBLK_CMD_GET_QUEUE_AFFINITY:
	case UBLK_CMD_GET_PARAMS:
	case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
		mask = MAY_READ;
		break;
	case UBLK_CMD_START_DEV:
	case UBLK_CMD_STOP_DEV:
	case UBLK_CMD_ADD_DEV:
	case UBLK_CMD_DEL_DEV:
	case UBLK_CMD_SET_PARAMS:
	case UBLK_CMD_START_USER_RECOVERY:
	case UBLK_CMD_END_USER_RECOVERY:
		mask = MAY_READ | MAY_WRITE;
		break;
	default:
		goto exit;
	}

	ret = ublk_char_dev_permission(ub, dev_path, mask);
	if (!ret) {
		header->len -= header->dev_path_len;
		header->addr += header->dev_path_len;
	}
	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
			__func__, ub->ub_number, cmd->cmd_op,
			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
			dev_path, ret);
exit:
	kfree(dev_path);
	return ret;
}
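
/*
 * Entry point for all commands issued on /dev/ublk-control: decode the
 * uring_cmd, check permission against the target device, then dispatch to
 * the per-command handler; the result is always delivered through the cqe.
 */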
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	struct ublk_device *ub = NULL;
	u32 cmd_op = cmd->cmd_op;
	int ret = -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))
		goto out;

	ret = ublk_check_cmd_op(cmd_op);
	if (ret)
		goto out;

	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
		ret = ublk_ctrl_get_features(cmd);
		goto out;
	}

	if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
		ret = -ENODEV;
		ub = ublk_get_device_from_id(header->dev_id);
		if (!ub)
			goto out;

		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
		if (ret)
			goto put_dev;
	}

	switch (_IOC_NR(cmd_op)) {
	case UBLK_CMD_START_DEV:
		ret = ublk_ctrl_start_dev(ub, cmd);
		break;
	case UBLK_CMD_STOP_DEV:
		ret = ublk_ctrl_stop_dev(ub);
		break;
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
		ret = ublk_ctrl_get_dev_info(ub, cmd);
		break;
	case UBLK_CMD_ADD_DEV:
		ret = ublk_ctrl_add_dev(cmd);
		break;
	case UBLK_CMD_DEL_DEV:
		ret = ublk_ctrl_del_dev(&ub, true);
		break;
	case UBLK_CMD_DEL_DEV_ASYNC:
		ret = ublk_ctrl_del_dev(&ub, false);
		break;
	case UBLK_CMD_GET_QUEUE_AFFINITY:
		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
		break;
	case UBLK_CMD_GET_PARAMS:
		ret = ublk_ctrl_get_params(ub, cmd);
		break;
	case UBLK_CMD_SET_PARAMS:
		ret = ublk_ctrl_set_params(ub, cmd);
		break;
	case UBLK_CMD_START_USER_RECOVERY:
		ret = ublk_ctrl_start_recovery(ub, cmd);
		break;
	case UBLK_CMD_END_USER_RECOVERY:
		ret = ublk_ctrl_end_recovery(ub, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

put_dev:
	if (ub)
		ublk_put_device(ub);
out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
	return -EIOCBQUEUED;
}
static const struct file_operations ublk_ctl_fops = {
	.open		= nonseekable_open,
	.uring_cmd	= ublk_ctrl_uring_cmd,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice ublk_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "ublk-control",
	.fops		= &ublk_ctl_fops,
};
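
/*
 * Module init: register the ublk-control misc device plus the char device
 * region and class used for the per-device /dev/ublkcN nodes.
 */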
static int __init ublk_init(void)
{
	int ret;

	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);

	init_waitqueue_head(&ublk_idr_wq);

	ret = misc_register(&ublk_misc);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
	if (ret)
		goto unregister_mis;

	ret = class_register(&ublk_chr_class);
	if (ret)
		goto free_chrdev_region;

	return 0;

free_chrdev_region:
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_mis:
	misc_deregister(&ublk_misc);
	return ret;
}
static void __exit ublk_exit(void)
{
	struct ublk_device *ub;
	int id;

	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);

	class_unregister(&ublk_chr_class);
	misc_deregister(&ublk_misc);

	idr_destroy(&ublk_index_idr);
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}
module_init(ublk_init);
module_exit(ublk_exit);
static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
{
	return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
{
	return sysfs_emit(buf, "%u\n", ublks_max);
}
static const struct kernel_param_ops ublk_max_ublks_ops = {
	.set = ublk_set_max_ublks,
	.get = ublk_get_max_ublks,
};

module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
3137 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
3138 MODULE_DESCRIPTION("Userspace block device");
3139 MODULE_LICENSE("GPL");