/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
};
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
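
/*
 * Illustrative usage sketch, not part of the original header: a backend
 * typically embeds a vhost_work in its own state and queues it to the
 * device's worker thread.  The my_backend type and my_work_fn below are
 * hypothetical names used for illustration only.
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		struct my_backend *b =
 *			container_of(work, struct my_backend, work);
 *		// runs in the vhost worker thread, with dev->mm active
 *	}
 *
 *	vhost_work_init(&b->work, my_work_fn);
 *	vhost_work_queue(&b->dev, &b->work);
 */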
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
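
/*
 * Illustrative sketch, not from the original source: the usual
 * vhost_poll life cycle for a virtqueue kick eventfd, assuming a
 * hypothetical handler my_handle_kick and an eventfd struct file
 * *kick_file:
 *
 *	vhost_poll_init(&vq->poll, my_handle_kick, EPOLLIN, &dev);
 *	vhost_poll_start(&vq->poll, kick_file);
 *	...
 *	vhost_poll_stop(&vq->poll);
 *	vhost_poll_flush(&vq->poll);	// wait for queued work to finish
 */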
struct vhost_log {
	u64 addr;
	u64 len;
};

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root_cached umem_tree;
	struct list_head umem_list;
	int numem;
};
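
/*
 * Editorial sketch, not part of the original header: each
 * vhost_umem_node describes one guest-address range and where it lives
 * in the owner's virtual address space, so translating a guest address
 * gaddr covered by a node amounts to:
 *
 *	uaddr = node->userspace_addr + (gaddr - node->start);
 *
 * START()/LAST() above are the accessors the interval tree uses to
 * index these nodes by [start, last].
 */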
enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
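
/*
 * Editorial note, not from the original source: with split-ring
 * semantics the u16 indices above are free-running and wrap naturally,
 * so the number of buffers the guest has made available that we have
 * not yet fetched is simply:
 *
 *	(u16)(vq->avail_idx - vq->last_avail_idx)
 */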
struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};
struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
};
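
/*
 * Editorial note, not from the original source: work_list is a
 * lock-free llist that producers push onto via vhost_work_queue(); the
 * single kthread pointed to by worker drains it, running each
 * vhost_work's fn with the owner's mm made active so it can access the
 * rings through the userspace pointers above.
 */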
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
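
/*
 * Illustrative sketch, not from the original source: the usual setup
 * order in a backend's open/ioctl path, with MY_WEIGHT and
 * MY_BYTE_WEIGHT standing in for hypothetical per-backend limits:
 *
 *	vhost_dev_init(&dev, vqs, nvqs, UIO_MAXIOV, MY_WEIGHT,
 *		       MY_BYTE_WEIGHT);
 *	// later, on VHOST_SET_OWNER from userspace:
 *	vhost_dev_set_owner(&dev);	// binds dev to current->mm and
 *					// spawns the worker kthread
 *	// generic ioctls are forwarded to vhost_dev_ioctl() and
 *	// vhost_vring_ioctl(); teardown is vhost_dev_stop() followed
 *	// by vhost_dev_cleanup()
 */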
int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
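
/*
 * Illustrative sketch, not from the original source: the canonical
 * datapath loop a kick handler runs, simplified and without error
 * handling.  dev is the owning vhost_dev and my_process() is a
 * hypothetical per-backend routine:
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head == vq->num) {		// ring is empty
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	// more buffers raced in
 *			}
 *			break;
 *		}
 *		len = my_process(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */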
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
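
/*
 * Editorial note, not from the original source: vhost_log_write() is
 * the dirty-page logging hook for live migration.  Once
 * VHOST_F_LOG_ALL has been acked and vq->log_used is set, a backend
 * logs each guest-visible write it completed, roughly (log_num, len
 * and in are assumed locals of the caller):
 *
 *	if (unlikely(vq->log_used))
 *		vhost_log_write(vq, vq->log, log_num, len, vq->iov, in);
 */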
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
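
/*
 * Illustrative sketch, not from the original source: this queue pair
 * carries the IOTLB protocol.  On a translation miss the kernel side
 * does roughly:
 *
 *	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
 *
 *	if (node) {
 *		node->msg.iotlb.type = VHOST_IOTLB_MISS;
 *		// fill in the faulting iova and access bits
 *		vhost_enqueue_msg(dev, &dev->read_list, node);
 *	}
 *
 * Userspace picks the message up by reading the device fd (see
 * vhost_chr_read_iter() below) and replies with a VHOST_IOTLB_UPDATE.
 */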
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
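
/*
 * Illustrative sketch, not from the original source: backends expose
 * the IOTLB message stream by delegating their char-device hooks to
 * the helpers above, roughly (my_backend is hypothetical):
 *
 *	static ssize_t my_chr_read_iter(struct kiocb *iocb,
 *					struct iov_iter *to)
 *	{
 *		struct my_backend *b = iocb->ki_filp->private_data;
 *
 *		return vhost_chr_read_iter(&b->dev, to,
 *				iocb->ki_filp->f_flags & O_NONBLOCK);
 *	}
 */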
#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)
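
/*
 * Usage note (editorial): vq_err() both traces the failure and kicks
 * the error eventfd so userspace can react, e.g.:
 *
 *	vq_err(vq, "Failed to get descriptor: idx %d\n", idx);
 */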
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};
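
/*
 * Editorial note, not from the original source: backends extend this
 * common set with their own bits (vhost-net, for instance, advertises
 * VHOST_FEATURES plus its own feature flags).  Individual bits are
 * then tested per virtqueue with the helpers below:
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		// use used_event/avail_event instead of flags
 */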
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}
static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif
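
/*
 * Editorial note, not from the original source: is_le is fixed when
 * the ring starts.  With VIRTIO_F_VERSION_1 acked the ring is always
 * little endian; otherwise legacy native endianness applies (or, under
 * CONFIG_VHOST_CROSS_ENDIAN_LEGACY, whatever endianness userspace
 * requested).  The accessors below fold this decision into every ring
 * field load and store.
 */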
/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}

#endif /* _VHOST_H */