/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
        struct llist_node node;
        vhost_work_fn_t fn;
        unsigned long flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
        poll_table table;
        wait_queue_head_t *wqh;
        wait_queue_entry_t wait;
        struct vhost_work work;
        __poll_t mask;
        struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
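
/*
 * Example: a minimal sketch of wiring a file into the vhost worker with the
 * poll/work API above.  "my_handle_rx" and "sock_file" are hypothetical names
 * used only for illustration, and error handling is omitted:
 *
 *      static void my_handle_rx(struct vhost_work *work)
 *      {
 *              struct vhost_poll *poll = container_of(work,
 *                                                     struct vhost_poll, work);
 *
 *              // run the datapath for the virtqueue that owns "poll"
 *      }
 *
 *      vhost_poll_init(&poll, my_handle_rx, EPOLLIN, &dev);
 *      vhost_poll_start(&poll, sock_file);
 *      ...
 *      vhost_poll_stop(&poll);
 *      vhost_poll_flush(&poll);
 */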

struct vhost_log {
        u64 addr;
        u64 len;
};

enum vhost_uaddr_type {
        VHOST_ADDR_DESC = 0,
        VHOST_ADDR_AVAIL = 1,
        VHOST_ADDR_USED = 2,
        VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
        struct eventfd_ctx *ctx;
        struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
        struct vhost_dev *dev;

        /* The actual ring of buffers. */
        struct mutex mutex;
        unsigned int num;
        vring_desc_t __user *desc;
        vring_avail_t __user *avail;
        vring_used_t __user *used;
        const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
        struct file *kick;
        struct vhost_vring_call call_ctx;
        struct eventfd_ctx *error_ctx;
        struct eventfd_ctx *log_ctx;

        struct vhost_poll poll;

        /* The routine to call when the Guest pings us, or timeout. */
        vhost_work_fn_t handle_kick;

        /* Last available index we saw. */
        u16 last_avail_idx;

        /* Caches available index value from user. */
        u16 avail_idx;

        /* Last index we used. */
        u16 last_used_idx;

        /* Used flags */
        u16 used_flags;

        /* Last used index value we have signalled on */
        u16 signalled_used;

        /* Whether signalled_used holds a valid value */
        bool signalled_used_valid;

        /* Log writes to used structure. */
        bool log_used;
        u64 log_addr;

        struct iovec iov[UIO_MAXIOV];
        struct iovec iotlb_iov[64];
        struct iovec *indirect;
        struct vring_used_elem *heads;
        /* Protected by virtqueue mutex. */
        struct vhost_iotlb *umem;
        struct vhost_iotlb *iotlb;
        void *private_data;
        u64 acked_features;
        u64 acked_backend_features;
        /* Log write descriptors */
        void __user *log_base;
        struct vhost_log *log;
        struct iovec log_iov[64];

        /* Ring endianness. Defaults to legacy native endianness.
         * Set to true when starting a modern virtio device. */
        bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
        /* Ring endianness requested by userspace for cross-endian support. */
        bool user_be;
#endif
        u32 busyloop_timeout;
};
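
/*
 * Example: avail_idx caches the guest's avail->idx while last_avail_idx tracks
 * how far this backend has consumed, and both are free-running 16-bit
 * counters.  A sketch of "how many buffers are waiting" (with vq->mutex held
 * and avail_idx freshly read) is therefore:
 *
 *      u16 pending = (u16)(vq->avail_idx - vq->last_avail_idx);
 *
 * The unsigned subtraction handles wraparound of the 16-bit indices.
 */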

struct vhost_msg_node {
        union {
                struct vhost_msg msg;
                struct vhost_msg_v2 msg_v2;
        };
        struct vhost_virtqueue *vq;
        struct list_head node;
};

struct vhost_dev {
        struct mm_struct *mm;
        struct mutex mutex;
        struct vhost_virtqueue **vqs;
        int nvqs;
        struct eventfd_ctx *log_ctx;
        struct llist_head work_list;
        struct task_struct *worker;
        struct vhost_iotlb *umem;
        struct vhost_iotlb *iotlb;
        spinlock_t iotlb_lock;
        struct list_head read_list;
        struct list_head pending_list;
        wait_queue_head_t wait;
        int iov_limit;
        int weight;
        int byte_weight;
        u64 kcov_handle;
        bool use_worker;
        int (*msg_handler)(struct vhost_dev *dev,
                           struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
                    int nvqs, int iov_limit, int weight, int byte_weight,
                    bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg));
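
/*
 * Example: a sketch of device setup around vhost_dev_init().  "n",
 * "my_handle_kick" and the two weight constants are hypothetical, and error
 * handling is omitted:
 *
 *      static struct vhost_virtqueue *vqs[1];
 *
 *      vqs[0] = &n->vq;
 *      n->vq.handle_kick = my_handle_kick;
 *      vhost_dev_init(&n->dev, vqs, 1, UIO_MAXIOV,
 *                     MY_PKT_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 *
 * The worker thread is only created once userspace issues VHOST_SET_OWNER,
 * which ends up in vhost_dev_set_owner() below.
 */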

long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
                      struct iovec iov[], unsigned int iov_count,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
                     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
                               unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
                                 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len,
                    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);
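
/*
 * Example: a rough sketch of the usual kick-handler loop built on the
 * descriptor/used-ring helpers above.  "my_process", "pkts" and "total_len"
 * are hypothetical; vhost_get_vq_desc() returns the head index, vq->num when
 * the ring is empty, or a negative error:
 *
 *      unsigned int out, in;
 *      int head, len, pkts = 0, total_len = 0;
 *
 *      mutex_lock(&vq->mutex);
 *      vhost_disable_notify(dev, vq);
 *      for (;;) {
 *              head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *                                       &out, &in, NULL, NULL);
 *              if (head < 0)
 *                      break;
 *              if (head == vq->num) {
 *                      if (vhost_enable_notify(dev, vq)) {
 *                              vhost_disable_notify(dev, vq);
 *                              continue;       // new buffers raced in
 *                      }
 *                      break;                  // ring drained
 *              }
 *              len = my_process(vq->iov, out, in);
 *              vhost_add_used_and_signal(dev, vq, head, len);
 *              total_len += len;
 *              if (vhost_exceeds_weight(vq, ++pkts, total_len))
 *                      break;                  // work is requeued for fairness
 *      }
 *      mutex_unlock(&vq->mutex);
 */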

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
                       struct list_head *head,
                       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
                                         struct list_head *head);
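
/*
 * Example: a sketch of reporting an IOTLB miss to userspace through the
 * message queue above (illustrative only; "iova" and "perm" are assumed to
 * come from the failed translation, and the v1 message layout from
 * uapi/linux/vhost_types.h is used):
 *
 *      struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
 *
 *      if (!node)
 *              return -ENOMEM;
 *      node->msg.iotlb.iova = iova;
 *      node->msg.iotlb.perm = perm;
 *      node->msg.iotlb.type = VHOST_IOTLB_MISS;
 *      vhost_enqueue_msg(dev, &dev->read_list, node);
 *
 * Userspace then picks the message up via the char-device read path
 * (vhost_chr_read_iter() below) and replies with an IOTLB update.
 */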

void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                        poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
                            int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                             struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
                          struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
                pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
                if ((vq)->error_ctx)                               \
                        eventfd_signal((vq)->error_ctx, 1);        \
        } while (0)
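
/*
 * Example: vq_err() is intended for datapath failures; a hypothetical use
 * would be
 *
 *      vq_err(vq, "Indirect descriptor length too small: %u\n", len);
 *
 * which logs at debug level and, if userspace registered an error eventfd
 * for this virtqueue, signals it.
 */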

enum {
        VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
                         (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                         (1ULL << VIRTIO_RING_F_EVENT_IDX) |
                         (1ULL << VHOST_F_LOG_ALL) |
                         (1ULL << VIRTIO_F_ANY_LAYOUT) |
                         (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq: Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
                                        void *private_data)
{
        vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq: Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
        return vq->private_data;
}
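
/*
 * Example: per the Context notes above, a backend is installed under the
 * virtqueue lock, e.g. (sketch; "sock" stands for whatever object the device
 * hands its datapath):
 *
 *      mutex_lock(&vq->mutex);
 *      vhost_vq_set_backend(vq, sock);
 *      mutex_unlock(&vq->mutex);
 *
 * The kick handler later retrieves it with vhost_vq_get_backend() and bails
 * out if it is NULL (backend not started or already removed).
 */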

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
        return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
        return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
        return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
        return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
        return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
        return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
        return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
        return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
        return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
        return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
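
/*
 * Example: ring fields are always converted through these helpers so that
 * legacy and cross-endian guests are handled in one place.  A sketch of
 * reading the guest's available index (direct userspace access; the IOTLB
 * translation path and error handling are ignored here):
 *
 *      __virtio16 idx;
 *
 *      if (get_user(idx, &vq->avail->idx))
 *              return -EFAULT;
 *      vq->avail_idx = vhost16_to_cpu(vq, idx);
 */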

#endif