/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node node;
	vhost_work_fn_t fn;
	unsigned long flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct vhost_work work;
	__poll_t mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_dev_flush(struct vhost_dev *dev);

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user space. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
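
/*
 * Illustrative lifecycle sketch (not lifted from any in-tree backend;
 * "my_dev", my_handle_kick, MY_WEIGHT and MY_BYTE_WEIGHT are hypothetical
 * names).  A backend embeds the device and its virtqueues in its own
 * state, wires up handle_kick, and lets the owner/vring ioctls drive
 * the rest:
 *
 *	struct my_dev {
 *		struct vhost_dev dev;
 *		struct vhost_virtqueue vq;
 *		struct vhost_virtqueue *vqs[1];
 *	};
 *
 *	d->vqs[0] = &d->vq;
 *	d->vq.handle_kick = my_handle_kick;
 *	vhost_dev_init(&d->dev, d->vqs, 1, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 *
 * Ownership is then taken via VHOST_SET_OWNER (vhost_dev_set_owner()),
 * per-ring state is configured through vhost_vring_ioctl(), and on
 * teardown vhost_dev_stop() followed by vhost_dev_cleanup() releases
 * the resources.
 */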

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
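
/*
 * Illustrative request loop for a handle_kick worker (a sketch only;
 * "process_request" is a hypothetical helper).  Descriptors are pulled
 * with vhost_get_vq_desc(), serviced, and returned to the used ring;
 * when the ring runs empty, guest notifications are re-enabled and the
 * ring is re-checked so a late kick is not missed:
 *
 *	for (;;) {
 *		int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					     &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = process_request(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */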

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)
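
/*
 * vq_err() logs a virtqueue error at debug level and, if userspace
 * registered an error eventfd via VHOST_SET_VRING_ERR, signals it so
 * the VMM can react.  Typical use:
 *
 *	vq_err(vq, "Unexpected descriptor format: out %d, in %d\n", out, in);
 */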

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};
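/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq            Virtqueue.
 * @private_data  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */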
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}
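/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq            Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */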
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
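
/*
 * Illustrative use of the accessors above (a sketch that ignores the
 * IOTLB translation path): a backend reading the guest-visible avail
 * index converts it so both legacy (native-endian) and VIRTIO 1.0
 * (little-endian) ring layouts are handled transparently:
 *
 *	__virtio16 idx;
 *
 *	if (get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */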
#endif