// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Test virtio server in the host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_TEST_PKT_WEIGHT 256

enum {
    VHOST_TEST_VQ = 0,
    VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
    struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
    unsigned out, in;
    int head;
    size_t len, total_len = 0;
    void *private;

    mutex_lock(&vq->mutex);
    private = vhost_vq_get_backend(vq);
    if (!private) {
        mutex_unlock(&vq->mutex);
        return;
    }

    vhost_disable_notify(&n->dev, vq);

    for (;;) {
        head = vhost_get_vq_desc(vq, vq->iov,
                     ARRAY_SIZE(vq->iov),
                     &out, &in,
                     NULL, NULL);
        /* On error, stop handling until the next kick. */
        if (unlikely(head < 0))
            break;
        /* Nothing new?  Wait for eventfd to tell us they refilled. */
        if (head == vq->num) {
            if (unlikely(vhost_enable_notify(&n->dev, vq))) {
                vhost_disable_notify(&n->dev, vq);
                continue;
            }
            break;
        }
        if (in) {
            vq_err(vq, "Unexpected descriptor format for TX: "
                   "out %d, in %d\n", out, in);
            break;
        }
        len = iov_length(vq->iov, out);
        /* Sanity check */
        if (!len) {
            vq_err(vq, "Unexpected 0 len for TX\n");
            break;
        }
        /* The test device simply discards the data: mark the buffer
         * used with no bytes written back and signal the guest. */
        vhost_add_used_and_signal(&n->dev, vq, head, 0);
        total_len += len;
        if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
            break;
    }

    mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
    struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                          poll.work);
    struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

    handle_vq(n);
}

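/* Open: allocate the device state and its single virtqueue, wire up the
 * kick handler, and initialize the underlying vhost device.
 */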
static int vhost_test_open(struct inode *inode, struct file *f)
{
    struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
    struct vhost_dev *dev;
    struct vhost_virtqueue **vqs;

    if (!n)
        return -ENOMEM;
    vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
    if (!vqs) {
        kfree(n);
        return -ENOMEM;
    }

    dev = &n->dev;
    vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
    n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
    vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
               VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

    f->private_data = n;

    return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
                struct vhost_virtqueue *vq)
{
    void *private;

    mutex_lock(&vq->mutex);
    private = vhost_vq_get_backend(vq);
    vhost_vq_set_backend(vq, NULL);
    mutex_unlock(&vq->mutex);
    return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
    *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush(struct vhost_test *n)
{
    vhost_dev_flush(&n->dev);
}

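/* Release: detach the backend, flush outstanding work, then stop and
 * clean up the vhost device before freeing the virtqueue array and the
 * device state.
 */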
static int vhost_test_release(struct inode *inode, struct file *f)
{
    struct vhost_test *n = f->private_data;
    void *private;

    vhost_test_stop(n, &private);
    vhost_test_flush(n);
    vhost_dev_stop(&n->dev);
    vhost_dev_cleanup(&n->dev);
    kfree(n->dev.vqs);
    kfree(n);
    return 0;
}

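/* VHOST_TEST_RUN: a non-zero argument attaches the device itself as the
 * virtqueue backend so handle_vq() starts consuming buffers on kicks;
 * zero detaches the backend and stops processing.
 */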
static long vhost_test_run(struct vhost_test *n, int test)
{
    void *priv, *oldpriv;
    struct vhost_virtqueue *vq;
    int r, index;

    if (test < 0 || test > 1)
        return -EINVAL;

    mutex_lock(&n->dev.mutex);
    r = vhost_dev_check_owner(&n->dev);
    if (r)
        goto err;

    for (index = 0; index < n->dev.nvqs; ++index) {
        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(&n->vqs[index])) {
            r = -EFAULT;
            goto err;
        }
    }

    for (index = 0; index < n->dev.nvqs; ++index) {
        vq = n->vqs + index;
        mutex_lock(&vq->mutex);
        priv = test ? n : NULL;

        /* Swap in the new backend. */
        oldpriv = vhost_vq_get_backend(vq);
        vhost_vq_set_backend(vq, priv);

        r = vhost_vq_init_access(&n->vqs[index]);

        mutex_unlock(&vq->mutex);

        if (r)
            goto err;

        if (oldpriv) {
            vhost_test_flush(n);
        }
    }

    mutex_unlock(&n->dev.mutex);
    return 0;

err:
    mutex_unlock(&n->dev.mutex);
    return r;
}

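/* VHOST_RESET_OWNER: detach the backend, stop the device and return it
 * to an unowned state with a fresh IOTLB.
 */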
static long vhost_test_reset_owner(struct vhost_test *n)
{
    void *priv = NULL;
    long err;
    struct vhost_iotlb *umem;

    mutex_lock(&n->dev.mutex);
    err = vhost_dev_check_owner(&n->dev);
    if (err)
        goto done;
    umem = vhost_dev_reset_owner_prepare();
    if (!umem) {
        err = -ENOMEM;
        goto done;
    }
    vhost_test_stop(n, &priv);
    vhost_test_flush(n);
    vhost_dev_stop(&n->dev);
    vhost_dev_reset_owner(&n->dev, umem);
done:
    mutex_unlock(&n->dev.mutex);
    return err;
}

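/* VHOST_SET_FEATURES: record the acked feature bits on the test virtqueue. */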
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
    struct vhost_virtqueue *vq;

    mutex_lock(&n->dev.mutex);
    if ((features & (1 << VHOST_F_LOG_ALL)) &&
        !vhost_log_access_ok(&n->dev)) {
        mutex_unlock(&n->dev.mutex);
        return -EFAULT;
    }
    vq = &n->vqs[VHOST_TEST_VQ];
    mutex_lock(&vq->mutex);
    vq->acked_features = features;
    mutex_unlock(&vq->mutex);
    mutex_unlock(&n->dev.mutex);
    return 0;
}

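/* VHOST_TEST_SET_BACKEND: fd == -1 disables the virtqueue, stopping the
 * poll and stashing the backend pointer; any other fd re-enables it with
 * the stashed backend. Note the stash is a single function-static
 * pointer, so only one test device is supported at a time.
 */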
static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
    static void *backend;

    const bool enable = fd != -1;
    struct vhost_virtqueue *vq;
    int r;

    mutex_lock(&n->dev.mutex);
    r = vhost_dev_check_owner(&n->dev);
    if (r)
        goto err;

    if (index >= VHOST_TEST_VQ_MAX) {
        r = -ENOBUFS;
        goto err;
    }
    vq = &n->vqs[index];
    mutex_lock(&vq->mutex);

    /* Verify that ring has been setup correctly. */
    if (!vhost_vq_access_ok(vq)) {
        r = -EFAULT;
        goto err_vq;
    }
    if (!enable) {
        vhost_poll_stop(&vq->poll);
        backend = vhost_vq_get_backend(vq);
        vhost_vq_set_backend(vq, NULL);
    } else {
        vhost_vq_set_backend(vq, backend);
        r = vhost_vq_init_access(vq);
        if (r == 0)
            r = vhost_poll_start(&vq->poll, vq->kick);
    }

    mutex_unlock(&vq->mutex);

    if (enable) {
        vhost_test_flush(n);
    }

    mutex_unlock(&n->dev.mutex);
    return 0;

err_vq:
    mutex_unlock(&vq->mutex);
err:
    mutex_unlock(&n->dev.mutex);
    return r;
}

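/* ioctl dispatch: handle the test-specific commands here and fall back
 * to the generic vhost device and vring ioctls for everything else.
 */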
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                 unsigned long arg)
{
    struct vhost_vring_file backend;
    struct vhost_test *n = f->private_data;
    void __user *argp = (void __user *)arg;
    u64 __user *featurep = argp;
    int test;
    u64 features;
    int r;

    switch (ioctl) {
    case VHOST_TEST_RUN:
        if (copy_from_user(&test, argp, sizeof test))
            return -EFAULT;
        return vhost_test_run(n, test);
    case VHOST_TEST_SET_BACKEND:
        if (copy_from_user(&backend, argp, sizeof backend))
            return -EFAULT;
        return vhost_test_set_backend(n, backend.index, backend.fd);
    case VHOST_GET_FEATURES:
        features = VHOST_FEATURES;
        if (copy_to_user(featurep, &features, sizeof features))
            return -EFAULT;
        return 0;
    case VHOST_SET_FEATURES:
        printk(KERN_ERR "1\n");
        if (copy_from_user(&features, featurep, sizeof features))
            return -EFAULT;
        printk(KERN_ERR "2\n");
        if (features & ~VHOST_FEATURES)
            return -EOPNOTSUPP;
        printk(KERN_ERR "3\n");
        return vhost_test_set_features(n, features);
    case VHOST_RESET_OWNER:
        return vhost_test_reset_owner(n);
    default:
        mutex_lock(&n->dev.mutex);
        r = vhost_dev_ioctl(&n->dev, ioctl, argp);
        if (r == -ENOIOCTLCMD)
            r = vhost_vring_ioctl(&n->dev, ioctl, argp);
        vhost_test_flush(n);
        mutex_unlock(&n->dev.mutex);
        return r;
    }
}

static const struct file_operations vhost_test_fops = {
    .owner          = THIS_MODULE,
    .release        = vhost_test_release,
    .unlocked_ioctl = vhost_test_ioctl,
    .compat_ioctl   = compat_ptr_ioctl,
    .open           = vhost_test_open,
    .llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
    MISC_DYNAMIC_MINOR,
    "vhost-test",
    &vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");