// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022, STMicroelectronics
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_char.h"
#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX   (MINORMASK + 1)

static dev_t rpmsg_major;

static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev:    endpoint device
 * @cdev:   cdev for the endpoint device
 * @rpdev:  underlying rpmsg device
 * @chinfo: info used to open the endpoint
 * @ept_lock:   synchronization of @ept modifications
 * @ept:    rpmsg endpoint reference, when open
 * @queue_lock: synchronization of @queue operations
 * @queue:  incoming message queue
 * @readq:  wait object for incoming queue
 * @default_ept: set to the channel's default endpoint if that endpoint should be
 *              reused on device open, to prevent an endpoint address update.
 */
struct rpmsg_eptdev {
    struct device dev;
    struct cdev cdev;

    struct rpmsg_device *rpdev;
    struct rpmsg_channel_info chinfo;

    struct mutex ept_lock;
    struct rpmsg_endpoint *ept;
    struct rpmsg_endpoint *default_ept;

    spinlock_t queue_lock;
    struct sk_buff_head queue;
    wait_queue_head_t readq;

};

int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
    struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

    mutex_lock(&eptdev->ept_lock);
    if (eptdev->ept) {
        rpmsg_destroy_ept(eptdev->ept);
        eptdev->ept = NULL;
    }
    mutex_unlock(&eptdev->ept_lock);

    /* wake up any blocked readers */
    wake_up_interruptible(&eptdev->readq);

    cdev_device_del(&eptdev->cdev, &eptdev->dev);
    put_device(&eptdev->dev);

    return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);

static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
            void *priv, u32 addr)
{
    struct rpmsg_eptdev *eptdev = priv;
    struct sk_buff *skb;

    skb = alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    skb_put_data(skb, buf, len);

    spin_lock(&eptdev->queue_lock);
    skb_queue_tail(&eptdev->queue, skb);
    spin_unlock(&eptdev->queue_lock);

    /* wake up any blocking processes, waiting for new data */
    wake_up_interruptible(&eptdev->readq);

    return 0;
}

static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
    struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
    struct rpmsg_endpoint *ept;
    struct rpmsg_device *rpdev = eptdev->rpdev;
    struct device *dev = &eptdev->dev;

    mutex_lock(&eptdev->ept_lock);
    if (eptdev->ept) {
        mutex_unlock(&eptdev->ept_lock);
        return -EBUSY;
    }

    get_device(dev);

    /*
     * If the default_ept is set, the rpmsg device default endpoint is used.
     * Else a new endpoint is created on open that will be destroyed on release.
     */
    if (eptdev->default_ept)
        ept = eptdev->default_ept;
    else
        ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);

    if (!ept) {
        dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
        put_device(dev);
        mutex_unlock(&eptdev->ept_lock);
        return -EINVAL;
    }

    eptdev->ept = ept;
    filp->private_data = eptdev;
    mutex_unlock(&eptdev->ept_lock);

    return 0;
}

static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
    struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
    struct device *dev = &eptdev->dev;

    /* Close the endpoint, if it's not already destroyed by the parent */
    mutex_lock(&eptdev->ept_lock);
    if (eptdev->ept) {
        if (!eptdev->default_ept)
            rpmsg_destroy_ept(eptdev->ept);
        eptdev->ept = NULL;
    }
    mutex_unlock(&eptdev->ept_lock);

    /* Discard all SKBs */
    skb_queue_purge(&eptdev->queue);

    put_device(dev);

    return 0;
}

static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
    struct file *filp = iocb->ki_filp;
    struct rpmsg_eptdev *eptdev = filp->private_data;
    unsigned long flags;
    struct sk_buff *skb;
    int use;

    if (!eptdev->ept)
        return -EPIPE;

    spin_lock_irqsave(&eptdev->queue_lock, flags);

    /* Wait for data in the queue */
    if (skb_queue_empty(&eptdev->queue)) {
        spin_unlock_irqrestore(&eptdev->queue_lock, flags);

        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;

        /* Wait until we get data or the endpoint goes away */
        if (wait_event_interruptible(eptdev->readq,
                         !skb_queue_empty(&eptdev->queue) ||
                         !eptdev->ept))
            return -ERESTARTSYS;

        /* We lost the endpoint while waiting */
        if (!eptdev->ept)
            return -EPIPE;

        spin_lock_irqsave(&eptdev->queue_lock, flags);
    }

    skb = skb_dequeue(&eptdev->queue);
    spin_unlock_irqrestore(&eptdev->queue_lock, flags);
    if (!skb)
        return -EFAULT;

    use = min_t(size_t, iov_iter_count(to), skb->len);
    if (copy_to_iter(skb->data, use, to) != use)
        use = -EFAULT;

    kfree_skb(skb);

    return use;
}

static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
                       struct iov_iter *from)
{
    struct file *filp = iocb->ki_filp;
    struct rpmsg_eptdev *eptdev = filp->private_data;
    size_t len = iov_iter_count(from);
    void *kbuf;
    int ret;

    kbuf = kzalloc(len, GFP_KERNEL);
    if (!kbuf)
        return -ENOMEM;

    if (!copy_from_iter_full(kbuf, len, from)) {
        ret = -EFAULT;
        goto free_kbuf;
    }

    if (mutex_lock_interruptible(&eptdev->ept_lock)) {
        ret = -ERESTARTSYS;
        goto free_kbuf;
    }

    if (!eptdev->ept) {
        ret = -EPIPE;
        goto unlock_eptdev;
    }

    if (filp->f_flags & O_NONBLOCK) {
        ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
        if (ret == -ENOMEM)
            ret = -EAGAIN;
    } else {
        ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
    }

unlock_eptdev:
    mutex_unlock(&eptdev->ept_lock);

free_kbuf:
    kfree(kbuf);
    return ret < 0 ? ret : len;
}

static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
    struct rpmsg_eptdev *eptdev = filp->private_data;
    __poll_t mask = 0;

    if (!eptdev->ept)
        return EPOLLERR;

    poll_wait(filp, &eptdev->readq, wait);

    if (!skb_queue_empty(&eptdev->queue))
        mask |= EPOLLIN | EPOLLRDNORM;

    mask |= rpmsg_poll(eptdev->ept, filp, wait);

    return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
                   unsigned long arg)
{
    struct rpmsg_eptdev *eptdev = fp->private_data;

    if (cmd != RPMSG_DESTROY_EPT_IOCTL)
        return -EINVAL;

    /* Don't allow destroying a default endpoint. */
    if (eptdev->default_ept)
        return -EINVAL;

    return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
}

static const struct file_operations rpmsg_eptdev_fops = {
    .owner = THIS_MODULE,
    .open = rpmsg_eptdev_open,
    .release = rpmsg_eptdev_release,
    .read_iter = rpmsg_eptdev_read_iter,
    .write_iter = rpmsg_eptdev_write_iter,
    .poll = rpmsg_eptdev_poll,
    .unlocked_ioctl = rpmsg_eptdev_ioctl,
    .compat_ioctl = compat_ptr_ioctl,
};
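
/*
 * Illustrative userspace sketch (not part of this driver): a minimal client
 * for the character device exposed by the file operations above, assuming
 * udev has created /dev/rpmsg0 for this endpoint. Error handling is omitted
 * for brevity.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/rpmsg.h>
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *	char msg[] = "ping";
 *	char rsp[256];
 *
 *	write(fd, msg, sizeof(msg));		// rpmsg_eptdev_write_iter()
 *	read(fd, rsp, sizeof(rsp));		// rpmsg_eptdev_read_iter(); blocks unless O_NONBLOCK
 *	ioctl(fd, RPMSG_DESTROY_EPT_IOCTL);	// rejected with -EINVAL on a default endpoint
 *	close(fd);				// rpmsg_eptdev_release()
 */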

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

    return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

    return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

    return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
    &dev_attr_name.attr,
    &dev_attr_src.attr,
    &dev_attr_dst.attr,
    NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);
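
/*
 * The attributes above expose the channel info through sysfs. As an
 * illustrative example (device name and address values are placeholders),
 * for an endpoint device named rpmsg0 one would expect something like:
 *
 *	$ cat /sys/class/rpmsg/rpmsg0/name
 *	rpmsg-raw
 *	$ cat /sys/class/rpmsg/rpmsg0/src
 *	1025
 *	$ cat /sys/class/rpmsg/rpmsg0/dst
 *	1024
 */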

static void rpmsg_eptdev_release_device(struct device *dev)
{
    struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

    ida_simple_remove(&rpmsg_ept_ida, dev->id);
    ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
    kfree(eptdev);
}

static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
                              struct device *parent)
{
    struct rpmsg_eptdev *eptdev;
    struct device *dev;

    eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
    if (!eptdev)
        return ERR_PTR(-ENOMEM);

    dev = &eptdev->dev;
    eptdev->rpdev = rpdev;

    mutex_init(&eptdev->ept_lock);
    spin_lock_init(&eptdev->queue_lock);
    skb_queue_head_init(&eptdev->queue);
    init_waitqueue_head(&eptdev->readq);

    device_initialize(dev);
    dev->class = rpmsg_class;
    dev->parent = parent;
    dev->groups = rpmsg_eptdev_groups;
    dev_set_drvdata(dev, eptdev);

    cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
    eptdev->cdev.owner = THIS_MODULE;

    return eptdev;
}

static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
    struct device *dev = &eptdev->dev;
    int ret;

    eptdev->chinfo = chinfo;

    ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
    if (ret < 0)
        goto free_eptdev;
    dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

    ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
    if (ret < 0)
        goto free_minor_ida;
    dev->id = ret;
    dev_set_name(dev, "rpmsg%d", ret);

    ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
    if (ret)
        goto free_ept_ida;

    /* We can now rely on the release function for cleanup */
    dev->release = rpmsg_eptdev_release_device;

    return ret;

free_ept_ida:
    ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
    ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
    put_device(dev);
    kfree(eptdev);

    return ret;
}

int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
                   struct rpmsg_channel_info chinfo)
{
    struct rpmsg_eptdev *eptdev;
    int ret;

    eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
    if (IS_ERR(eptdev))
        return PTR_ERR(eptdev);

    ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);

    return ret;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
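
/*
 * Illustrative in-kernel usage (hypothetical caller, not part of this file):
 * another rpmsg driver, for instance the rpmsg control driver, can expose a
 * raw endpoint to userspace by filling in a channel info structure and
 * calling the helper exported above. The service name below is a placeholder.
 *
 *	struct rpmsg_channel_info chinfo = {
 *		.name = "my-raw-service",
 *		.src = RPMSG_ADDR_ANY,
 *		.dst = RPMSG_ADDR_ANY,
 *	};
 *
 *	ret = rpmsg_chrdev_eptdev_create(rpdev, &rpdev->dev, chinfo);
 */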

static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
    struct rpmsg_channel_info chinfo;
    struct rpmsg_eptdev *eptdev;
    struct device *dev = &rpdev->dev;

    memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
    chinfo.src = rpdev->src;
    chinfo.dst = rpdev->dst;

    eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
    if (IS_ERR(eptdev))
        return PTR_ERR(eptdev);

    /* Set the default_ept to the rpmsg device endpoint */
    eptdev->default_ept = rpdev->ept;
    /*
     * rpmsg_ept_cb() uses the priv parameter to retrieve its rpmsg_eptdev
     * context. Store it in the default endpoint's priv field.
     */
    eptdev->default_ept->priv = eptdev;

    return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}

static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
    int ret;

    ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
    if (ret)
        dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
}

static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
    { .name = "rpmsg-raw" },
    { },
};

static struct rpmsg_driver rpmsg_chrdev_driver = {
    .probe = rpmsg_chrdev_probe,
    .remove = rpmsg_chrdev_remove,
    .callback = rpmsg_ept_cb,
    .id_table = rpmsg_chrdev_id_table,
    .drv.name = "rpmsg_chrdev",
};

static int rpmsg_chrdev_init(void)
{
    int ret;

    ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
    if (ret < 0) {
        pr_err("failed to allocate char dev region\n");
        return ret;
    }

    ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
    if (ret < 0) {
        pr_err("rpmsg: failed to register rpmsg raw driver\n");
        goto free_region;
    }

    return 0;

free_region:
    unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);

    return ret;
}
postcore_initcall(rpmsg_chrdev_init);

static void rpmsg_chrdev_exit(void)
{
    unregister_rpmsg_driver(&rpmsg_chrdev_driver);
    unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");