// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - character device component for mostcore
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static LIST_HEAD(channel_list);
static DEFINE_SPINLOCK(ch_list_lock);

static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}

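/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of the file
 * structure, checks the access mode against the channel direction and
 * starts the channel in the core.
 */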
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}

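/**
 * comp_close - closes the channel associated with the inode
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This drops the open reference, stops the channel in the core and, if the
 * interface is already gone, releases the channel object.
 */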
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

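/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 *
 * Returns the number of bytes accepted or a negative error code.
 */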
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}

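/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 *
 * Returns the number of bytes copied to the user buffer or a negative
 * error code.
 */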
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}

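/**
 * comp_poll - implements the poll syscall
 * @filp: file pointer
 * @wait: poll table
 *
 * This reports data availability for rx channels and free buffer
 * availability for tx channels.
 */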
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}

static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};

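/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This removes the device node of the channel and, if the channel is not
 * currently open, releases the channel object.
 */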
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

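/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to the buffer object that has completed
 *
 * This searches for the channel linked to this MBO, stores the MBO in the
 * channel's fifo and wakes up waiting readers.
 */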
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

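/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes processes that sleep while waiting for a free tx buffer.
 */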
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}

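/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel ID
 * @cfg: pointer to the channel configuration
 * @name: name of the device node to be created
 * @args: pointer to component parameters (unused here)
 *
 * This allocates a channel object, registers the character device and
 * creates the device node for the channel.
 *
 * Returns 0 on success or an error code otherwise.
 */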
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}

static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};

static int __init most_cdev_init(void)
{
	int err;

	comp.class = class_create(THIS_MODULE, "most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}

static void __exit most_cdev_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}

module_init(most_cdev_init);
module_exit(most_cdev_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");