0001
0002
0003
0004
0005
0006 #include <linux/cdev.h>
0007 #include <linux/counter.h>
0008 #include <linux/err.h>
0009 #include <linux/errno.h>
0010 #include <linux/export.h>
0011 #include <linux/fs.h>
0012 #include <linux/kfifo.h>
0013 #include <linux/list.h>
0014 #include <linux/mutex.h>
0015 #include <linux/nospec.h>
0016 #include <linux/poll.h>
0017 #include <linux/slab.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/timekeeping.h>
0020 #include <linux/types.h>
0021 #include <linux/uaccess.h>
0022 #include <linux/wait.h>
0023
0024 #include "counter-chrdev.h"
0025
/**
 * struct counter_comp_node - container for a watched Counter component
 * @l:		list node (linked on a counter_event_node's comp_list)
 * @component:	Counter component data as supplied by userspace
 * @comp:	component read callback and type information
 * @parent:	pointer to the parent Counter device, Count, or Signal
 */
struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};
0032
/*
 * Compare the read callbacks of two counter_comp structures.
 *
 * NOTE(review): this relies on the *_read members sharing storage in a union
 * within struct counter_comp, so a match on any one member implies a match on
 * all; every member is listed only so the comparison is well-typed for
 * whichever callback is actually set — confirm against <linux/counter.h>.
 */
#define counter_comp_read_is_equal(a, b) \
	(a.action_read == b.action_read || \
	a.device_u8_read == b.device_u8_read || \
	a.count_u8_read == b.count_u8_read || \
	a.signal_u8_read == b.signal_u8_read || \
	a.device_u32_read == b.device_u32_read || \
	a.count_u32_read == b.count_u32_read || \
	a.signal_u32_read == b.signal_u32_read || \
	a.device_u64_read == b.device_u64_read || \
	a.count_u64_read == b.count_u64_read || \
	a.signal_u64_read == b.signal_u64_read)
0044
/* True if any read callback has been set for the given component */
#define counter_comp_read_is_set(comp) \
	(comp.action_read || \
	comp.device_u8_read || \
	comp.count_u8_read || \
	comp.signal_u8_read || \
	comp.device_u32_read || \
	comp.count_u32_read || \
	comp.signal_u32_read || \
	comp.device_u64_read || \
	comp.count_u64_read || \
	comp.signal_u64_read)
0056
/*
 * Read Counter events from the device event fifo into the user buffer.
 * Blocks (unless O_NONBLOCK) until at least one whole struct counter_event
 * is available; returns the number of bytes copied or a negative error code.
 */
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	/* A NULL ops indicates the underlying device has gone away */
	if (!counter->ops)
		return -ENODEV;

	/* Only whole events are copied out; reject undersized buffers */
	if (len < sizeof(struct counter_event))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			/* Sleep until an event arrives or the device goes */
			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		/* Serialize readers so each event reaches a single reader */
		if (mutex_lock_interruptible(&counter->events_out_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_out_lock);
		if (err < 0)
			return err;
	} while (!copied);	/* retry if another reader drained the fifo */

	return copied;
}
0094
0095 static __poll_t counter_chrdev_poll(struct file *filp,
0096 struct poll_table_struct *pollt)
0097 {
0098 struct counter_device *const counter = filp->private_data;
0099 __poll_t events = 0;
0100
0101 if (!counter->ops)
0102 return events;
0103
0104 poll_wait(filp, &counter->events_wait, pollt);
0105
0106 if (!kfifo_is_empty(&counter->events))
0107 events = EPOLLIN | EPOLLRDNORM;
0108
0109 return events;
0110 }
0111
0112 static void counter_events_list_free(struct list_head *const events_list)
0113 {
0114 struct counter_event_node *p, *n;
0115 struct counter_comp_node *q, *o;
0116
0117 list_for_each_entry_safe(p, n, events_list, l) {
0118
0119 list_for_each_entry_safe(q, o, &p->comp_list, l) {
0120 list_del(&q->l);
0121 kfree(q);
0122 }
0123
0124
0125 list_del(&p->l);
0126 kfree(p);
0127 }
0128 }
0129
/*
 * Queue a component watch on next_events_list under the event/channel given
 * by @watch. Caller must hold counter->n_events_list_lock. Returns 0 on
 * success, -EINVAL if an identical watch already exists, or -ENOMEM.
 */
static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for this event/channel pair in the pending list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		event_node->channel == watch->channel)
			break;

	/* Loop reached the list head: event is not yet in the list */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Reject a component watch that has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/* Free the event node if it ended up with no component watches */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}
0186
/*
 * Activate the pending watch configuration: free the currently active events
 * list, move next_events_list into its place, and ask the driver to apply
 * the new configuration. Lock order here: n_events_list_lock (mutex) is
 * taken before events_list_lock (irqsave spinlock).
 */
static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Drop the old configuration and swap in the pending one */
	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	/* Let the driver react to the new watch configuration */
	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
0207
/*
 * Tear down event watches: free the active events list (and notify the
 * driver of the now-empty configuration), then discard any watches still
 * pending on next_events_list.
 */
static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	/* Inform the driver that no events are configured anymore */
	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	/* Drop watches that were queued but never enabled */
	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
0230
/*
 * Handle COUNTER_ADD_WATCH_IOCTL: copy a struct counter_watch from
 * userspace, resolve the watched component's read callback, and queue it on
 * next_events_list. The watch takes effect only after a subsequent
 * COUNTER_ENABLE_EVENTS_IOCTL.
 */
static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	/* A watch with no component reports only the bare event */
	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/* Resolve extension list and parent object for the component scope */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		/* Clamp index under speculative execution (Spectre v1) */
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Bind the appropriate read callback for the component type */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		if (id >= num_ext)
			return -EINVAL;
		id = array_index_nospec(id, num_ext);

		comp_node.comp = ext[id];
		break;
	default:
		return -EINVAL;
	}
	/* A watchable component must provide some read callback */
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	/* Give the driver a chance to reject unsupported watches */
	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
0347
0348 static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
0349 unsigned long arg)
0350 {
0351 struct counter_device *const counter = filp->private_data;
0352 int ret = -ENODEV;
0353
0354 mutex_lock(&counter->ops_exist_lock);
0355
0356 if (!counter->ops)
0357 goto out_unlock;
0358
0359 switch (cmd) {
0360 case COUNTER_ADD_WATCH_IOCTL:
0361 ret = counter_add_watch(counter, arg);
0362 break;
0363 case COUNTER_ENABLE_EVENTS_IOCTL:
0364 ret = counter_enable_events(counter);
0365 break;
0366 case COUNTER_DISABLE_EVENTS_IOCTL:
0367 ret = counter_disable_events(counter);
0368 break;
0369 default:
0370 ret = -ENOIOCTLCMD;
0371 break;
0372 }
0373
0374 out_unlock:
0375 mutex_unlock(&counter->ops_exist_lock);
0376
0377 return ret;
0378 }
0379
0380 static int counter_chrdev_open(struct inode *inode, struct file *filp)
0381 {
0382 struct counter_device *const counter = container_of(inode->i_cdev,
0383 typeof(*counter),
0384 chrdev);
0385
0386 get_device(&counter->dev);
0387 filp->private_data = counter;
0388
0389 return nonseekable_open(inode, filp);
0390 }
0391
0392 static int counter_chrdev_release(struct inode *inode, struct file *filp)
0393 {
0394 struct counter_device *const counter = filp->private_data;
0395 int ret = 0;
0396
0397 mutex_lock(&counter->ops_exist_lock);
0398
0399 if (!counter->ops) {
0400
0401 counter_events_list_free(&counter->events_list);
0402 counter_events_list_free(&counter->next_events_list);
0403 ret = -ENODEV;
0404 goto out_unlock;
0405 }
0406
0407 ret = counter_disable_events(counter);
0408 if (ret < 0) {
0409 mutex_unlock(&counter->ops_exist_lock);
0410 return ret;
0411 }
0412
0413 out_unlock:
0414 mutex_unlock(&counter->ops_exist_lock);
0415
0416 put_device(&counter->dev);
0417
0418 return ret;
0419 }
0420
/* File operations for the Counter character device */
static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};
0430
0431 int counter_chrdev_add(struct counter_device *const counter)
0432 {
0433
0434 INIT_LIST_HEAD(&counter->events_list);
0435 INIT_LIST_HEAD(&counter->next_events_list);
0436 spin_lock_init(&counter->events_list_lock);
0437 mutex_init(&counter->n_events_list_lock);
0438 init_waitqueue_head(&counter->events_wait);
0439 spin_lock_init(&counter->events_in_lock);
0440 mutex_init(&counter->events_out_lock);
0441
0442
0443 cdev_init(&counter->chrdev, &counter_fops);
0444
0445
0446 return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
0447 }
0448
/**
 * counter_chrdev_remove - release resources acquired by counter_chrdev_add
 * @counter: Counter device whose events fifo is freed
 */
void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}
0453
/*
 * Read the current value of a watched component into @value, widening u8
 * and u32 results to u64. Returns 0 on success or a negative error code;
 * for COUNTER_COMPONENT_NONE there is nothing to read and *value is left
 * untouched.
 */
static int counter_get_data(struct counter_device *const counter,
			    const struct counter_comp_node *const comp_node,
			    u64 *const value)
{
	const struct counter_comp *const comp = &comp_node->comp;
	void *const parent = comp_node->parent;
	u8 value_u8 = 0;
	u32 value_u32 = 0;
	int ret;

	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
		return 0;

	/* Dispatch on component data type, then on component scope */
	switch (comp->type) {
	case COUNTER_COMP_U8:
	case COUNTER_COMP_BOOL:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u8_read(counter, &value_u8);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u8_read(counter, parent, &value_u8);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u8_read(counter, parent, &value_u8);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u8;
		return ret;
	case COUNTER_COMP_SIGNAL_LEVEL:
	case COUNTER_COMP_FUNCTION:
	case COUNTER_COMP_ENUM:
	case COUNTER_COMP_COUNT_DIRECTION:
	case COUNTER_COMP_COUNT_MODE:
		/* All u32-backed component types */
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			ret = comp->device_u32_read(counter, &value_u32);
			break;
		case COUNTER_SCOPE_SIGNAL:
			ret = comp->signal_u32_read(counter, parent,
						    &value_u32);
			break;
		case COUNTER_SCOPE_COUNT:
			ret = comp->count_u32_read(counter, parent, &value_u32);
			break;
		default:
			return -EINVAL;
		}
		*value = value_u32;
		return ret;
	case COUNTER_COMP_U64:
		switch (comp_node->component.scope) {
		case COUNTER_SCOPE_DEVICE:
			return comp->device_u64_read(counter, value);
		case COUNTER_SCOPE_SIGNAL:
			return comp->signal_u64_read(counter, parent, value);
		case COUNTER_SCOPE_COUNT:
			return comp->count_u64_read(counter, parent, value);
		default:
			return -EINVAL;
		}
	case COUNTER_COMP_SYNAPSE_ACTION:
		/* comp->priv carries the Synapse set up in counter_add_watch */
		ret = comp->action_read(counter, parent, comp->priv,
					&value_u32);
		*value = value_u32;
		return ret;
	default:
		return -EINVAL;
	}
}
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
/**
 * counter_push_event - queue event for userspace reading
 * @counter:	pointer to Counter structure
 * @event:	triggered event
 * @channel:	event channel
 *
 * Reads the current value of every component watched for @event/@channel
 * and pushes one struct counter_event per component into the events fifo,
 * waking pollers/readers when anything was queued. Safe to call from
 * contexts where sleeping is not allowed: only spinlocks are taken.
 */
void counter_push_event(struct counter_device *const counter, const u8 event,
			const u8 channel)
{
	struct counter_event ev;
	unsigned int copied = 0;
	unsigned long flags;
	struct counter_event_node *event_node;
	struct counter_comp_node *comp_node;

	ev.timestamp = ktime_get_ns();
	ev.watch.event = event;
	ev.watch.channel = channel;

	/* irqsave spinlock: list may be walked with interrupts disabled */
	spin_lock_irqsave(&counter->events_list_lock, flags);

	/* Search for this event/channel pair in the active list */
	list_for_each_entry(event_node, &counter->events_list, l)
		if (event_node->event == event &&
		event_node->channel == channel)
			break;

	/* Loop reached the list head: nobody is watching this event */
	if (&event_node->l == &counter->events_list)
		goto exit_early;

	/* Read and queue each watched component value for userspace */
	list_for_each_entry(comp_node, &event_node->comp_list, l) {
		ev.watch.component = comp_node->component;
		/* Negative error codes are reported as positive status */
		ev.status = -counter_get_data(counter, comp_node, &ev.value);

		/* noirqsave: interrupts are already disabled at this point */
		copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
				1, &counter->events_in_lock);
	}

exit_early:
	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	if (copied)
		wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_GPL(counter_push_event);