// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols & handler->protocols ||
				    !handler->protocols)
					handler->decode(dev, ev);
			lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

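/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw IR decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * Stores a pulse/space duration in the raw event FIFO, to be processed later
 * by the raw event thread. May be called from interrupt context.
 */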
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		ev->duration, TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);

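/**
 * ir_raw_event_store_edge() - notify raw IR decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * Stores an edge (pulse/space transition) by computing the duration since the
 * previous edge. Intended for hardware that only reports state changes rather
 * than complete samples; may be called from interrupt context.
 */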
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t now;
	struct ir_raw_event ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);

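/**
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       IR decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * Stores a pulse/space duration for the raw IR decoding state machines,
 * schedules decoding, and arms the edge timer so that a timeout event is
 * eventually generated if no further edges arrive.
 */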
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t now;
	int rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/*
	 * The timer may already be armed for the full IR timeout (125 ms by
	 * default); make sure it fires no later than 15 ms after this edge.
	 */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

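/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * Stores a pulse/space duration for the raw IR decoding state machines, but
 * first merges consecutive samples of the same polarity and handles the
 * receiver's idle state. Returns zero if the event was filtered out and
 * non-zero if it was accepted.
 */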
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);

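/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 *
 * On entering idle mode the currently accumulated event is flagged as a
 * timeout and queued; drivers implementing the s_idle() callback are also
 * notified of the change.
 */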
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

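/**
 * ir_raw_event_handle() - schedules the decoding of stored IR data
 * @dev:	the struct rc_dev device descriptor
 *
 * Wakes up the raw event thread so that any queued pulse/space events are
 * passed to the protocol decoders.
 */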
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64 ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_US(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

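/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing
 * up to @max raw IR events starting at *@ev.
 *
 * Returns:	0 on success, or -ENOBUFS if more than @max events are needed.
 */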
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev points to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than the last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);

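/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing
 * up to @max raw IR events starting at *@ev.
 *
 * Returns:	0 on success, or -ENOBUFS if more than @max events are needed.
 */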
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);

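/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation (a fixed bit space, with the bit value carried by the pulse
 * length) as described by @timings, writing up to @max raw IR events
 * starting at *@ev.
 *
 * Returns:	0 on success, or -ENOBUFS if more than @max events are needed.
 */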
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);

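/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		maximum number of raw events
 *
 * Attempts to encode the scancode as raw events using the first registered
 * handler that supports the protocol and implements encode().
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding; in this case all @max events will have been written.
 *		-EINVAL if the scancode is invalid, or if no compatible
 *		encoder was found.
 */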
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);

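/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). Rather than calling
 * ir_raw_event_handle() for each edge and waking the rc thread, decoding is
 * deferred until 15 ms after the first edge. In addition, a timeout event is
 * generated once no more IR has been received for the rc_dev timeout.
 */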
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_us(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_us(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + usecs_to_jiffies(dev->timeout -
						     ktime_to_us(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

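/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:		protocol
 *
 * Attempts to find the carrier for the specified protocol.
 *
 * Returns:	The carrier in Hz, or -EINVAL if the protocol is invalid or
 *		no compatible encoder was found.
 */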
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);

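/*
 * Used to (un)register raw event clients
 */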
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * Keep the handler lock held until dev->raw has been freed and set
	 * to NULL, so that concurrent bpf attach/detach/query on the lirc
	 * device sees a consistent state; the unlock also acts as the
	 * synchronization point for bpf detach.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

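/*
 * Extension interface - used to register the IR decoders
 */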
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);