// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

/* kthread that drains the kfifo and feeds each event to the decoders and lirc */
static int ir_raw_event_thread(void *data)
{
    struct ir_raw_event ev;
    struct ir_raw_handler *handler;
    struct ir_raw_event_ctrl *raw = data;
    struct rc_dev *dev = raw->dev;

    while (1) {
        mutex_lock(&ir_raw_handler_lock);
        while (kfifo_out(&raw->kfifo, &ev, 1)) {
            if (is_timing_event(ev)) {
                if (ev.duration == 0)
                    dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
                if (is_timing_event(raw->prev_ev) &&
                    !is_transition(&ev, &raw->prev_ev))
                    dev_warn_once(&dev->dev, "two consecutive events of type %s",
                              TO_STR(ev.pulse));
            }
            list_for_each_entry(handler, &ir_raw_handler_list, list)
                if (dev->enabled_protocols &
                    handler->protocols || !handler->protocols)
                    handler->decode(dev, ev);
            lirc_raw_event(dev, ev);
            raw->prev_ev = ev;
        }
        mutex_unlock(&ir_raw_handler_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        if (kthread_should_stop()) {
            __set_current_state(TASK_RUNNING);
            break;
        } else if (!kfifo_is_empty(&raw->kfifo))
            set_current_state(TASK_RUNNING);

        schedule();
    }

    return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:    the struct rc_dev device descriptor
 * @ev:     the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
    if (!dev->raw)
        return -EINVAL;

    dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
        ev->duration, TO_STR(ev->pulse));

    if (!kfifo_put(&dev->raw->kfifo, *ev)) {
        dev_err(&dev->dev, "IR event FIFO is full!\n");
        return -ENOSPC;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
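
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a driver that measures durations itself can push a 900us pulse and then
 * wake the decoder thread. "ir->rc" is a hypothetical struct rc_dev
 * pointer owned by the driver.
 *
 *	struct ir_raw_event ev = { .pulse = true, .duration = 900 };
 *
 *	ir_raw_event_store(ir->rc, &ev);
 *	ir_raw_event_handle(ir->rc);
 */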

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:    the struct rc_dev device descriptor
 * @pulse:  true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
    ktime_t         now;
    struct ir_raw_event ev = {};

    if (!dev->raw)
        return -EINVAL;

    now = ktime_get();
    ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
    ev.pulse = !pulse;

    return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
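
/*
 * Illustrative sketch (editor's addition): a GPIO-style receiver whose
 * interrupt fires on every level change only reports the new level;
 * rc-core derives the duration of the interval that just ended. The
 * "my_ir" structure and its fields are assumptions for the example.
 *
 *	static irqreturn_t my_ir_irq(int irq, void *data)
 *	{
 *		struct my_ir *ir = data;
 *
 *		ir_raw_event_store_edge(ir->rc, gpiod_get_value(ir->gpio));
 *		return IRQ_HANDLED;
 *	}
 */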

/*
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *                     ir decoders, schedule decoding and
 *                     timeout
 * @dev:    the struct rc_dev device descriptor
 * @ev:     the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
    ktime_t     now;
    int     rc = 0;

    if (!dev->raw)
        return -EINVAL;

    now = ktime_get();

    spin_lock(&dev->raw->edge_spinlock);
    rc = ir_raw_event_store(dev, ev);

    dev->raw->last_event = now;

    /* timer could be set to timeout (125ms by default) */
    if (!timer_pending(&dev->raw->edge_handle) ||
        time_after(dev->raw->edge_handle.expires,
               jiffies + msecs_to_jiffies(15))) {
        mod_timer(&dev->raw->edge_handle,
              jiffies + msecs_to_jiffies(15));
    }
    spin_unlock(&dev->raw->edge_spinlock);

    return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
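
/*
 * Illustrative sketch (editor's addition): a driver that already has a
 * measured duration but still wants the deferred decode and the timeout
 * event can call this directly instead of ir_raw_event_store(). The
 * names "ir->rc" and "space_us" are hypothetical.
 *
 *	struct ir_raw_event ev = { .pulse = false, .duration = space_us };
 *
 *	ir_raw_event_store_with_timeout(ir->rc, &ev);
 */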

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:    the struct rc_dev device descriptor
 * @ev:     the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge(). It is intended for devices
 * with a limited internal buffer. It automerges samples of the same type
 * and handles timeouts. Returns non-zero if the event was added, and zero
 * if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
    if (!dev->raw)
        return -EINVAL;

    /* Ignore spaces in idle mode */
    if (dev->idle && !ev->pulse)
        return 0;
    else if (dev->idle)
        ir_raw_event_set_idle(dev, false);

    if (!dev->raw->this_ev.duration)
        dev->raw->this_ev = *ev;
    else if (ev->pulse == dev->raw->this_ev.pulse)
        dev->raw->this_ev.duration += ev->duration;
    else {
        ir_raw_event_store(dev, &dev->raw->this_ev);
        dev->raw->this_ev = *ev;
    }

    /* Enter idle mode if necessary */
    if (!ev->pulse && dev->timeout &&
        dev->raw->this_ev.duration >= dev->timeout)
        ir_raw_event_set_idle(dev, true);

    return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
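
/*
 * Illustrative sketch (editor's addition): because consecutive samples of
 * the same type are merged, a driver can feed raw hardware samples without
 * coalescing them itself. The two 100us pulses below end up as one 200us
 * pulse once a space arrives; "ir->rc" is an assumption.
 *
 *	struct ir_raw_event p = { .pulse = true, .duration = 100 };
 *	struct ir_raw_event s = { .pulse = false, .duration = 150 };
 *
 *	ir_raw_event_store_with_filter(ir->rc, &p);
 *	ir_raw_event_store_with_filter(ir->rc, &p);	// merged into prior pulse
 *	ir_raw_event_store_with_filter(ir->rc, &s);	// flushes the 200us pulse
 */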

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:    the struct rc_dev device descriptor
 * @idle:   whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
    if (!dev->raw)
        return;

    dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

    if (idle) {
        dev->raw->this_ev.timeout = true;
        ir_raw_event_store(dev, &dev->raw->this_ev);
        dev->raw->this_ev = (struct ir_raw_event) {};
    }

    if (dev->s_idle)
        dev->s_idle(dev, idle);

    dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:    the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
    if (!dev->raw || !dev->raw->thread)
        return;

    wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
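
/*
 * Illustrative sketch (editor's addition): drivers that store a whole
 * burst of samples typically call this once at the end of the batch so
 * the decoder thread is woken a single time. The loop variables are
 * hypothetical.
 *
 *	for (i = 0; i < count; i++)
 *		ir_raw_event_store(ir->rc, &samples[i]);
 *	ir_raw_event_handle(ir->rc);
 */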

/* used internally by the sysfs interface */
u64 ir_raw_get_allowed_protocols(void)
{
    return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
    struct ir_raw_handler *handler;
    u32 timeout = 0;

    mutex_lock(&ir_raw_handler_lock);
    list_for_each_entry(handler, &ir_raw_handler_list, list) {
        if (!(dev->enabled_protocols & handler->protocols) &&
            (*rc_proto & handler->protocols) && handler->raw_register)
            handler->raw_register(dev);

        if ((dev->enabled_protocols & handler->protocols) &&
            !(*rc_proto & handler->protocols) &&
            handler->raw_unregister)
            handler->raw_unregister(dev);
    }
    mutex_unlock(&ir_raw_handler_lock);

    if (!dev->max_timeout)
        return 0;

    mutex_lock(&ir_raw_handler_lock);
    list_for_each_entry(handler, &ir_raw_handler_list, list) {
        if (handler->protocols & *rc_proto) {
            if (timeout < handler->min_timeout)
                timeout = handler->min_timeout;
        }
    }
    mutex_unlock(&ir_raw_handler_lock);

    if (timeout == 0)
        timeout = IR_DEFAULT_TIMEOUT;
    else
        timeout += MS_TO_US(10);

    if (timeout < dev->min_timeout)
        timeout = dev->min_timeout;
    else if (timeout > dev->max_timeout)
        timeout = dev->max_timeout;

    if (dev->s_timeout)
        dev->s_timeout(dev, timeout);
    else
        dev->timeout = timeout;

    return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
    mutex_lock(&dev->lock);
    dev->enabled_protocols &= ~protocols;
    mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:     Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max:    Maximum number of raw events to fill.
 * @timings:    Manchester modulation timings.
 * @n:      Number of bits of data.
 * @data:   Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *      -ENOBUFS if there isn't enough space in the array to fit the
 *      full encoded data. In this case all @max events will have been
 *      written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
              const struct ir_raw_timings_manchester *timings,
              unsigned int n, u64 data)
{
    bool need_pulse;
    u64 i;
    int ret = -ENOBUFS;

    i = BIT_ULL(n - 1);

    if (timings->leader_pulse) {
        if (!max--)
            return ret;
        init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
        if (timings->leader_space) {
            if (!max--)
                return ret;
            init_ir_raw_event_duration(++(*ev), 0,
                           timings->leader_space);
        }
    } else {
        /* continue existing signal */
        --(*ev);
    }
    /* from here on *ev will point to the last event rather than the next */

    while (n && i > 0) {
        need_pulse = !(data & i);
        if (timings->invert)
            need_pulse = !need_pulse;
        if (need_pulse == !!(*ev)->pulse) {
            (*ev)->duration += timings->clock;
        } else {
            if (!max--)
                goto nobufs;
            init_ir_raw_event_duration(++(*ev), need_pulse,
                           timings->clock);
        }

        if (!max--)
            goto nobufs;
        init_ir_raw_event_duration(++(*ev), !need_pulse,
                       timings->clock);
        i >>= 1;
    }

    if (timings->trailer_space) {
        if (!(*ev)->pulse)
            (*ev)->duration += timings->trailer_space;
        else if (!max--)
            goto nobufs;
        else
            init_ir_raw_event_duration(++(*ev), 0,
                           timings->trailer_space);
    }

    ret = 0;
nobufs:
    /* point to the next event rather than last event before returning */
    ++(*ev);
    return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
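
/*
 * Illustrative sketch (editor's addition): an encoder fills a caller
 * supplied array through a moving event pointer. The timing values below
 * are made up for the example; real users (e.g. the RC-5 encoder) define
 * protocol-specific values.
 *
 *	static const struct ir_raw_timings_manchester timings = {
 *		.leader_pulse = 889,
 *		.clock = 889,
 *	};
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_manchester(&e, max, &timings, 14, command);
 *
 *	if (!ret)
 *		n_events = e - events;
 */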

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:     Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max:    Maximum number of raw events to fill.
 * @timings:    Pulse distance modulation timings.
 * @n:      Number of bits of data.
 * @data:   Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *      -ENOBUFS if there isn't enough space in the array to fit the
 *      full encoded data. In this case all @max events will have been
 *      written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
          const struct ir_raw_timings_pd *timings,
          unsigned int n, u64 data)
{
    int i;
    int ret;
    unsigned int space;

    if (timings->header_pulse) {
        ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
                         timings->header_space);
        if (ret)
            return ret;
    }

    if (timings->msb_first) {
        for (i = n - 1; i >= 0; --i) {
            space = timings->bit_space[(data >> i) & 1];
            ret = ir_raw_gen_pulse_space(ev, &max,
                             timings->bit_pulse,
                             space);
            if (ret)
                return ret;
        }
    } else {
        for (i = 0; i < n; ++i, data >>= 1) {
            space = timings->bit_space[data & 1];
            ret = ir_raw_gen_pulse_space(ev, &max,
                             timings->bit_pulse,
                             space);
            if (ret)
                return ret;
        }
    }

    ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
                     timings->trailer_space);
    return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
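
/*
 * Illustrative sketch (editor's addition): pulse-distance timings in the
 * style of NEC, where a constant pulse is followed by a short space for
 * "0" and a long space for "1". The values are approximate and shown only
 * for illustration.
 *
 *	static const struct ir_raw_timings_pd timings = {
 *		.header_pulse  = 9000,
 *		.header_space  = 4500,
 *		.bit_pulse     = 563,
 *		.bit_space[0]  = 563,
 *		.bit_space[1]  = 1687,
 *		.trailer_pulse = 563,
 *		.trailer_space = 10000,
 *		.msb_first     = 0,
 *	};
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pd(&e, max, &timings, 32, raw_nec_bits);
 */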

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:     Pointer to pointer to next free event. *@ev is incremented for
 *      each raw event filled.
 * @max:    Maximum number of raw events to fill.
 * @timings:    Pulse length modulation timings.
 * @n:      Number of bits of data.
 * @data:   Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns: 0 on success.
 *      -ENOBUFS if there isn't enough space in the array to fit the
 *      full encoded data. In this case all @max events will have been
 *      written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
          const struct ir_raw_timings_pl *timings,
          unsigned int n, u64 data)
{
    int i;
    int ret = -ENOBUFS;
    unsigned int pulse;

    if (!max--)
        return ret;

    init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

    if (timings->msb_first) {
        for (i = n - 1; i >= 0; --i) {
            if (!max--)
                return ret;
            init_ir_raw_event_duration((*ev)++, 0,
                           timings->bit_space);
            if (!max--)
                return ret;
            pulse = timings->bit_pulse[(data >> i) & 1];
            init_ir_raw_event_duration((*ev)++, 1, pulse);
        }
    } else {
        for (i = 0; i < n; ++i, data >>= 1) {
            if (!max--)
                return ret;
            init_ir_raw_event_duration((*ev)++, 0,
                           timings->bit_space);
            if (!max--)
                return ret;
            pulse = timings->bit_pulse[data & 1];
            init_ir_raw_event_duration((*ev)++, 1, pulse);
        }
    }

    if (!max--)
        return ret;

    init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

    return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
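
/*
 * Illustrative sketch (editor's addition): pulse-length timings in the
 * style of Sony SIRC, where the space is constant and the pulse width
 * encodes the bit. Values are approximate.
 *
 *	static const struct ir_raw_timings_pl timings = {
 *		.header_pulse  = 2400,
 *		.bit_space     = 600,
 *		.bit_pulse[0]  = 600,
 *		.bit_pulse[1]  = 1200,
 *		.trailer_space = 10000,
 *		.msb_first     = 0,
 *	};
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pl(&e, max, &timings, 12, raw_sirc_bits);
 */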

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:       protocol
 * @scancode:       scancode filter describing a single scancode
 * @events:     array of raw events to write into
 * @max:        max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns: The number of events written.
 *      -ENOBUFS if there isn't enough space in the array to fit the
 *      encoding. In this case all @max events will have been written.
 *      -EINVAL if the scancode is ambiguous or invalid, or if no
 *      compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
               struct ir_raw_event *events, unsigned int max)
{
    struct ir_raw_handler *handler;
    int ret = -EINVAL;
    u64 mask = 1ULL << protocol;

    ir_raw_load_modules(&mask);

    mutex_lock(&ir_raw_handler_lock);
    list_for_each_entry(handler, &ir_raw_handler_list, list) {
        if (handler->protocols & mask && handler->encode) {
            ret = handler->encode(protocol, scancode, events, max);
            if (ret >= 0 || ret == -ENOBUFS)
                break;
        }
    }
    mutex_unlock(&ir_raw_handler_lock);

    return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
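
/*
 * Illustrative sketch (editor's addition): this is how a transmitter turns
 * a scancode into raw events before handing them to hardware. The buffer
 * size, the scancode value and the transmit() helper are hypothetical.
 *
 *	struct ir_raw_event events[64];
 *	int count = ir_raw_encode_scancode(RC_PROTO_RC5, 0x1e01, events,
 *					   ARRAY_SIZE(events));
 *
 *	if (count >= 0)
 *		transmit(events, count);
 */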

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:      timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first, rather than calling ir_raw_event_handle() for each edge and
 * waking up the rc thread, ir_raw_event_handle() is called 15 ms after
 * the first edge. Second, a timeout event is generated if no more IR is
 * received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
    struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
    struct rc_dev *dev = raw->dev;
    unsigned long flags;
    ktime_t interval;

    spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
    interval = ktime_sub(ktime_get(), dev->raw->last_event);
    if (ktime_to_us(interval) >= dev->timeout) {
        struct ir_raw_event ev = {
            .timeout = true,
            .duration = ktime_to_us(interval)
        };

        ir_raw_event_store(dev, &ev);
    } else {
        mod_timer(&dev->raw->edge_handle,
              jiffies + usecs_to_jiffies(dev->timeout -
                             ktime_to_us(interval)));
    }
    spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

    ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:       protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns: The carrier in Hz
 *      -EINVAL if the protocol is invalid, or if no
 *      compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
    struct ir_raw_handler *handler;
    int ret = -EINVAL;
    u64 mask = BIT_ULL(protocol);

    mutex_lock(&ir_raw_handler_lock);
    list_for_each_entry(handler, &ir_raw_handler_list, list) {
        if (handler->protocols & mask && handler->encode) {
            ret = handler->carrier;
            break;
        }
    }
    mutex_unlock(&ir_raw_handler_lock);

    return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
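
/*
 * Illustrative sketch (editor's addition): a transmitter can pair this
 * with ir_raw_encode_scancode() to program the modulation frequency,
 * here via a hypothetical set_carrier() helper.
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_NEC);
 *
 *	if (carrier > 0)
 *		set_carrier(dev, carrier);
 */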

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
    if (!dev)
        return -EINVAL;

    dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
    if (!dev->raw)
        return -ENOMEM;

    dev->raw->dev = dev;
    dev->change_protocol = change_protocol;
    dev->idle = true;
    spin_lock_init(&dev->raw->edge_spinlock);
    timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
    INIT_KFIFO(dev->raw->kfifo);

    return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
    struct task_struct *thread;

    thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
    if (IS_ERR(thread))
        return PTR_ERR(thread);

    dev->raw->thread = thread;

    mutex_lock(&ir_raw_handler_lock);
    list_add_tail(&dev->raw->list, &ir_raw_client_list);
    mutex_unlock(&ir_raw_handler_lock);

    return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
    if (!dev)
        return;

    kfree(dev->raw);
    dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
    struct ir_raw_handler *handler;

    if (!dev || !dev->raw)
        return;

    kthread_stop(dev->raw->thread);
    del_timer_sync(&dev->raw->edge_handle);

    mutex_lock(&ir_raw_handler_lock);
    list_del(&dev->raw->list);
    list_for_each_entry(handler, &ir_raw_handler_list, list)
        if (handler->raw_unregister &&
            (handler->protocols & dev->enabled_protocols))
            handler->raw_unregister(dev);

    lirc_bpf_free(dev);

    ir_raw_event_free(dev);

    /*
     * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
     * ensure that the raw member is null on unlock; this is how
     * "device gone" is checked.
     */
    mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
    mutex_lock(&ir_raw_handler_lock);
    list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
    atomic64_or(ir_raw_handler->protocols, &available_protocols);
    mutex_unlock(&ir_raw_handler_lock);

    return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
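
/*
 * Illustrative sketch (editor's addition): a protocol decoder module
 * registers a handler at init time; the decode callback and the protocol
 * mask shown are hypothetical.
 *
 *	static struct ir_raw_handler my_handler = {
 *		.protocols = RC_PROTO_BIT_OTHER,
 *		.decode = my_decode,
 *	};
 *
 *	static int __init my_decoder_init(void)
 *	{
 *		return ir_raw_handler_register(&my_handler);
 *	}
 */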

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
    struct ir_raw_event_ctrl *raw;
    u64 protocols = ir_raw_handler->protocols;

    mutex_lock(&ir_raw_handler_lock);
    list_del(&ir_raw_handler->list);
    list_for_each_entry(raw, &ir_raw_client_list, list) {
        if (ir_raw_handler->raw_unregister &&
            (raw->dev->enabled_protocols & protocols))
            ir_raw_handler->raw_unregister(raw->dev);
        ir_raw_disable_protocols(raw->dev, protocols);
    }
    atomic64_andnot(protocols, &available_protocols);
    mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);