// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL       (5 * HZ)

#define RFKILL_BLOCK_HW     BIT(0)
#define RFKILL_BLOCK_SW     BIT(1)
#define RFKILL_BLOCK_SW_PREV    BIT(2)
#define RFKILL_BLOCK_ANY    (RFKILL_BLOCK_HW |\
                 RFKILL_BLOCK_SW |\
                 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)

struct rfkill {
    spinlock_t      lock;

    enum rfkill_type    type;

    unsigned long       state;
    unsigned long       hard_block_reasons;

    u32         idx;

    bool            registered;
    bool            persistent;
    bool            polling_paused;
    bool            suspended;

    const struct rfkill_ops *ops;
    void            *data;

#ifdef CONFIG_RFKILL_LEDS
    struct led_trigger  led_trigger;
    const char      *ledtrigname;
#endif

    struct device       dev;
    struct list_head    node;

    struct delayed_work poll_work;
    struct work_struct  uevent_work;
    struct work_struct  sync_work;
    char            name[];
};
#define to_rfkill(d)    container_of(d, struct rfkill, dev)

struct rfkill_int_event {
    struct list_head    list;
    struct rfkill_event_ext ev;
};

struct rfkill_data {
    struct list_head    list;
    struct list_head    events;
    struct mutex        mtx;
    wait_queue_head_t   read_wait;
    bool            input_handler;
    u8          max_size;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);  /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);   /* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
         "Default initial state for all radio types, 0 = radio off");

static struct {
    bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
    struct led_trigger *trigger;

    if (!rfkill->registered)
        return;

    trigger = &rfkill->led_trigger;

    if (rfkill->state & RFKILL_BLOCK_ANY)
        led_trigger_event(trigger, LED_OFF);
    else
        led_trigger_event(trigger, LED_FULL);
}

static int rfkill_led_trigger_activate(struct led_classdev *led)
{
    struct rfkill *rfkill;

    rfkill = container_of(led->trigger, struct rfkill, led_trigger);

    rfkill_led_trigger_event(rfkill);

    return 0;
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
    return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
    BUG_ON(!rfkill);

    rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
    rfkill->led_trigger.name = rfkill->ledtrigname
                    ? : dev_name(&rfkill->dev);
    rfkill->led_trigger.activate = rfkill_led_trigger_activate;
    return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
    led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;

static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
    enum led_brightness brightness = LED_OFF;
    struct rfkill *rfkill;

    mutex_lock(&rfkill_global_mutex);
    list_for_each_entry(rfkill, &rfkill_list, node) {
        if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
            brightness = LED_FULL;
            break;
        }
    }
    mutex_unlock(&rfkill_global_mutex);

    led_trigger_event(&rfkill_any_led_trigger, brightness);
    led_trigger_event(&rfkill_none_led_trigger,
              brightness == LED_OFF ? LED_FULL : LED_OFF);
}

static void rfkill_global_led_trigger_event(void)
{
    schedule_work(&rfkill_global_led_trigger_work);
}

static int rfkill_global_led_trigger_register(void)
{
    int ret;

    INIT_WORK(&rfkill_global_led_trigger_work,
            rfkill_global_led_trigger_worker);

    rfkill_any_led_trigger.name = "rfkill-any";
    ret = led_trigger_register(&rfkill_any_led_trigger);
    if (ret)
        return ret;

    rfkill_none_led_trigger.name = "rfkill-none";
    ret = led_trigger_register(&rfkill_none_led_trigger);
    if (ret)
        led_trigger_unregister(&rfkill_any_led_trigger);
    else
        /* Delay activation until all global triggers are registered */
        rfkill_global_led_trigger_event();

    return ret;
}

static void rfkill_global_led_trigger_unregister(void)
{
    led_trigger_unregister(&rfkill_none_led_trigger);
    led_trigger_unregister(&rfkill_any_led_trigger);
    cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
    return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
    return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event_ext *ev,
                  struct rfkill *rfkill,
                  enum rfkill_operation op)
{
    unsigned long flags;

    ev->idx = rfkill->idx;
    ev->type = rfkill->type;
    ev->op = op;

    spin_lock_irqsave(&rfkill->lock, flags);
    ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
    ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
                    RFKILL_BLOCK_SW_PREV));
    ev->hard_block_reasons = rfkill->hard_block_reasons;
    spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
    struct rfkill_data *data;
    struct rfkill_int_event *ev;

    list_for_each_entry(data, &rfkill_fds, list) {
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
            continue;
        rfkill_fill_event(&ev->ev, rfkill, op);
        mutex_lock(&data->mtx);
        list_add_tail(&ev->list, &data->events);
        mutex_unlock(&data->mtx);
        wake_up_interruptible(&data->read_wait);
    }
}

static void rfkill_event(struct rfkill *rfkill)
{
    if (!rfkill->registered)
        return;

    kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

    /* also send event to /dev/rfkill */
    rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
    unsigned long flags;
    bool prev, curr;
    int err;

    if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
        return;

    /*
     * Some platforms (...!) generate input events which affect the
     * _hard_ kill state -- whenever something tries to change the
     * current software state query the hardware state too.
     */
    if (rfkill->ops->query)
        rfkill->ops->query(rfkill, rfkill->data);

    spin_lock_irqsave(&rfkill->lock, flags);
    prev = rfkill->state & RFKILL_BLOCK_SW;

    if (prev)
        rfkill->state |= RFKILL_BLOCK_SW_PREV;
    else
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

    if (blocked)
        rfkill->state |= RFKILL_BLOCK_SW;
    else
        rfkill->state &= ~RFKILL_BLOCK_SW;

    rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
    spin_unlock_irqrestore(&rfkill->lock, flags);

    err = rfkill->ops->set_block(rfkill->data, blocked);

    spin_lock_irqsave(&rfkill->lock, flags);
    if (err) {
        /*
         * Failed -- reset status to _PREV, which may be different
         * from what we have set _PREV to earlier in this function
         * if rfkill_set_sw_state was invoked.
         */
        if (rfkill->state & RFKILL_BLOCK_SW_PREV)
            rfkill->state |= RFKILL_BLOCK_SW;
        else
            rfkill->state &= ~RFKILL_BLOCK_SW;
    }
    rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
    rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
    curr = rfkill->state & RFKILL_BLOCK_SW;
    spin_unlock_irqrestore(&rfkill->lock, flags);

    rfkill_led_trigger_event(rfkill);
    rfkill_global_led_trigger_event();

    if (prev != curr)
        rfkill_event(rfkill);
}

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
    int i;

    if (type != RFKILL_TYPE_ALL) {
        rfkill_global_states[type].cur = blocked;
        return;
    }

    for (i = 0; i < NUM_RFKILL_TYPES; i++)
        rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of a given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of the given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
    struct rfkill *rfkill;

    rfkill_update_global_state(type, blocked);
    list_for_each_entry(rfkill, &rfkill_list, node) {
        if (rfkill->type != type && type != RFKILL_TYPE_ALL)
            continue;

        rfkill_set_block(rfkill, blocked);
    }
}

/**
 * rfkill_switch_all - Toggle state of all switches of a given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
    if (atomic_read(&rfkill_input_disabled))
        return;

    mutex_lock(&rfkill_global_mutex);

    if (!rfkill_epo_lock_active)
        __rfkill_switch_all(type, blocked);

    mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked state,
 * ignoring everything in its path except rfkill_global_mutex and each
 * device's spinlock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
    struct rfkill *rfkill;
    int i;

    if (atomic_read(&rfkill_input_disabled))
        return;

    mutex_lock(&rfkill_global_mutex);

    rfkill_epo_lock_active = true;
    list_for_each_entry(rfkill, &rfkill_list, node)
        rfkill_set_block(rfkill, true);

    for (i = 0; i < NUM_RFKILL_TYPES; i++) {
        rfkill_global_states[i].sav = rfkill_global_states[i].cur;
        rfkill_global_states[i].cur = true;
    }

    mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
    int i;

    if (atomic_read(&rfkill_input_disabled))
        return;

    mutex_lock(&rfkill_global_mutex);

    rfkill_epo_lock_active = false;
    for (i = 0; i < NUM_RFKILL_TYPES; i++)
        __rfkill_switch_all(i, rfkill_global_states[i].sav);
    mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
    if (atomic_read(&rfkill_input_disabled))
        return;

    mutex_lock(&rfkill_global_mutex);
    rfkill_epo_lock_active = false;
    mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
    return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
    return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
                bool blocked, unsigned long reason)
{
    unsigned long flags;
    bool ret, prev;

    BUG_ON(!rfkill);

    if (WARN(reason &
        ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
        "hw_state reason not supported: 0x%lx", reason))
        return blocked;

    spin_lock_irqsave(&rfkill->lock, flags);
    prev = !!(rfkill->hard_block_reasons & reason);
    if (blocked) {
        rfkill->state |= RFKILL_BLOCK_HW;
        rfkill->hard_block_reasons |= reason;
    } else {
        rfkill->hard_block_reasons &= ~reason;
        if (!rfkill->hard_block_reasons)
            rfkill->state &= ~RFKILL_BLOCK_HW;
    }
    ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
    spin_unlock_irqrestore(&rfkill->lock, flags);

    rfkill_led_trigger_event(rfkill);
    rfkill_global_led_trigger_event();

    if (rfkill->registered && prev != blocked)
        schedule_work(&rfkill->uevent_work);

    return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state_reason);

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
    u32 bit = RFKILL_BLOCK_SW;

    /* if in a ops->set_block right now, use other bit */
    if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
        bit = RFKILL_BLOCK_SW_PREV;

    if (blocked)
        rfkill->state |= bit;
    else
        rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
    unsigned long flags;
    bool prev, hwblock;

    BUG_ON(!rfkill);

    spin_lock_irqsave(&rfkill->lock, flags);
    prev = !!(rfkill->state & RFKILL_BLOCK_SW);
    __rfkill_set_sw_state(rfkill, blocked);
    hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
    blocked = blocked || hwblock;
    spin_unlock_irqrestore(&rfkill->lock, flags);

    if (!rfkill->registered)
        return blocked;

    if (prev != blocked && !hwblock)
        schedule_work(&rfkill->uevent_work);

    rfkill_led_trigger_event(rfkill);
    rfkill_global_led_trigger_event();

    return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
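
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * report hardware kill-switch changes through the setters above, e.g.
 * from an interrupt handler. All "foo_*" names below are hypothetical.
 *
 *	static irqreturn_t foo_killswitch_irq(int irq, void *cookie)
 *	{
 *		struct foo_priv *priv = cookie;
 *		bool hw_killed = foo_read_kill_pin(priv);
 *
 *		rfkill_set_hw_state(priv->rfkill, hw_killed);
 *		return IRQ_HANDLED;
 *	}
 *
 * rfkill_set_hw_state() is the reason-less wrapper around
 * rfkill_set_hw_state_reason() provided by <linux/rfkill.h>; its return
 * value is the combined (hardware or software) blocked state.
 */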

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
    unsigned long flags;

    BUG_ON(!rfkill);
    BUG_ON(rfkill->registered);

    spin_lock_irqsave(&rfkill->lock, flags);
    __rfkill_set_sw_state(rfkill, blocked);
    rfkill->persistent = true;
    spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
    unsigned long flags;
    bool swprev, hwprev;

    BUG_ON(!rfkill);

    spin_lock_irqsave(&rfkill->lock, flags);

    /*
     * No need to care about prev/setblock ... this is for uevent only
     * and that will get triggered by rfkill_set_block anyway.
     */
    swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
    hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
    __rfkill_set_sw_state(rfkill, sw);
    if (hw)
        rfkill->state |= RFKILL_BLOCK_HW;
    else
        rfkill->state &= ~RFKILL_BLOCK_HW;

    spin_unlock_irqrestore(&rfkill->lock, flags);

    if (!rfkill->registered) {
        rfkill->persistent = true;
    } else {
        if (swprev != sw || hwprev != hw)
            schedule_work(&rfkill->uevent_work);

        rfkill_led_trigger_event(rfkill);
        rfkill_global_led_trigger_event();
    }
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
    NULL, /* RFKILL_TYPE_ALL */
    "wlan",
    "bluetooth",
    "ultrawideband",
    "wimax",
    "wwan",
    "gps",
    "fm",
    "nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
    int i;

    BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

    if (!name)
        return RFKILL_TYPE_ALL;

    for (i = 1; i < NUM_RFKILL_TYPES; i++)
        if (!strcmp(name, rfkill_types[i]))
            return i;
    return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
              char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
                   struct device_attribute *attr, char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
             char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t count)
{
    struct rfkill *rfkill = to_rfkill(dev);
    unsigned long state;
    int err;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    err = kstrtoul(buf, 0, &state);
    if (err)
        return err;

    if (state > 1)
        return -EINVAL;

    mutex_lock(&rfkill_global_mutex);
    rfkill_set_block(rfkill, state);
    mutex_unlock(&rfkill_global_mutex);

    return count;
}
static DEVICE_ATTR_RW(soft);

static ssize_t hard_block_reasons_show(struct device *dev,
                       struct device_attribute *attr,
                       char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);

static u8 user_state_from_blocked(unsigned long state)
{
    if (state & RFKILL_BLOCK_HW)
        return RFKILL_USER_STATE_HARD_BLOCKED;
    if (state & RFKILL_BLOCK_SW)
        return RFKILL_USER_STATE_SOFT_BLOCKED;

    return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
              char *buf)
{
    struct rfkill *rfkill = to_rfkill(dev);

    return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
{
    struct rfkill *rfkill = to_rfkill(dev);
    unsigned long state;
    int err;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    err = kstrtoul(buf, 0, &state);
    if (err)
        return err;

    if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
        state != RFKILL_USER_STATE_UNBLOCKED)
        return -EINVAL;

    mutex_lock(&rfkill_global_mutex);
    rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
    mutex_unlock(&rfkill_global_mutex);

    return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
    &dev_attr_name.attr,
    &dev_attr_type.attr,
    &dev_attr_index.attr,
    &dev_attr_persistent.attr,
    &dev_attr_state.attr,
    &dev_attr_soft.attr,
    &dev_attr_hard.attr,
    &dev_attr_hard_block_reasons.attr,
    NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
    struct rfkill *rfkill = to_rfkill(dev);

    kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
    struct rfkill *rfkill = to_rfkill(dev);
    unsigned long flags;
    unsigned long reasons;
    u32 state;
    int error;

    error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
    if (error)
        return error;
    error = add_uevent_var(env, "RFKILL_TYPE=%s",
                   rfkill_types[rfkill->type]);
    if (error)
        return error;
    spin_lock_irqsave(&rfkill->lock, flags);
    state = rfkill->state;
    reasons = rfkill->hard_block_reasons;
    spin_unlock_irqrestore(&rfkill->lock, flags);
    error = add_uevent_var(env, "RFKILL_STATE=%d",
                   user_state_from_blocked(state));
    if (error)
        return error;
    return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
    BUG_ON(!rfkill);

    if (!rfkill->ops->poll)
        return;

    rfkill->polling_paused = true;
    cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
    BUG_ON(!rfkill);

    if (!rfkill->ops->poll)
        return;

    rfkill->polling_paused = false;

    if (rfkill->suspended)
        return;

    queue_delayed_work(system_power_efficient_wq,
               &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
    struct rfkill *rfkill = to_rfkill(dev);

    rfkill->suspended = true;
    cancel_delayed_work_sync(&rfkill->poll_work);

    return 0;
}

static int rfkill_resume(struct device *dev)
{
    struct rfkill *rfkill = to_rfkill(dev);
    bool cur;

    rfkill->suspended = false;

    if (!rfkill->registered)
        return 0;

    if (!rfkill->persistent) {
        cur = !!(rfkill->state & RFKILL_BLOCK_SW);
        rfkill_set_block(rfkill, cur);
    }

    if (rfkill->ops->poll && !rfkill->polling_paused)
        queue_delayed_work(system_power_efficient_wq,
                   &rfkill->poll_work, 0);

    return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
    .name       = "rfkill",
    .dev_release    = rfkill_release,
    .dev_groups = rfkill_dev_groups,
    .dev_uevent = rfkill_dev_uevent,
    .pm     = RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
    unsigned long flags;
    u32 state;

    spin_lock_irqsave(&rfkill->lock, flags);
    state = rfkill->state;
    spin_unlock_irqrestore(&rfkill->lock, flags);

    return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

bool rfkill_soft_blocked(struct rfkill *rfkill)
{
    unsigned long flags;
    u32 state;

    spin_lock_irqsave(&rfkill->lock, flags);
    state = rfkill->state;
    spin_unlock_irqrestore(&rfkill->lock, flags);

    return !!(state & RFKILL_BLOCK_SW);
}
EXPORT_SYMBOL(rfkill_soft_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
                      struct device *parent,
                      const enum rfkill_type type,
                      const struct rfkill_ops *ops,
                      void *ops_data)
{
    struct rfkill *rfkill;
    struct device *dev;

    if (WARN_ON(!ops))
        return NULL;

    if (WARN_ON(!ops->set_block))
        return NULL;

    if (WARN_ON(!name))
        return NULL;

    if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
        return NULL;

    rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
    if (!rfkill)
        return NULL;

    spin_lock_init(&rfkill->lock);
    INIT_LIST_HEAD(&rfkill->node);
    rfkill->type = type;
    strcpy(rfkill->name, name);
    rfkill->ops = ops;
    rfkill->data = ops_data;

    dev = &rfkill->dev;
    dev->class = &rfkill_class;
    dev->parent = parent;
    device_initialize(dev);

    return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
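
/*
 * Illustrative sketch (not part of this file): the usual driver-side
 * lifecycle built on rfkill_alloc()/rfkill_register(). The "foo_*"
 * identifiers are hypothetical.
 *
 *	static int foo_rfkill_set_block(void *data, bool blocked)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		return foo_radio_set_power(priv, !blocked);
 *	}
 *
 *	static const struct rfkill_ops foo_rfkill_ops = {
 *		.set_block = foo_rfkill_set_block,
 *	};
 *
 *	priv->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *				    RFKILL_TYPE_WLAN, &foo_rfkill_ops, priv);
 *	if (!priv->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(priv->rfkill);
 *	if (err) {
 *		rfkill_destroy(priv->rfkill);
 *		return err;
 *	}
 *
 * Teardown mirrors this: rfkill_unregister() then rfkill_destroy().
 */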

static void rfkill_poll(struct work_struct *work)
{
    struct rfkill *rfkill;

    rfkill = container_of(work, struct rfkill, poll_work.work);

    /*
     * Poll hardware state -- driver will use one of the
     * rfkill_set{,_hw,_sw}_state functions and use its
     * return value to update the current status.
     */
    rfkill->ops->poll(rfkill, rfkill->data);

    queue_delayed_work(system_power_efficient_wq,
        &rfkill->poll_work,
        round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
    struct rfkill *rfkill;

    rfkill = container_of(work, struct rfkill, uevent_work);

    mutex_lock(&rfkill_global_mutex);
    rfkill_event(rfkill);
    mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
    struct rfkill *rfkill;
    bool cur;

    rfkill = container_of(work, struct rfkill, sync_work);

    mutex_lock(&rfkill_global_mutex);
    cur = rfkill_global_states[rfkill->type].cur;
    rfkill_set_block(rfkill, cur);
    mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
    static unsigned long rfkill_no;
    struct device *dev;
    int error;

    if (!rfkill)
        return -EINVAL;

    dev = &rfkill->dev;

    mutex_lock(&rfkill_global_mutex);

    if (rfkill->registered) {
        error = -EALREADY;
        goto unlock;
    }

    rfkill->idx = rfkill_no;
    dev_set_name(dev, "rfkill%lu", rfkill_no);
    rfkill_no++;

    list_add_tail(&rfkill->node, &rfkill_list);

    error = device_add(dev);
    if (error)
        goto remove;

    error = rfkill_led_trigger_register(rfkill);
    if (error)
        goto devdel;

    rfkill->registered = true;

    INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
    INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
    INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

    if (rfkill->ops->poll)
        queue_delayed_work(system_power_efficient_wq,
            &rfkill->poll_work,
            round_jiffies_relative(POLL_INTERVAL));

    if (!rfkill->persistent || rfkill_epo_lock_active) {
        schedule_work(&rfkill->sync_work);
    } else {
#ifdef CONFIG_RFKILL_INPUT
        bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

        if (!atomic_read(&rfkill_input_disabled))
            __rfkill_switch_all(rfkill->type, soft_blocked);
#endif
    }

    rfkill_global_led_trigger_event();
    rfkill_send_events(rfkill, RFKILL_OP_ADD);

    mutex_unlock(&rfkill_global_mutex);
    return 0;

 devdel:
    device_del(&rfkill->dev);
 remove:
    list_del_init(&rfkill->node);
 unlock:
    mutex_unlock(&rfkill_global_mutex);
    return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
    BUG_ON(!rfkill);

    if (rfkill->ops->poll)
        cancel_delayed_work_sync(&rfkill->poll_work);

    cancel_work_sync(&rfkill->uevent_work);
    cancel_work_sync(&rfkill->sync_work);

    rfkill->registered = false;

    device_del(&rfkill->dev);

    mutex_lock(&rfkill_global_mutex);
    rfkill_send_events(rfkill, RFKILL_OP_DEL);
    list_del_init(&rfkill->node);
    rfkill_global_led_trigger_event();
    mutex_unlock(&rfkill_global_mutex);

    rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
    if (rfkill)
        put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
    struct rfkill_data *data;
    struct rfkill *rfkill;
    struct rfkill_int_event *ev, *tmp;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    data->max_size = RFKILL_EVENT_SIZE_V1;

    INIT_LIST_HEAD(&data->events);
    mutex_init(&data->mtx);
    init_waitqueue_head(&data->read_wait);

    mutex_lock(&rfkill_global_mutex);
    mutex_lock(&data->mtx);
    /*
     * start getting events from elsewhere but hold mtx to get
     * startup events added first
     */

    list_for_each_entry(rfkill, &rfkill_list, node) {
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
            goto free;
        rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
        list_add_tail(&ev->list, &data->events);
    }
    list_add(&data->list, &rfkill_fds);
    mutex_unlock(&data->mtx);
    mutex_unlock(&rfkill_global_mutex);

    file->private_data = data;

    return stream_open(inode, file);

 free:
    mutex_unlock(&data->mtx);
    mutex_unlock(&rfkill_global_mutex);
    mutex_destroy(&data->mtx);
    list_for_each_entry_safe(ev, tmp, &data->events, list)
        kfree(ev);
    kfree(data);
    return -ENOMEM;
}

static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
    struct rfkill_data *data = file->private_data;
    __poll_t res = EPOLLOUT | EPOLLWRNORM;

    poll_wait(file, &data->read_wait, wait);

    mutex_lock(&data->mtx);
    if (!list_empty(&data->events))
        res = EPOLLIN | EPOLLRDNORM;
    mutex_unlock(&data->mtx);

    return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                   size_t count, loff_t *pos)
{
    struct rfkill_data *data = file->private_data;
    struct rfkill_int_event *ev;
    unsigned long sz;
    int ret;

    mutex_lock(&data->mtx);

    while (list_empty(&data->events)) {
        if (file->f_flags & O_NONBLOCK) {
            ret = -EAGAIN;
            goto out;
        }
        mutex_unlock(&data->mtx);
        /* since we re-check and it just compares pointers,
         * using !list_empty() without locking isn't a problem
         */
        ret = wait_event_interruptible(data->read_wait,
                           !list_empty(&data->events));
        mutex_lock(&data->mtx);

        if (ret)
            goto out;
    }

    ev = list_first_entry(&data->events, struct rfkill_int_event,
                list);

    sz = min_t(unsigned long, sizeof(ev->ev), count);
    sz = min_t(unsigned long, sz, data->max_size);
    ret = sz;
    if (copy_to_user(buf, &ev->ev, sz))
        ret = -EFAULT;

    list_del(&ev->list);
    kfree(ev);
 out:
    mutex_unlock(&data->mtx);
    return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                size_t count, loff_t *pos)
{
    struct rfkill_data *data = file->private_data;
    struct rfkill *rfkill;
    struct rfkill_event_ext ev;
    int ret;

    /* we don't need the 'hard' variable but accept it */
    if (count < RFKILL_EVENT_SIZE_V1 - 1)
        return -EINVAL;

    /*
     * Copy as much data as we can accept into our 'ev' buffer,
     * but tell userspace how much we've copied so it can determine
     * our API version even in a write() call, if it cares.
     */
    count = min(count, sizeof(ev));
    count = min_t(size_t, count, data->max_size);
    if (copy_from_user(&ev, buf, count))
        return -EFAULT;

    if (ev.type >= NUM_RFKILL_TYPES)
        return -EINVAL;

    mutex_lock(&rfkill_global_mutex);

    switch (ev.op) {
    case RFKILL_OP_CHANGE_ALL:
        rfkill_update_global_state(ev.type, ev.soft);
        list_for_each_entry(rfkill, &rfkill_list, node)
            if (rfkill->type == ev.type ||
                ev.type == RFKILL_TYPE_ALL)
                rfkill_set_block(rfkill, ev.soft);
        ret = 0;
        break;
    case RFKILL_OP_CHANGE:
        list_for_each_entry(rfkill, &rfkill_list, node)
            if (rfkill->idx == ev.idx &&
                (rfkill->type == ev.type ||
                 ev.type == RFKILL_TYPE_ALL))
                rfkill_set_block(rfkill, ev.soft);
        ret = 0;
        break;
    default:
        ret = -EINVAL;
        break;
    }

    mutex_unlock(&rfkill_global_mutex);

    return ret ?: count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
    struct rfkill_data *data = file->private_data;
    struct rfkill_int_event *ev, *tmp;

    mutex_lock(&rfkill_global_mutex);
    list_del(&data->list);
    mutex_unlock(&rfkill_global_mutex);

    mutex_destroy(&data->mtx);
    list_for_each_entry_safe(ev, tmp, &data->events, list)
        kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
    if (data->input_handler)
        if (atomic_dec_return(&rfkill_input_disabled) == 0)
            printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

    kfree(data);

    return 0;
}

static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                 unsigned long arg)
{
    struct rfkill_data *data = file->private_data;
    int ret = -ENOSYS;
    u32 size;

    if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
        return -ENOSYS;

    mutex_lock(&data->mtx);
    switch (_IOC_NR(cmd)) {
#ifdef CONFIG_RFKILL_INPUT
    case RFKILL_IOC_NOINPUT:
        if (!data->input_handler) {
            if (atomic_inc_return(&rfkill_input_disabled) == 1)
                printk(KERN_DEBUG "rfkill: input handler disabled\n");
            data->input_handler = true;
        }
        ret = 0;
        break;
#endif
    case RFKILL_IOC_MAX_SIZE:
        if (get_user(size, (__u32 __user *)arg)) {
            ret = -EFAULT;
            break;
        }
        if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
            ret = -EINVAL;
            break;
        }
        data->max_size = size;
        ret = 0;
        break;
    default:
        break;
    }
    mutex_unlock(&data->mtx);

    return ret;
}

static const struct file_operations rfkill_fops = {
    .owner      = THIS_MODULE,
    .open       = rfkill_fop_open,
    .read       = rfkill_fop_read,
    .write      = rfkill_fop_write,
    .poll       = rfkill_fop_poll,
    .release    = rfkill_fop_release,
    .unlocked_ioctl = rfkill_fop_ioctl,
    .compat_ioctl   = compat_ptr_ioctl,
    .llseek     = no_llseek,
};
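
/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * /dev/rfkill ABI implemented by the fops above. A reader receives one
 * struct rfkill_event (from the <linux/rfkill.h> uapi header) per event;
 * a writer can, for instance, soft-block every WLAN radio:
 *
 *	struct rfkill_event ev = {
 *		.op = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *
 *	if (fd >= 0)
 *		write(fd, &ev, sizeof(ev));
 */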

#define RFKILL_NAME "rfkill"

static struct miscdevice rfkill_miscdev = {
    .fops   = &rfkill_fops,
    .name   = RFKILL_NAME,
    .minor  = RFKILL_MINOR,
};

static int __init rfkill_init(void)
{
    int error;

    rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

    error = class_register(&rfkill_class);
    if (error)
        goto error_class;

    error = misc_register(&rfkill_miscdev);
    if (error)
        goto error_misc;

    error = rfkill_global_led_trigger_register();
    if (error)
        goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
    error = rfkill_handler_init();
    if (error)
        goto error_input;
#endif

    return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
    rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
    misc_deregister(&rfkill_miscdev);
error_misc:
    class_unregister(&rfkill_class);
error_class:
    return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
    rfkill_handler_exit();
#endif
    rfkill_global_led_trigger_unregister();
    misc_deregister(&rfkill_miscdev);
    class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);