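/* Device wakeirq helper functions */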
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

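/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @wirq: Wake IRQ data
 *
 * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
 */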
static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

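/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */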
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);

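/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as all the driver instances may not have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */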
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);

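/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device interrupt handler takes care of the rest.
 */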
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

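/*
 * Shared helper for dev_pm_set_dedicated_wake_irq() and
 * dev_pm_set_dedicated_wake_irq_reverse(): allocates the wake_irq data,
 * requests the dedicated interrupt with a threaded handler and attaches
 * it to the device.
 */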
static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
				   wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}

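/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 */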
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);

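/**
 * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
 *                                         with reverse enable ordering
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has a dedicated
 * wake-up interrupt in addition to the device IO interrupt. It sets
 * the WAKE_IRQ_DEDICATED_REVERSE status to request that the dedicated
 * wake-up interrupt be enabled only after running the runtime suspend
 * callback for @dev.
 */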
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);

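/**
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_resume() to override the PM runtime core managed wake-up
 * interrupt handling to enable the wake-up interrupt.
 */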
void dev_pm_enable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);

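/**
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_suspend() to override the PM runtime core managed wake-up
 * interrupt handling to disable the wake-up interrupt.
 */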
void dev_pm_disable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);

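/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables the dedicated wake-up interrupt conditionally. We need to enable
 * it lazily on the first rpm_suspend(): the consumer device starts in
 * RPM_SUSPENDED state, and the first pm_runtime_get() would otherwise try
 * to disable an already disabled wake IRQ. The wake-up interrupt starts
 * disabled because it is requested with IRQF_NO_AUTOEN. For interrupts
 * marked WAKE_IRQ_DEDICATED_REVERSE, enabling is left to
 * dev_pm_enable_wake_irq_complete().
 *
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */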
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		enable_irq(wirq->irq);
}

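/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
 *
 * Disables the wake-up interrupt conditionally based on status.
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */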
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
		disable_irq_nosync(wirq->irq);
}

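/**
 * dev_pm_enable_wake_irq_complete - Enable wake IRQ not enabled before
 * @dev: Device using the wake IRQ
 *
 * Enables the wake IRQ conditionally based on status, mainly used when the
 * wake IRQ should be enabled only after running ->runtime_suspend(), which
 * is what WAKE_IRQ_DEDICATED_REVERSE requests.
 *
 * Should only be called from the rpm_suspend() path.
 */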
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
		enable_irq(wirq->irq);
}

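/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on device_may_wakeup().
 */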
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

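/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears up the wake-up event conditionally based on device_may_wakeup().
 */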
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			disable_irq_nosync(wirq->irq);
	}
}