0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/device.h>
0011 #include <linux/err.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/kernel.h>
0014 #include <linux/reboot.h>
0015 #include <linux/regmap.h>
0016 #include <linux/slab.h>
0017 #include <linux/spinlock.h>
0018 #include <linux/regulator/driver.h>
0019
0020 #include "internal.h"
0021
0022 #define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
0023
/*
 * State for one registered IRQ based regulator event/error notification
 * helper. Allocated by regulator_irq_helper() and passed as the IRQ
 * cookie / work context.
 */
struct regulator_irq {
	struct regulator_irq_data rdata;	/* per-regulator error states handed to IC callbacks */
	struct regulator_irq_desc desc;		/* copy of the caller-provided IRQ description */
	int irq;				/* the requested IRQ line */
	int retry_cnt;				/* consecutive status-read failures (REGULATOR_FAILED_RETRY) */
	struct delayed_work isr_work;		/* deferred re-check / IRQ re-enable work */
};
0031
0032
0033
0034
/*
 * Set error flag(s) in the regulator's cached error state under the rdev
 * error lock. The cache is consulted by the regulator core for rdevs that
 * have use_cached_err set (see init_rdev_errors()).
 */
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err |= err;
	spin_unlock(&rdev->err_lock);
}
0041
/*
 * Clear error flag(s) from the regulator's cached error state under the
 * rdev error lock. Counterpart of rdev_flag_err().
 */
static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err &= ~err;
	spin_unlock(&rdev->err_lock);
}
0048
/*
 * Delayed work run while the IRQ is kept disabled (irq_off_ms set). It
 * re-checks the error condition via the IC driver's renable() callback and
 * either re-enables the IRQ, reschedules itself, or - if retries are
 * exhausted - attempts last-resort recovery / protective shutdown.
 */
static void regulator_notifier_isr_work(struct work_struct *work)
{
	struct regulator_irq *h;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	int ret = 0;
	int tmo, i;
	int num_rdevs;

	h = container_of(work, struct regulator_irq,
			 isr_work.work);
	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

reread:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		/* Retry count exceeded; no recovery callback => shut down */
		if (!d->die)
			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		ret = d->die(rid);
		/*
		 * If the 'last resort' IC recovery failed we have nothing
		 * else left to do but shut the system down.
		 */
		if (ret)
			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);

		/*
		 * die() succeeded: assume the IC driver performed recovery
		 * (presumably disabled the failing regulator). Just
		 * re-enable the IRQ and bail out without clearing the
		 * cached error flags.
		 */
		goto enable_out;
	}
	if (d->renable) {
		ret = d->renable(rid);

		if (ret == REGULATOR_FAILED_RETRY) {
			/* Driver could not read current status; retry */
			h->retry_cnt++;
			if (!d->reread_ms)
				goto reread;

			tmo = d->reread_ms;
			goto reschedule;
		}

		if (ret) {
			/*
			 * Status read succeeded but some error is still
			 * active. Clear only the errors renable() reported
			 * as gone; keep the still-active ones flagged.
			 */
			for (i = 0; i < num_rdevs; i++) {
				struct regulator_err_state *stat;
				struct regulator_dev *rdev;

				stat = &rid->states[i];
				rdev = stat->rdev;
				rdev_clear_err(rdev, (~stat->errors) &
					       stat->possible_errs);
			}
			h->retry_cnt++;
			/*
			 * Problem persists - no point re-enabling the IRQ
			 * yet. Check again after irq_off_ms.
			 */
			tmo = d->irq_off_ms;
			goto reschedule;
		}
	}

	/*
	 * Either the IC reported the problem cleared or no status checker
	 * was provided. If problems are truly gone - good. If not, the IRQ
	 * will fire again and we start over. Either way, clear all cached
	 * error flags and re-enable the IRQ.
	 */
	for (i = 0; i < num_rdevs; i++) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;
		rdev_clear_err(rdev, stat->possible_errs);
	}

	/*
	 * Things look successful => reset the retry counter.
	 */
	h->retry_cnt = 0;

enable_out:
	enable_irq(h->irq);

	return;

reschedule:
	if (!d->high_prio)
		mod_delayed_work(system_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
	else
		mod_delayed_work(system_highpri_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
}
0155
/*
 * Threaded IRQ handler: ask the IC driver to map the IRQ to per-regulator
 * events, fire regulator notifiers and cache error flags. Optionally
 * disables the IRQ and defers re-enabling to regulator_notifier_isr_work()
 * for devices that keep the IRQ line asserted while the error persists.
 */
static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
	struct regulator_irq *h = data;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	unsigned long rdev_map = 0;
	int num_rdevs;
	int ret, i;

	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

	/* Count the attempt up-front; reset below once map_event succeeds */
	if (d->fatal_cnt)
		h->retry_cnt++;

	/*
	 * We spare a few cycles by not clearing statuses prior to this
	 * call: the IC driver must fill the errors/notifs fields for every
	 * rdev it flags as active in rdev_map. Statuses for rdevs not set
	 * in rdev_map are ignored, so they may be left stale.
	 */
	ret = d->map_event(irq, rid, &rdev_map);

	/*
	 * If the status read fails (unlikely) we don't ack/disable the IRQ
	 * but keep the incremented retry count and let the IRQ fire again;
	 * the fail_out path shuts the system down once fatal_cnt
	 * consecutive reads have failed.
	 */
	if (unlikely(ret == REGULATOR_FAILED_RETRY))
		goto fail_out;

	h->retry_cnt = 0;
	/*
	 * No status bits for us (or mapper reported an error) - leave
	 * spurious-IRQ handling to genirq by returning IRQ_NONE.
	 */
	if (ret || !rdev_map)
		return IRQ_NONE;

	/*
	 * Optionally ignore events for regulators that are currently
	 * disabled - some ICs flag errors (e.g. under-voltage) for
	 * regulators that are simply switched off.
	 */
	if (d->skip_off) {
		for_each_set_bit(i, &rdev_map, num_rdevs) {
			struct regulator_dev *rdev;
			const struct regulator_ops *ops;

			rdev = rid->states[i].rdev;
			ops = rdev->desc->ops;

			/*
			 * If any flagged regulator is enabled we do handle
			 * this event. NOTE(review): assumes every rdev here
			 * implements is_enabled() - confirm callers.
			 */
			if (ops->is_enabled(rdev))
				break;
		}
		/* Loop ran to completion => all flagged rdevs were off */
		if (i == num_rdevs)
			return IRQ_NONE;
	}

	/* Disable the IRQ if HW keeps the line asserted; work re-enables it */
	if (d->irq_off_ms)
		disable_irq_nosync(irq);

	/*
	 * IRQ seems to be ours. Fire the notifiers and store the error
	 * flags in the per-rdev cache.
	 */
	for_each_set_bit(i, &rdev_map, num_rdevs) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;

		rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
			 stat->notifs);

		regulator_notifier_call_chain(rdev, stat->notifs, NULL);
		rdev_flag_err(rdev, stat->errors);
	}

	/* Schedule the re-check that will eventually re-enable the IRQ */
	if (d->irq_off_ms) {
		if (!d->high_prio)
			schedule_delayed_work(&h->isr_work,
					      msecs_to_jiffies(d->irq_off_ms));
		else
			mod_delayed_work(system_highpri_wq,
					 &h->isr_work,
					 msecs_to_jiffies(d->irq_off_ms));
	}

	return IRQ_HANDLED;

fail_out:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		/* If we have no recovery callback, protect by shutting down */
		if (!d->die) {
			hw_protection_shutdown("Regulator failure. Retry count exceeded",
					       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		} else {
			ret = d->die(rid);
			/* If recovery callback also failed, shut down */
			if (ret)
				hw_protection_shutdown("Regulator failure. Recovery failed",
						       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		}
	}

	return IRQ_NONE;
}
0279
0280 static int init_rdev_state(struct device *dev, struct regulator_irq *h,
0281 struct regulator_dev **rdev, int common_err,
0282 int *rdev_err, int rdev_amount)
0283 {
0284 int i;
0285
0286 h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
0287 rdev_amount, GFP_KERNEL);
0288 if (!h->rdata.states)
0289 return -ENOMEM;
0290
0291 h->rdata.num_states = rdev_amount;
0292 h->rdata.data = h->desc.data;
0293
0294 for (i = 0; i < rdev_amount; i++) {
0295 h->rdata.states[i].possible_errs = common_err;
0296 if (rdev_err)
0297 h->rdata.states[i].possible_errs |= *rdev_err++;
0298 h->rdata.states[i].rdev = *rdev++;
0299 }
0300
0301 return 0;
0302 }
0303
0304 static void init_rdev_errors(struct regulator_irq *h)
0305 {
0306 int i;
0307
0308 for (i = 0; i < h->rdata.num_states; i++)
0309 if (h->rdata.states[i].possible_errs)
0310 h->rdata.states[i].rdev->use_cached_err = true;
0311 }
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
/**
 * regulator_irq_helper - register IRQ based regulator event/error notifier
 *
 * @dev:		device providing the IRQ (devm allocation owner)
 * @d:			IRQ helper description: callbacks, name and timing
 *			parameters. Copied, so the caller's struct need not
 *			stay alive. map_event and name are mandatory.
 * @irq:		IRQ to request
 * @irq_flags:		extra IRQF flags, ORed with IRQF_ONESHOT
 * @common_errs:	error flags possible on all given regulators
 * @per_rdev_errs:	optional array of additional per-regulator error
 *			flags (one entry per rdev), or NULL
 * @rdev:		array of @rdev_amount regulators serviced by this IRQ
 * @rdev_amount:	number of regulators; must be non-zero
 *
 * Returns a handle for regulator_irq_helper_cancel() on success, or an
 * ERR_PTR() encoded error. All memory is devm-managed; only the IRQ
 * itself must be released via regulator_irq_helper_cancel().
 */
void *regulator_irq_helper(struct device *dev,
			   const struct regulator_irq_desc *d, int irq,
			   int irq_flags, int common_errs, int *per_rdev_errs,
			   struct regulator_dev **rdev, int rdev_amount)
{
	struct regulator_irq *h;
	int ret;

	if (!rdev_amount || !d || !d->map_event || !d->name)
		return ERR_PTR(-EINVAL);

	h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	h->irq = irq;
	h->desc = *d;	/* copy so the caller's descriptor may go away */

	ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
			      rdev_amount);
	if (ret)
		return ERR_PTR(ret);

	init_rdev_errors(h);

	/* Work is only used when the IRQ is kept disabled between checks */
	if (h->desc.irq_off_ms)
		INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);

	ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
				   IRQF_ONESHOT | irq_flags, h->desc.name, h);
	if (ret) {
		dev_err(dev, "Failed to request IRQ %d\n", irq);

		return ERR_PTR(ret);
	}

	return h;
}
EXPORT_SYMBOL_GPL(regulator_irq_helper);
0376 EXPORT_SYMBOL_GPL(regulator_irq_helper);
0377
0378
0379
0380
0381
0382
0383
0384
0385
0386
0387 void regulator_irq_helper_cancel(void **handle)
0388 {
0389 if (handle && *handle) {
0390 struct regulator_irq *h = *handle;
0391
0392 free_irq(h->irq, h);
0393 if (h->desc.irq_off_ms)
0394 cancel_delayed_work_sync(&h->isr_work);
0395
0396 h = NULL;
0397 }
0398 }
0399 EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415 int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
0416 unsigned long *dev_mask)
0417 {
0418 int err = rid->states[0].possible_errs;
0419
0420 *dev_mask = 1;
0421
0422
0423
0424
0425
0426
0427 if (WARN_ON(rid->num_states != 1 || hweight32(err) != 1))
0428 return 0;
0429
0430 rid->states[0].errors = err;
0431 rid->states[0].notifs = regulator_err2notif(err);
0432
0433 return 0;
0434 }
0435 EXPORT_SYMBOL_GPL(regulator_irq_map_event_simple);
0436