0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/device.h>
0010 #include <linux/export.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/irq.h>
0013 #include <linux/irqdomain.h>
0014 #include <linux/pm_runtime.h>
0015 #include <linux/regmap.h>
0016 #include <linux/slab.h>
0017
0018 #include "internal.h"
0019
/*
 * Runtime state for one regmap-based interrupt controller instance.
 * All the *_buf arrays are indexed by status/mask register number and
 * cache the value to be written out at irq_bus_sync_unlock time.
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises buffered mask/type/wake updates */
	struct irq_chip irq_chip;	/* per-instance copy; .name set from chip */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static description from the driver */

	int irq_base;			/* first Linux IRQ if a legacy range was requested */
	struct irq_domain *domain;

	int irq;			/* primary (chained parent) interrupt */
	int wake_count;			/* net irq_set_irq_wake() requests since last sync */

	/*
	 * Resolved mask/unmask register bases; may be swapped relative to
	 * the chip description (see regmap_add_irq_chip_fwnode()).
	 */
	unsigned int mask_base;
	unsigned int unmask_base;

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *main_status_buf;	/* latest main (hierarchical) status values */
	unsigned int *status_buf;	/* latest per-register status values */
	unsigned int *mask_buf;		/* pending mask state, written at sync time */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;		/* pending wake mask state */
	unsigned int *type_buf;		/* pending trigger-type state (deprecated) */
	unsigned int *type_buf_def;	/* type register defaults read at init */
	unsigned int **virt_buf;	/* pending virtual register state (deprecated) */
	unsigned int **config_buf;	/* pending config register state */

	unsigned int irq_reg_stride;

	/* Maps (base, register index) to an actual register address. */
	unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
				    unsigned int base, int index);

	/* Status registers must be read (read-to-clear) at next sync. */
	unsigned int clear_status:1;
};
0054
0055 static inline const
0056 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
0057 int irq)
0058 {
0059 return &data->chip->irqs[irq];
0060 }
0061
0062 static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
0063 {
0064 struct regmap *map = data->map;
0065
0066
0067
0068
0069
0070
0071 return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
0072 data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
0073 !map->use_single_read;
0074 }
0075
/*
 * irq_chip bus lock: taken before the core calls our mask/unmask/type
 * callbacks so their buffered updates are serialised against the sync.
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
0082
/*
 * irq_chip bus sync unlock: flush all the buffered mask/type/wake/config
 * changes out to the hardware, then drop the bus lock.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, j, ret;
	u32 reg;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		/* Status registers are read-to-clear here: discard the values. */
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->get_irq_reg(d, d->chip->status_base, i);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (d->mask_base) {
			reg = d->get_irq_reg(d, d->mask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n",
					reg);
		}

		if (d->unmask_base) {
			/* Unmask registers take the inverted mask value. */
			reg = d->get_irq_reg(d, d->unmask_base, i);
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret)
				dev_err(d->map->dev, "Failed to sync masks in %x\n",
					reg);
		}

		reg = d->get_irq_reg(d, d->chip->wake_base, i);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;

		/*
		 * Ack all the masked interrupts unconditionally, OR if there
		 * is a masked interrupt which hasn't been Acked, it'd be
		 * ignored in irq handler, then may introduce irq storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->get_irq_reg(d, d->chip->ack_base, i);

			/* Some chips ack by writing 0 (ack_invert). */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				/* A follow-up write clears the ack register. */
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->get_irq_reg(d, d->chip->type_base, i);
			if (d->chip->type_invert)
				ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	/* Flush the deprecated virtual-register buffers, if in use. */
	if (d->chip->num_virt_regs) {
		for (i = 0; i < d->chip->num_virt_regs; i++) {
			for (j = 0; j < d->chip->num_regs; j++) {
				reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
						     j);
				ret = regmap_write(map, reg, d->virt_buf[i][j]);
				if (ret != 0)
					dev_err(d->map->dev,
						"Failed to write virt 0x%x: %d\n",
						reg, ret);
			}
		}
	}

	for (i = 0; i < d->chip->num_config_bases; i++) {
		for (j = 0; j < d->chip->num_config_regs; j++) {
			reg = d->get_irq_reg(d, d->chip->config_base[i], j);
			ret = regmap_write(map, reg, d->config_buf[i][j]);
			if (ret)
				dev_err(d->map->dev,
					"Failed to write config %x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent. */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
0236
/*
 * Unmask an IRQ by clearing its bit(s) in the buffered mask; the
 * hardware write happens later in regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int reg = irq_data->reg_offset / map->reg_stride;
	unsigned int mask;

	/*
	 * With type_in_mask the trigger-type bits live in the mask
	 * register itself, so only unmask the bits selected by the
	 * currently configured type (cached in type_buf); otherwise
	 * unmask the IRQ's whole mask.
	 */
	if (d->chip->type_in_mask && irq_data->type.types_supported)
		mask = d->type_buf[reg] & irq_data->mask;
	else
		mask = irq_data->mask;

	/* On clear_on_unmask chips, force a status read at sync time. */
	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[reg] &= ~mask;
}
0265
0266 static void regmap_irq_disable(struct irq_data *data)
0267 {
0268 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
0269 struct regmap *map = d->map;
0270 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
0271
0272 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
0273 }
0274
/*
 * Buffer a trigger-type change for this IRQ.  The actual register
 * update happens in regmap_irq_sync_unlock().
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg, ret;
	const struct regmap_irq_type *t = &irq_data->type;

	/* Quietly accept types this IRQ has no configuration bits for. */
	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	/* Clear either the explicit register mask or all per-type values. */
	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
	default:
		return -EINVAL;
	}

	/* Deprecated virtual-register hook, if the chip provides one. */
	if (d->chip->set_type_virt) {
		ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
					     reg);
		if (ret)
			return ret;
	}

	/* Preferred config-register hook for type configuration. */
	if (d->chip->set_type_config) {
		ret = d->chip->set_type_config(d->config_buf, type,
					       irq_data, reg);
		if (ret)
			return ret;
	}

	return 0;
}
0336
0337 static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
0338 {
0339 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
0340 struct regmap *map = d->map;
0341 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
0342
0343 if (on) {
0344 if (d->wake_buf)
0345 d->wake_buf[irq_data->reg_offset / map->reg_stride]
0346 &= ~irq_data->mask;
0347 d->wake_count++;
0348 } else {
0349 if (d->wake_buf)
0350 d->wake_buf[irq_data->reg_offset / map->reg_stride]
0351 |= irq_data->mask;
0352 d->wake_count--;
0353 }
0354
0355 return 0;
0356 }
0357
/* Template irq_chip; copied per instance so .name can be customised. */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
0366
/*
 * Read the status register(s) behind main-status bit @b into status_buf.
 * Returns 0 on success or the first regmap_read() error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
				    unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	unsigned int reg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		/* 1:1 mapping: main bit b corresponds to status register b. */
		reg = data->get_irq_reg(data, chip->status_base, b);
		ret = regmap_read(map, reg, &data->status_buf[b]);
	} else {
		/*
		 * Note we can't use ->get_irq_reg() here because the
		 * values in sub_reg_offsets are offsets, not indices;
		 * they are added to status_base directly.
		 */
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];
			unsigned int index = offset / map->reg_stride;

			/* not_fixed_stride chips index status_buf by main bit. */
			if (chip->not_fixed_stride)
				ret = regmap_read(map,
						chip->status_base + offset,
						&data->status_buf[b]);
			else
				ret = regmap_read(map,
						chip->status_base + offset,
						&data->status_buf[index]);

			if (ret)
				break;
		}
	}
	return ret;
}
0404
/*
 * Threaded handler for the chip's primary interrupt: read the status
 * registers, ack pending interrupts and dispatch each unmasked pending
 * bit to its nested virtual IRQ handler.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			goto exit;
		}
	}

	/*
	 * Read only the status registers whose main-status bit is set if
	 * the chip has a hierarchical "main status" register; otherwise
	 * read all status registers, as one bulk read when the layout
	 * allows it.
	 */
	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				chip->num_main_status_bits : chip->num_regs;

		/* Clear the status buf as we don't read all status regs. */
		memset(data->status_buf, 0, size);

		/*
		 * Single reads only here; devices with many main status
		 * registers are not expected, so bulk reads aren't worth
		 * supporting for this path.
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			/*
			 * not_fixed_stride chips can't go through
			 * ->get_irq_reg(); compute the address directly.
			 */
			if (data->chip->not_fixed_stride)
				reg = chip->main_status +
					i * map->reg_stride * data->irq_reg_stride;
			else
				reg = data->get_irq_reg(data,
							chip->main_status, i);

			ret = regmap_read(map, reg, &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				/* Stop past the last valid main-status bit. */
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (regmap_irq_can_bulk_read_status(data)) {

		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		/* Widen the raw bulk buffer into the unsigned int status_buf. */
		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Fallback: one regmap_read() per status register. */
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int reg = data->get_irq_reg(data,
					data->chip->status_base, i);
			ret = regmap_read(map, reg, &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	if (chip->status_invert)
		for (i = 0; i < data->chip->num_regs; i++)
			data->status_buf[i] = ~data->status_buf[i];

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = data->get_irq_reg(data, data->chip->ack_base, i);

			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			if (chip->clear_ack) {
				/* Follow-up write to clear the ack register. */
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch every unmasked pending hardware IRQ to its handler. */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
0593
/* irq_domain .map callback: set up one virtual IRQ as nested-threaded. */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}
0607
/* Domain ops shared by all regmap IRQ chips. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and
 * @index by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
					   unsigned int base, int index)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;

	/*
	 * FIXME: This is for backward compatibility and should be removed
	 * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
	 */
	if (chip->not_fixed_stride && chip->sub_reg_offsets) {
		struct regmap_irq_sub_irq_map *subreg;

		subreg = &chip->sub_reg_offsets[0];
		return base + subreg->offset[0];
	}

	return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655 int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
0656 const struct regmap_irq *irq_data, int idx)
0657 {
0658 const struct regmap_irq_type *t = &irq_data->type;
0659
0660 if (t->type_reg_mask)
0661 buf[0][idx] &= ~t->type_reg_mask;
0662 else
0663 buf[0][idx] &= ~(t->type_falling_val |
0664 t->type_rising_val |
0665 t->type_level_low_val |
0666 t->type_level_high_val);
0667
0668 switch (type) {
0669 case IRQ_TYPE_EDGE_FALLING:
0670 buf[0][idx] |= t->type_falling_val;
0671 break;
0672
0673 case IRQ_TYPE_EDGE_RISING:
0674 buf[0][idx] |= t->type_rising_val;
0675 break;
0676
0677 case IRQ_TYPE_EDGE_BOTH:
0678 buf[0][idx] |= (t->type_falling_val |
0679 t->type_rising_val);
0680 break;
0681
0682 case IRQ_TYPE_LEVEL_HIGH:
0683 buf[0][idx] |= t->type_level_high_val;
0684 break;
0685
0686 case IRQ_TYPE_LEVEL_LOW:
0687 buf[0][idx] |= t->type_level_low_val;
0688 break;
0689
0690 default:
0691 return -EINVAL;
0692 }
0693
0694 return 0;
0695 }
0696 EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
0697
0698
0699
0700
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715 int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
0716 struct regmap *map, int irq,
0717 int irq_flags, int irq_base,
0718 const struct regmap_irq_chip *chip,
0719 struct regmap_irq_chip_data **data)
0720 {
0721 struct regmap_irq_chip_data *d;
0722 int i;
0723 int ret = -ENOMEM;
0724 int num_type_reg;
0725 u32 reg;
0726
0727 if (chip->num_regs <= 0)
0728 return -EINVAL;
0729
0730 if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
0731 return -EINVAL;
0732
0733 for (i = 0; i < chip->num_irqs; i++) {
0734 if (chip->irqs[i].reg_offset % map->reg_stride)
0735 return -EINVAL;
0736 if (chip->irqs[i].reg_offset / map->reg_stride >=
0737 chip->num_regs)
0738 return -EINVAL;
0739 }
0740
0741 if (chip->not_fixed_stride) {
0742 dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");
0743
0744 for (i = 0; i < chip->num_regs; i++)
0745 if (chip->sub_reg_offsets[i].num_regs != 1)
0746 return -EINVAL;
0747 }
0748
0749 if (chip->num_type_reg)
0750 dev_warn(map->dev, "type registers are deprecated; use config registers instead");
0751
0752 if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
0753 dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");
0754
0755 if (irq_base) {
0756 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
0757 if (irq_base < 0) {
0758 dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
0759 irq_base);
0760 return irq_base;
0761 }
0762 }
0763
0764 d = kzalloc(sizeof(*d), GFP_KERNEL);
0765 if (!d)
0766 return -ENOMEM;
0767
0768 if (chip->num_main_regs) {
0769 d->main_status_buf = kcalloc(chip->num_main_regs,
0770 sizeof(*d->main_status_buf),
0771 GFP_KERNEL);
0772
0773 if (!d->main_status_buf)
0774 goto err_alloc;
0775 }
0776
0777 d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
0778 GFP_KERNEL);
0779 if (!d->status_buf)
0780 goto err_alloc;
0781
0782 d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
0783 GFP_KERNEL);
0784 if (!d->mask_buf)
0785 goto err_alloc;
0786
0787 d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
0788 GFP_KERNEL);
0789 if (!d->mask_buf_def)
0790 goto err_alloc;
0791
0792 if (chip->wake_base) {
0793 d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
0794 GFP_KERNEL);
0795 if (!d->wake_buf)
0796 goto err_alloc;
0797 }
0798
0799 num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
0800 if (num_type_reg) {
0801 d->type_buf_def = kcalloc(num_type_reg,
0802 sizeof(*d->type_buf_def), GFP_KERNEL);
0803 if (!d->type_buf_def)
0804 goto err_alloc;
0805
0806 d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf),
0807 GFP_KERNEL);
0808 if (!d->type_buf)
0809 goto err_alloc;
0810 }
0811
0812 if (chip->num_virt_regs) {
0813
0814
0815
0816 d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
0817 GFP_KERNEL);
0818 if (!d->virt_buf)
0819 goto err_alloc;
0820
0821 for (i = 0; i < chip->num_virt_regs; i++) {
0822 d->virt_buf[i] = kcalloc(chip->num_regs,
0823 sizeof(**d->virt_buf),
0824 GFP_KERNEL);
0825 if (!d->virt_buf[i])
0826 goto err_alloc;
0827 }
0828 }
0829
0830 if (chip->num_config_bases && chip->num_config_regs) {
0831
0832
0833
0834 d->config_buf = kcalloc(chip->num_config_bases,
0835 sizeof(*d->config_buf), GFP_KERNEL);
0836 if (!d->config_buf)
0837 goto err_alloc;
0838
0839 for (i = 0; i < chip->num_config_regs; i++) {
0840 d->config_buf[i] = kcalloc(chip->num_config_regs,
0841 sizeof(**d->config_buf),
0842 GFP_KERNEL);
0843 if (!d->config_buf[i])
0844 goto err_alloc;
0845 }
0846 }
0847
0848 d->irq_chip = regmap_irq_chip;
0849 d->irq_chip.name = chip->name;
0850 d->irq = irq;
0851 d->map = map;
0852 d->chip = chip;
0853 d->irq_base = irq_base;
0854
0855 if (chip->mask_base && chip->unmask_base &&
0856 !chip->mask_unmask_non_inverted) {
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868 dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
0869
0870
0871 if (chip->mask_invert)
0872 dev_warn(map->dev, "mask_invert=true ignored");
0873
0874 d->mask_base = chip->unmask_base;
0875 d->unmask_base = chip->mask_base;
0876 } else if (chip->mask_invert) {
0877
0878
0879
0880
0881
0882 dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
0883
0884 d->mask_base = chip->unmask_base;
0885 d->unmask_base = chip->mask_base;
0886 } else {
0887 d->mask_base = chip->mask_base;
0888 d->unmask_base = chip->unmask_base;
0889 }
0890
0891 if (chip->irq_reg_stride)
0892 d->irq_reg_stride = chip->irq_reg_stride;
0893 else
0894 d->irq_reg_stride = 1;
0895
0896 if (chip->get_irq_reg)
0897 d->get_irq_reg = chip->get_irq_reg;
0898 else
0899 d->get_irq_reg = regmap_irq_get_irq_reg_linear;
0900
0901 if (regmap_irq_can_bulk_read_status(d)) {
0902 d->status_reg_buf = kmalloc_array(chip->num_regs,
0903 map->format.val_bytes,
0904 GFP_KERNEL);
0905 if (!d->status_reg_buf)
0906 goto err_alloc;
0907 }
0908
0909 mutex_init(&d->lock);
0910
0911 for (i = 0; i < chip->num_irqs; i++)
0912 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
0913 |= chip->irqs[i].mask;
0914
0915
0916 for (i = 0; i < chip->num_regs; i++) {
0917 d->mask_buf[i] = d->mask_buf_def[i];
0918
0919 if (d->mask_base) {
0920 reg = d->get_irq_reg(d, d->mask_base, i);
0921 ret = regmap_update_bits(d->map, reg,
0922 d->mask_buf_def[i], d->mask_buf[i]);
0923 if (ret) {
0924 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
0925 reg, ret);
0926 goto err_alloc;
0927 }
0928 }
0929
0930 if (d->unmask_base) {
0931 reg = d->get_irq_reg(d, d->unmask_base, i);
0932 ret = regmap_update_bits(d->map, reg,
0933 d->mask_buf_def[i], ~d->mask_buf[i]);
0934 if (ret) {
0935 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
0936 reg, ret);
0937 goto err_alloc;
0938 }
0939 }
0940
0941 if (!chip->init_ack_masked)
0942 continue;
0943
0944
0945 reg = d->get_irq_reg(d, d->chip->status_base, i);
0946 ret = regmap_read(map, reg, &d->status_buf[i]);
0947 if (ret != 0) {
0948 dev_err(map->dev, "Failed to read IRQ status: %d\n",
0949 ret);
0950 goto err_alloc;
0951 }
0952
0953 if (chip->status_invert)
0954 d->status_buf[i] = ~d->status_buf[i];
0955
0956 if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
0957 reg = d->get_irq_reg(d, d->chip->ack_base, i);
0958 if (chip->ack_invert)
0959 ret = regmap_write(map, reg,
0960 ~(d->status_buf[i] & d->mask_buf[i]));
0961 else
0962 ret = regmap_write(map, reg,
0963 d->status_buf[i] & d->mask_buf[i]);
0964 if (chip->clear_ack) {
0965 if (chip->ack_invert && !ret)
0966 ret = regmap_write(map, reg, UINT_MAX);
0967 else if (!ret)
0968 ret = regmap_write(map, reg, 0);
0969 }
0970 if (ret != 0) {
0971 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
0972 reg, ret);
0973 goto err_alloc;
0974 }
0975 }
0976 }
0977
0978
0979 if (d->wake_buf) {
0980 for (i = 0; i < chip->num_regs; i++) {
0981 d->wake_buf[i] = d->mask_buf_def[i];
0982 reg = d->get_irq_reg(d, d->chip->wake_base, i);
0983
0984 if (chip->wake_invert)
0985 ret = regmap_update_bits(d->map, reg,
0986 d->mask_buf_def[i],
0987 0);
0988 else
0989 ret = regmap_update_bits(d->map, reg,
0990 d->mask_buf_def[i],
0991 d->wake_buf[i]);
0992 if (ret != 0) {
0993 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
0994 reg, ret);
0995 goto err_alloc;
0996 }
0997 }
0998 }
0999
1000 if (chip->num_type_reg && !chip->type_in_mask) {
1001 for (i = 0; i < chip->num_type_reg; ++i) {
1002 reg = d->get_irq_reg(d, d->chip->type_base, i);
1003
1004 ret = regmap_read(map, reg, &d->type_buf_def[i]);
1005
1006 if (d->chip->type_invert)
1007 d->type_buf_def[i] = ~d->type_buf_def[i];
1008
1009 if (ret) {
1010 dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
1011 reg, ret);
1012 goto err_alloc;
1013 }
1014 }
1015 }
1016
1017 if (irq_base)
1018 d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
1019 irq_base, 0,
1020 ®map_domain_ops, d);
1021 else
1022 d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
1023 ®map_domain_ops, d);
1024 if (!d->domain) {
1025 dev_err(map->dev, "Failed to create IRQ domain\n");
1026 ret = -ENOMEM;
1027 goto err_alloc;
1028 }
1029
1030 ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
1031 irq_flags | IRQF_ONESHOT,
1032 chip->name, d);
1033 if (ret != 0) {
1034 dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
1035 irq, chip->name, ret);
1036 goto err_domain;
1037 }
1038
1039 *data = d;
1040
1041 return 0;
1042
1043 err_domain:
1044
1045 err_alloc:
1046 kfree(d->type_buf);
1047 kfree(d->type_buf_def);
1048 kfree(d->wake_buf);
1049 kfree(d->mask_buf_def);
1050 kfree(d->mask_buf);
1051 kfree(d->status_buf);
1052 kfree(d->status_reg_buf);
1053 if (d->virt_buf) {
1054 for (i = 0; i < chip->num_virt_regs; i++)
1055 kfree(d->virt_buf[i]);
1056 kfree(d->virt_buf);
1057 }
1058 if (d->config_buf) {
1059 for (i = 0; i < chip->num_config_bases; i++)
1060 kfree(d->config_buf[i]);
1061 kfree(d->config_buf);
1062 }
1063 kfree(d);
1064 return ret;
1065 }
1066 EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode(), except that the
 * firmware node is taken from the map's parent device.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
					  irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
1101 {
1102 unsigned int virq;
1103 int i, hwirq;
1104
1105 if (!d)
1106 return;
1107
1108 free_irq(irq, d);
1109
1110
1111 for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
1112
1113 if (!d->chip->irqs[hwirq].mask)
1114 continue;
1115
1116
1117
1118
1119
1120 virq = irq_find_mapping(d->domain, hwirq);
1121 if (virq)
1122 irq_dispose_mapping(virq);
1123 }
1124
1125 irq_domain_remove(d->domain);
1126 kfree(d->type_buf);
1127 kfree(d->type_buf_def);
1128 kfree(d->wake_buf);
1129 kfree(d->mask_buf_def);
1130 kfree(d->mask_buf);
1131 kfree(d->status_reg_buf);
1132 kfree(d->status_buf);
1133 if (d->config_buf) {
1134 for (i = 0; i < d->chip->num_config_bases; i++)
1135 kfree(d->config_buf[i]);
1136 kfree(d->config_buf);
1137 }
1138 kfree(d);
1139 }
1140 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
1141
/* devres release callback: tear down the managed IRQ chip. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}
1148
/* devres match callback: select the resource wrapping @data. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178 int devm_regmap_add_irq_chip_fwnode(struct device *dev,
1179 struct fwnode_handle *fwnode,
1180 struct regmap *map, int irq,
1181 int irq_flags, int irq_base,
1182 const struct regmap_irq_chip *chip,
1183 struct regmap_irq_chip_data **data)
1184 {
1185 struct regmap_irq_chip_data **ptr, *d;
1186 int ret;
1187
1188 ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
1189 GFP_KERNEL);
1190 if (!ptr)
1191 return -ENOMEM;
1192
1193 ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
1194 chip, &d);
1195 if (ret < 0) {
1196 devres_free(ptr);
1197 return ret;
1198 }
1199
1200 *ptr = d;
1201 devres_add(dev, ptr);
1202 *data = d;
1203 return 0;
1204 }
1205 EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
/**
 * devm_regmap_add_irq_chip() - Resource-managed regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.  The firmware node is
 * taken from the map's parent device.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
					       irq, irq_flags, irq_base, chip,
					       data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243 void devm_regmap_del_irq_chip(struct device *dev, int irq,
1244 struct regmap_irq_chip_data *data)
1245 {
1246 int rc;
1247
1248 WARN_ON(irq != data->irq);
1249 rc = devres_release(dev, devm_regmap_irq_chip_release,
1250 devm_regmap_irq_chip_match, data);
1251
1252 if (rc != 0)
1253 WARN_ON(rc);
1254 }
1255 EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
1256
1257
1258
1259
1260
1261
1262
1263
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.  Only valid when a
 * legacy IRQ range was allocated (irq_base was passed at registration);
 * warns otherwise.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1270
1271
1272
1273
1274
1275
1276
1277
1278
/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.  Returns a negative
 * errno for holes in the IRQ list (zero mask).
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
1300 {
1301 if (data)
1302 return data->domain;
1303 else
1304 return NULL;
1305 }
1306 EXPORT_SYMBOL_GPL(regmap_irq_get_domain);