// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};
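
/*
 * regcache_hw_init() - Craft reg_defaults from the hardware.
 *
 * Walks every readable, non-volatile register and fills
 * map->reg_defaults, taking the values from the raw defaults table if
 * one was supplied and reading them back from the device otherwise.
 */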
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}
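
/*
 * regcache_init() - Set up the register cache described by @config.
 *
 * Selects the cache_ops matching map->cache_type, copies or constructs
 * the register defaults and calls the cache backend's init hook.
 */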
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"Register defaults number is set without the reg!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register && map->num_reg_defaults_raw)
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
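
/*
 * regcache_exit() - Free the cached register defaults and let the
 * cache backend tear down its own state.
 */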
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}
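
/*
 * regcache_default_sync() - Generic sync used when the cache backend
 * provides no sync() operation: walk [min, max] and write out every
 * cached, writeable, non-volatile value that needs syncing.
 */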
static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
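
/*
 * A minimal usage sketch (hypothetical driver resume path, not part of
 * this file): after powering the device back up, drop cache-only mode
 * and write back any non-default values:
 *
 *	regcache_cache_only(map, false);
 *	ret = regcache_sync(map);
 *
 * with regcache_mark_dirty(map) having been called when the device
 * lost its register state (e.g. on suspend).
 */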

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync.
 * @max: last register to sync.
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

/**
 * regcache_drop_region - Discard part of the register cache.
 *
 * @map: map to operate on.
 * @min: first register to discard.
 * @max: last register to discard.
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

/**
 * regcache_cache_only - Put a register map into cache only mode.
 *
 * @map: map to configure.
 * @enable: flag if changes should be written to the hardware.
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
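
/*
 * A minimal sketch of the matching suspend path (hypothetical driver
 * code): stop hardware access and note that register state will be
 * lost while the device is powered down:
 *
 *	regcache_cache_only(map, true);
 *	regcache_mark_dirty(map);
 */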

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values.
 *
 * @map: map to mark.
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any registers
 * altered by regmap_update_bits().
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode.
 *
 * @map: map to configure.
 * @enable: flag if changes should not be written to the cache.
 *
 * When a register map is marked with the bypass option, writes to the
 * register map API will only update the hardware and not the cache
 * directly.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
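
/*
 * regcache_set_val() - Store @val at @idx in a raw cache block,
 * returning true if the cache already held that value.  The value is
 * stored in the device's native format when one is defined.
 */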
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}
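
/*
 * regcache_get_val() - Read the value stored at @idx in a raw cache
 * block, using the device's native format when one is defined.
 */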
unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}

	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}
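
/*
 * regcache_lookup_reg() - Binary-search map->reg_defaults (which must
 * be sorted by register address for bsearch() to work) for @reg;
 * returns its index, or -ENOENT if it has no default.
 */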
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}
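
/*
 * regcache_sync_block_single() - Sync a cache block one register at a
 * time, used when raw block writes are not available.
 */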
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}
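
/*
 * regcache_sync_block_raw_flush() - Write out the raw data accumulated
 * so far in one bulk transfer; the registers from @base up to but not
 * including @cur are flushed, and *data is reset for the next run.
 */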
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}
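
/*
 * regcache_sync_block_raw() - Sync a cache block by coalescing
 * adjacent registers that need writing into raw bulk writes, flushing
 * the pending run whenever a register can be skipped.
 */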
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}
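
/*
 * regcache_sync_block() - Sync one cache block, picking raw bulk
 * writes when the bus supports them and per-register writes otherwise.
 */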
int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}