0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/slab.h>
0010 #include <linux/mutex.h>
0011 #include <linux/debugfs.h>
0012 #include <linux/uaccess.h>
0013 #include <linux/device.h>
0014 #include <linux/list.h>
0015
0016 #include "internal.h"
0017
/*
 * Bookkeeping entry for a regmap registered before the debugfs root
 * directory exists; queued on regmap_debugfs_early_list and replayed
 * from regmap_debugfs_initcall().
 */
struct regmap_debugfs_node {
	struct regmap *map;
	struct list_head link;
};
0022
/* Monotonic suffix used to disambiguate maps whose name is "dummy" */
static unsigned int dummy_index;
/* Root <debugfs>/regmap directory; NULL until regmap_debugfs_initcall() */
static struct dentry *regmap_debugfs_root;
/* Maps registered before the root existed, plus the lock guarding the list */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
0027
0028
/*
 * Number of hex digits needed to print max_val with "%x".  Matches the
 * printf conversion exactly: the int is reinterpreted as unsigned, and
 * zero still takes one digit.
 */
static size_t regmap_calc_reg_len(int max_val)
{
	unsigned int v = (unsigned int)max_val;
	size_t len = 1;

	while (v > 0xf) {
		v >>= 4;
		len++;
	}

	return len;
}
0033
0034 static ssize_t regmap_name_read_file(struct file *file,
0035 char __user *user_buf, size_t count,
0036 loff_t *ppos)
0037 {
0038 struct regmap *map = file->private_data;
0039 const char *name = "nodev";
0040 int ret;
0041 char *buf;
0042
0043 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
0044 if (!buf)
0045 return -ENOMEM;
0046
0047 if (map->dev && map->dev->driver)
0048 name = map->dev->driver->name;
0049
0050 ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
0051 if (ret < 0) {
0052 kfree(buf);
0053 return ret;
0054 }
0055
0056 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
0057 kfree(buf);
0058 return ret;
0059 }
0060
/* <debugfs>/regmap/<name>/name: read-only driver name dump */
static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
0066
0067 static void regmap_debugfs_free_dump_cache(struct regmap *map)
0068 {
0069 struct regmap_debugfs_off_cache *c;
0070
0071 while (!list_empty(&map->debugfs_off_cache)) {
0072 c = list_first_entry(&map->debugfs_off_cache,
0073 struct regmap_debugfs_off_cache,
0074 list);
0075 list_del(&c->list);
0076 kfree(c);
0077 }
0078 }
0079
0080 static bool regmap_printable(struct regmap *map, unsigned int reg)
0081 {
0082 if (regmap_precious(map, reg))
0083 return false;
0084
0085 if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
0086 return false;
0087
0088 return true;
0089 }
0090
0091
0092
0093
0094
/*
 * Work out where the given file offset 'from' maps into register
 * numbers, bearing in mind that unprintable registers are suppressed
 * from the dump.  On first use this builds a cache of contiguous
 * printable-register blocks with their file offsets; subsequent calls
 * translate via the cache.  *pos is set to the block-aligned file
 * position for the returned register.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we are using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		/* Remember the last block so we can fall back to its end */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
0180
0181 static inline void regmap_calc_tot_len(struct regmap *map,
0182 void *buf, size_t count)
0183 {
0184
0185 if (!map->debugfs_tot_len) {
0186 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
0187 map->debugfs_val_len = 2 * map->format.val_bytes;
0188 map->debugfs_tot_len = map->debugfs_reg_len +
0189 map->debugfs_val_len + 3;
0190 }
0191 }
0192
/*
 * Return the next printable register after 'reg', or -EINVAL when there
 * is none.  Fast path is the immediately following register (reg +
 * stride); otherwise fall back to the offset cache built by
 * regmap_debugfs_get_dump_start() and jump to the next block.
 */
static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				/* First block that starts beyond reg */
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}
0214
/*
 * Core dump routine shared by the "registers" file and the per-range
 * files: format registers [from, to] as "<reg>: <value>\n" lines into a
 * kernel buffer and copy the window selected by *ppos/count to
 * userspace.  Register numbers are printed relative to 'from'.
 */
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Cap the allocation at the largest contiguous size kmalloc allows */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register address */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}
0283
0284 static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
0285 size_t count, loff_t *ppos)
0286 {
0287 struct regmap *map = file->private_data;
0288
0289 return regmap_read_debugfs(map, 0, map->max_register, user_buf,
0290 count, ppos);
0291 }
0292
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 *
 * Write handler for the "registers" file: parses "<reg> <value>" (hex)
 * from userspace and issues a raw regmap_write().
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/* Skip leading spaces, then parse "<reg> <value>" in hex */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
0336
/*
 * <debugfs>/regmap/<name>/registers: full register dump; write support
 * only when REGMAP_ALLOW_WRITE_DEBUGFS is defined (NULL otherwise).
 */
static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
0343
0344 static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
0345 size_t count, loff_t *ppos)
0346 {
0347 struct regmap_range_node *range = file->private_data;
0348 struct regmap *map = range->map;
0349
0350 return regmap_read_debugfs(map, range->range_min, range->range_max,
0351 user_buf, count, ppos);
0352 }
0353
/* <debugfs>/regmap/<name>/<range-name>: read-only dump of one range */
static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};
0359
/*
 * Read handler for the "range" file: list the contiguous printable
 * register blocks as "<first>-<last>\n" lines, using the same offset
 * cache as the register dump.
 */
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Cap the allocation at the largest contiguous size kmalloc allows */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Scratch buffer for formatting one "<first>-<last>\n" entry */
	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset file pointer: this file uses its own fixed format, not
	 * the offsets computed for the `registers' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}
0427
/* <debugfs>/regmap/<name>/range: read-only list of printable blocks */
static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
0433
0434 static int regmap_access_show(struct seq_file *s, void *ignored)
0435 {
0436 struct regmap *map = s->private;
0437 int i, reg_len;
0438
0439 reg_len = regmap_calc_reg_len(map->max_register);
0440
0441 for (i = 0; i <= map->max_register; i += map->reg_stride) {
0442
0443 if (!regmap_readable(map, i) && !regmap_writeable(map, i))
0444 continue;
0445
0446
0447 seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
0448 regmap_readable(map, i) ? 'y' : 'n',
0449 regmap_writeable(map, i) ? 'y' : 'n',
0450 regmap_volatile(map, i) ? 'y' : 'n',
0451 regmap_precious(map, i) ? 'y' : 'n');
0452 }
0453
0454 return 0;
0455 }
0456
/* Generates regmap_access_open/regmap_access_fops around the show routine */
DEFINE_SHOW_ATTRIBUTE(regmap_access);
0458
/*
 * Write handler for the "cache_only" file: force the cache_only flag
 * from userspace.  Enabling taints the kernel; disabling triggers a
 * regcache_sync() to flush cached writes to the hardware.
 */
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/*
	 * Malformed input is swallowed (count returned, not err) —
	 * presumably to mirror debugfs_write_file_bool(); confirm before
	 * changing.
	 */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	/* Sync outside the map lock to avoid recursive locking */
	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}
0499
/* <debugfs>/regmap/<name>/cache_only: bool read, guarded forced write */
static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
0505
/*
 * Write handler for the "cache_bypass" file: force the cache_bypass
 * flag from userspace.  Enabling taints the kernel; unlike cache_only,
 * clearing it requires no cache sync.
 */
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/*
	 * Malformed input is swallowed (count returned, not err) —
	 * presumably to mirror debugfs_write_file_bool(); confirm before
	 * changing.
	 */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}
0539
/* <debugfs>/regmap/<name>/cache_bypass: bool read, guarded forced write */
static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
0545
/*
 * Create the debugfs directory and files for one regmap: name, range,
 * registers/access (when readable registers exist), cache control
 * files (when a cache is configured) and one file per named register
 * range.  Called again from regmap_debugfs_initcall() for maps that
 * registered before the debugfs root existed.
 */
void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected
	 * with a mutex or a spinlock, but if the regmap owner decided to
	 * disable all locking mechanisms, this is no longer the case.
	 * For safety: don't create the debugfs entries if locking is
	 * disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	/* Directory name is "<dev>-<map name>", or just the device name */
	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	/* Disambiguate multiple "dummy" maps with a numeric suffix */
	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
						dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

		/* Register writes from debugfs are opt-in at build time */
#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	/* Expose each named register range as its own dump file */
	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	/* Let the cache implementation add its own entries */
	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}
0654
/*
 * Tear down a regmap's debugfs state: remove its directory tree and
 * free the dump offset cache.  If the map never got a directory (it was
 * only queued on the early list), drop it from that list instead.
 */
void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}
0678
/*
 * Create the debugfs root directory and replay debugfs init for every
 * map that registered before the root existed, draining the early list.
 */
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}