0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #define pr_fmt(fmt) "OF: " fmt
0018
0019 #include <linux/console.h>
0020 #include <linux/ctype.h>
0021 #include <linux/cpu.h>
0022 #include <linux/module.h>
0023 #include <linux/of.h>
0024 #include <linux/of_device.h>
0025 #include <linux/of_graph.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/slab.h>
0028 #include <linux/string.h>
0029 #include <linux/proc_fs.h>
0030
0031 #include "of_private.h"
0032
/* List of registered aliases, populated from the /aliases node */
LIST_HEAD(aliases_lookup);

/* Roots of the live tree and frequently used well-known nodes */
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

/* Backing kset for the /sys/firmware/devicetree representation */
struct kset *of_kset;

/*
 * Serializes tree modifications and sysfs attachment; taken together with
 * devtree_lock by of_add_property() below.
 */
DEFINE_MUTEX(of_mutex);

/*
 * Protects traversal of the node links (child/sibling/parent) and the
 * property lists; taken by all the of_find_* helpers in this file.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);
0057
0058 bool of_node_name_eq(const struct device_node *np, const char *name)
0059 {
0060 const char *node_name;
0061 size_t len;
0062
0063 if (!np)
0064 return false;
0065
0066 node_name = kbasename(np->full_name);
0067 len = strchrnul(node_name, '@') - node_name;
0068
0069 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
0070 }
0071 EXPORT_SYMBOL(of_node_name_eq);
0072
0073 bool of_node_name_prefix(const struct device_node *np, const char *prefix)
0074 {
0075 if (!np)
0076 return false;
0077
0078 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
0079 }
0080 EXPORT_SYMBOL(of_node_name_prefix);
0081
0082 static bool __of_node_is_type(const struct device_node *np, const char *type)
0083 {
0084 const char *match = __of_get_property(np, "device_type", NULL);
0085
0086 return np && match && type && !strcmp(match, type);
0087 }
0088
0089 int of_bus_n_addr_cells(struct device_node *np)
0090 {
0091 u32 cells;
0092
0093 for (; np; np = np->parent)
0094 if (!of_property_read_u32(np, "#address-cells", &cells))
0095 return cells;
0096
0097
0098 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
0099 }
0100
0101 int of_n_addr_cells(struct device_node *np)
0102 {
0103 if (np->parent)
0104 np = np->parent;
0105
0106 return of_bus_n_addr_cells(np);
0107 }
0108 EXPORT_SYMBOL(of_n_addr_cells);
0109
0110 int of_bus_n_size_cells(struct device_node *np)
0111 {
0112 u32 cells;
0113
0114 for (; np; np = np->parent)
0115 if (!of_property_read_u32(np, "#size-cells", &cells))
0116 return cells;
0117
0118
0119 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
0120 }
0121
0122 int of_n_size_cells(struct device_node *np)
0123 {
0124 if (np->parent)
0125 np = np->parent;
0126
0127 return of_bus_n_size_cells(np);
0128 }
0129 EXPORT_SYMBOL(of_n_size_cells);
0130
#ifdef CONFIG_NUMA
/* Weak default: architectures may override to map a node to a NUMA id. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
0137
/*
 * Phandle lookups are accelerated by a small direct-mapped cache of
 * 2^OF_PHANDLE_CACHE_BITS entries, indexed by a hash of the phandle
 * value (see of_find_node_by_phandle()).
 */
#define OF_PHANDLE_CACHE_BITS 7
#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

/* Map a phandle value to its cache slot index. */
static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
0147
0148
0149
0150
0151 void __of_phandle_cache_inv_entry(phandle handle)
0152 {
0153 u32 handle_hash;
0154 struct device_node *np;
0155
0156 if (!handle)
0157 return;
0158
0159 handle_hash = of_phandle_cache_hash(handle);
0160
0161 np = phandle_cache[handle_hash];
0162 if (np && handle == np->phandle)
0163 phandle_cache[handle_hash] = NULL;
0164 }
0165
/*
 * of_core_init - Register the sysfs view of the live devicetree
 *
 * Creates the "devicetree" kset under /sys/firmware, attaches every
 * existing node to sysfs while priming the phandle cache, and finally
 * adds the legacy /proc/device-tree symlink.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* First node with a given hash wins the cache slot */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink to the sysfs tree for userspace that expects /proc */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
0190
0191 static struct property *__of_find_property(const struct device_node *np,
0192 const char *name, int *lenp)
0193 {
0194 struct property *pp;
0195
0196 if (!np)
0197 return NULL;
0198
0199 for (pp = np->properties; pp; pp = pp->next) {
0200 if (of_prop_cmp(pp->name, name) == 0) {
0201 if (lenp)
0202 *lenp = pp->length;
0203 break;
0204 }
0205 }
0206
0207 return pp;
0208 }
0209
0210 struct property *of_find_property(const struct device_node *np,
0211 const char *name,
0212 int *lenp)
0213 {
0214 struct property *pp;
0215 unsigned long flags;
0216
0217 raw_spin_lock_irqsave(&devtree_lock, flags);
0218 pp = __of_find_property(np, name, lenp);
0219 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0220
0221 return pp;
0222 }
0223 EXPORT_SYMBOL(of_find_property);
0224
0225 struct device_node *__of_find_all_nodes(struct device_node *prev)
0226 {
0227 struct device_node *np;
0228 if (!prev) {
0229 np = of_root;
0230 } else if (prev->child) {
0231 np = prev->child;
0232 } else {
0233
0234 np = prev;
0235 while (np->parent && !np->sibling)
0236 np = np->parent;
0237 np = np->sibling;
0238 }
0239 return np;
0240 }
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250 struct device_node *of_find_all_nodes(struct device_node *prev)
0251 {
0252 struct device_node *np;
0253 unsigned long flags;
0254
0255 raw_spin_lock_irqsave(&devtree_lock, flags);
0256 np = __of_find_all_nodes(prev);
0257 of_node_get(np);
0258 of_node_put(prev);
0259 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0260 return np;
0261 }
0262 EXPORT_SYMBOL(of_find_all_nodes);
0263
0264
0265
0266
0267
0268 const void *__of_get_property(const struct device_node *np,
0269 const char *name, int *lenp)
0270 {
0271 struct property *pp = __of_find_property(np, name, lenp);
0272
0273 return pp ? pp->value : NULL;
0274 }
0275
0276
0277
0278
0279
0280 const void *of_get_property(const struct device_node *np, const char *name,
0281 int *lenp)
0282 {
0283 struct property *pp = of_find_property(np, name, lenp);
0284
0285 return pp ? pp->value : NULL;
0286 }
0287 EXPORT_SYMBOL(of_get_property);
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297 u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
0298 {
0299 const __be32 *cell;
0300 int ac, len;
0301
0302 ac = of_n_addr_cells(cpun);
0303 cell = of_get_property(cpun, "reg", &len);
0304 if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
0305 return ~0ULL;
0306
0307 cell += ac * thread;
0308 return of_read_number(cell, ac);
0309 }
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
/*
 * Weak default: a cpu matches when the low 32 bits of its hardware id
 * equal the logical cpu number. Architectures may override.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}
0329
0330
0331
0332
0333
0334
/*
 * Check whether @cpun's @prop_name property contains a hardware id that
 * matches logical @cpu; on success the matching thread index is stored
 * in @thread (when non-NULL).
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	/*
	 * Special case: no property and zero address cells still matches
	 * when the architecture accepts hardware id 0 for this cpu.
	 */
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	/* Convert byte length to the number of (ac-cell) hardware ids */
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}
0360
0361
0362
0363
0364
0365
0366
/*
 * Weak default for matching a cpu node against a logical cpu number.
 * Architectures with special cpu enumeration may override.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * On PPC, "ibm,ppc-interrupt-server#s" is checked first; it
	 * enumerates the SMT threads of a cpu node.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	/* Everything else (and PPC fallback) uses the "reg" property */
	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
0403 {
0404 struct device_node *cpun;
0405
0406 for_each_of_cpu_node(cpun) {
0407 if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
0408 return cpun;
0409 }
0410 return NULL;
0411 }
0412 EXPORT_SYMBOL(of_get_cpu_node);
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422 int of_cpu_node_to_id(struct device_node *cpu_node)
0423 {
0424 int cpu;
0425 bool found = false;
0426 struct device_node *np;
0427
0428 for_each_possible_cpu(cpu) {
0429 np = of_cpu_device_node_get(cpu);
0430 found = (cpu_node == np);
0431 of_node_put(np);
0432 if (found)
0433 return cpu;
0434 }
0435
0436 return -ENODEV;
0437 }
0438 EXPORT_SYMBOL(of_cpu_node_to_id);
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455 struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
0456 int index)
0457 {
0458 struct of_phandle_args args;
0459 int err;
0460
0461 err = of_parse_phandle_with_args(cpu_node, "power-domains",
0462 "#power-domain-cells", 0, &args);
0463 if (!err) {
0464 struct device_node *state_node =
0465 of_parse_phandle(args.np, "domain-idle-states", index);
0466
0467 of_node_put(args.np);
0468 if (state_node)
0469 return state_node;
0470 }
0471
0472 return of_parse_phandle(cpu_node, "cpu-idle-states", index);
0473 }
0474 EXPORT_SYMBOL(of_get_cpu_state_node);
0475
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
/*
 * Score how well @device matches a (compatible, type, name) triple.
 * Returns 0 on no match. Otherwise: an earlier position in the node's
 * "compatible" list scores higher (INT_MAX/2 minus 4 per index), a
 * matching device_type adds 2, and a matching node name adds 1 as
 * tie-breakers. Caller holds devtree_lock.
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
0543
0544
0545
0546
0547 int of_device_is_compatible(const struct device_node *device,
0548 const char *compat)
0549 {
0550 unsigned long flags;
0551 int res;
0552
0553 raw_spin_lock_irqsave(&devtree_lock, flags);
0554 res = __of_device_is_compatible(device, compat, NULL, NULL);
0555 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0556 return res;
0557 }
0558 EXPORT_SYMBOL(of_device_is_compatible);
0559
0560
0561
0562
0563
/*
 * of_device_compatible_match - Best score over a NULL-terminated list
 * of compatible strings. Returns 0 when nothing matches or @compat is
 * NULL.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
0581
0582
0583
0584
0585
0586
0587
0588
/*
 * of_machine_is_compatible - Test the root node against @compat
 *
 * Returns the match score of the root node's "compatible" list, or 0
 * when there is no root node or no match.
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int score;

	if (!root)
		return 0;

	score = of_device_is_compatible(root, compat);
	of_node_put(root);
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611 static bool __of_device_is_available(const struct device_node *device)
0612 {
0613 const char *status;
0614 int statlen;
0615
0616 if (!device)
0617 return false;
0618
0619 status = __of_get_property(device, "status", &statlen);
0620 if (status == NULL)
0621 return true;
0622
0623 if (statlen > 0) {
0624 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
0625 return true;
0626 }
0627
0628 return false;
0629 }
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639 bool of_device_is_available(const struct device_node *device)
0640 {
0641 unsigned long flags;
0642 bool res;
0643
0644 raw_spin_lock_irqsave(&devtree_lock, flags);
0645 res = __of_device_is_available(device);
0646 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0647 return res;
0648
0649 }
0650 EXPORT_SYMBOL(of_device_is_available);
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660 static bool __of_device_is_fail(const struct device_node *device)
0661 {
0662 const char *status;
0663
0664 if (!device)
0665 return false;
0666
0667 status = __of_get_property(device, "status", NULL);
0668 if (status == NULL)
0669 return false;
0670
0671 return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
0672 }
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686 bool of_device_is_big_endian(const struct device_node *device)
0687 {
0688 if (of_property_read_bool(device, "big-endian"))
0689 return true;
0690 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
0691 of_property_read_bool(device, "native-endian"))
0692 return true;
0693 return false;
0694 }
0695 EXPORT_SYMBOL(of_device_is_big_endian);
0696
0697
0698
0699
0700
0701
0702
0703
0704 struct device_node *of_get_parent(const struct device_node *node)
0705 {
0706 struct device_node *np;
0707 unsigned long flags;
0708
0709 if (!node)
0710 return NULL;
0711
0712 raw_spin_lock_irqsave(&devtree_lock, flags);
0713 np = of_node_get(node->parent);
0714 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0715 return np;
0716 }
0717 EXPORT_SYMBOL(of_get_parent);
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730 struct device_node *of_get_next_parent(struct device_node *node)
0731 {
0732 struct device_node *parent;
0733 unsigned long flags;
0734
0735 if (!node)
0736 return NULL;
0737
0738 raw_spin_lock_irqsave(&devtree_lock, flags);
0739 parent = of_node_get(node->parent);
0740 of_node_put(node);
0741 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0742 return parent;
0743 }
0744 EXPORT_SYMBOL(of_get_next_parent);
0745
/*
 * Return @prev's next sibling, or @node's first child when @prev is
 * NULL. Takes a reference on the returned node and drops the one on
 * @prev. Caller is responsible for locking.
 */
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
/* Lockless child iteration; see __of_get_next_child() for refcounting */
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772 struct device_node *of_get_next_child(const struct device_node *node,
0773 struct device_node *prev)
0774 {
0775 struct device_node *next;
0776 unsigned long flags;
0777
0778 raw_spin_lock_irqsave(&devtree_lock, flags);
0779 next = __of_get_next_child(node, prev);
0780 raw_spin_unlock_irqrestore(&devtree_lock, flags);
0781 return next;
0782 }
0783 EXPORT_SYMBOL(of_get_next_child);
0784
0785
0786
0787
0788
0789
0790
0791
0792
/*
 * of_get_next_available_child - Next child whose status is available
 *
 * Like of_get_next_child() but skips children for which
 * __of_device_is_available() is false. Takes a reference on the
 * returned node and drops the one on @prev.
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		/* of_node_get() can fail; keep scanning if it does */
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826
/*
 * of_get_next_cpu_node - Iterate over cpu nodes under /cpus
 * @prev:	previous cpu node, or NULL to start at the first child
 *
 * Children that are neither named "cpu" nor of device_type "cpu" are
 * skipped, as are nodes whose status is "fail"/"fail-...". Takes a
 * reference on the returned node and drops the one on @prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Look up /cpus before taking the lock (it sleeps/locks itself) */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (__of_device_is_fail(next))
			continue;
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869 struct device_node *of_get_compatible_child(const struct device_node *parent,
0870 const char *compatible)
0871 {
0872 struct device_node *child;
0873
0874 for_each_child_of_node(parent, child) {
0875 if (of_device_is_compatible(child, compatible))
0876 break;
0877 }
0878
0879 return child;
0880 }
0881 EXPORT_SYMBOL(of_get_compatible_child);
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
0894 struct device_node *of_get_child_by_name(const struct device_node *node,
0895 const char *name)
0896 {
0897 struct device_node *child;
0898
0899 for_each_child_of_node(node, child)
0900 if (of_node_name_eq(child, name))
0901 break;
0902 return child;
0903 }
0904 EXPORT_SYMBOL(of_get_child_by_name);
0905
0906 struct device_node *__of_find_node_by_path(struct device_node *parent,
0907 const char *path)
0908 {
0909 struct device_node *child;
0910 int len;
0911
0912 len = strcspn(path, "/:");
0913 if (!len)
0914 return NULL;
0915
0916 __for_each_child_of_node(parent, child) {
0917 const char *name = kbasename(child->full_name);
0918 if (strncmp(path, name, len) == 0 && (strlen(name) == len))
0919 return child;
0920 }
0921 return NULL;
0922 }
0923
/*
 * __of_find_node_by_full_path - Walk a '/'-separated path from @node
 *
 * Each step drops the reference on the node it descended from. Path
 * traversal stops at a ':' separator (option text). Returns the final
 * node, or NULL when any component is missing.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Stop once the next component lies beyond the ':' options */
		if (separator && separator < path)
			break;
	}
	return node;
}
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
/*
 * of_find_node_opts_by_path - Find a node by full path or alias
 * @path:	either a full "/..." path, or "alias[/rest]", optionally
 *		followed by ":options"
 * @opts:	optional output; set to the text after ':' or NULL
 *
 * Returns the node with its refcount incremented (use of_node_put()),
 * or NULL when not found.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		/* An alias property's value is the aliased full path */
		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018 struct device_node *of_find_node_by_name(struct device_node *from,
1019 const char *name)
1020 {
1021 struct device_node *np;
1022 unsigned long flags;
1023
1024 raw_spin_lock_irqsave(&devtree_lock, flags);
1025 for_each_of_allnodes_from(from, np)
1026 if (of_node_name_eq(np, name) && of_node_get(np))
1027 break;
1028 of_node_put(from);
1029 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1030 return np;
1031 }
1032 EXPORT_SYMBOL(of_find_node_by_name);
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046 struct device_node *of_find_node_by_type(struct device_node *from,
1047 const char *type)
1048 {
1049 struct device_node *np;
1050 unsigned long flags;
1051
1052 raw_spin_lock_irqsave(&devtree_lock, flags);
1053 for_each_of_allnodes_from(from, np)
1054 if (__of_node_is_type(np, type) && of_node_get(np))
1055 break;
1056 of_node_put(from);
1057 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1058 return np;
1059 }
1060 EXPORT_SYMBOL(of_find_node_by_type);
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076 struct device_node *of_find_compatible_node(struct device_node *from,
1077 const char *type, const char *compatible)
1078 {
1079 struct device_node *np;
1080 unsigned long flags;
1081
1082 raw_spin_lock_irqsave(&devtree_lock, flags);
1083 for_each_of_allnodes_from(from, np)
1084 if (__of_device_is_compatible(np, compatible, type, NULL) &&
1085 of_node_get(np))
1086 break;
1087 of_node_put(from);
1088 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1089 return np;
1090 }
1091 EXPORT_SYMBOL(of_find_compatible_node);
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
/*
 * of_find_node_with_property - Global search for a node that has a
 * property named @prop_name
 * @from:	node to search from (reference is dropped), NULL for whole tree
 *
 * Returns the node with its refcount incremented, or NULL.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1127
1128 static
1129 const struct of_device_id *__of_match_node(const struct of_device_id *matches,
1130 const struct device_node *node)
1131 {
1132 const struct of_device_id *best_match = NULL;
1133 int score, best_score = 0;
1134
1135 if (!matches)
1136 return NULL;
1137
1138 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
1139 score = __of_device_is_compatible(node, matches->compatible,
1140 matches->type, matches->name);
1141 if (score > best_score) {
1142 best_match = matches;
1143 best_score = score;
1144 }
1145 }
1146
1147 return best_match;
1148 }
1149
1150
1151
1152
1153
1154
1155
1156
1157 const struct of_device_id *of_match_node(const struct of_device_id *matches,
1158 const struct device_node *node)
1159 {
1160 const struct of_device_id *match;
1161 unsigned long flags;
1162
1163 raw_spin_lock_irqsave(&devtree_lock, flags);
1164 match = __of_match_node(matches, node);
1165 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1166 return match;
1167 }
1168 EXPORT_SYMBOL(of_match_node);
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
/*
 * of_find_matching_node_and_match - Global search against a match table
 * @from:	node to search from (reference is dropped), NULL for whole tree
 * @matches:	table of of_device_id entries to match against
 * @match:	optional output for the entry that matched
 *
 * Returns the node with its refcount incremented, or NULL.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222 int of_modalias_node(struct device_node *node, char *modalias, int len)
1223 {
1224 const char *compatible, *p;
1225 int cplen;
1226
1227 compatible = of_get_property(node, "compatible", &cplen);
1228 if (!compatible || strlen(compatible) > cplen)
1229 return -ENODEV;
1230 p = strchr(compatible, ',');
1231 strlcpy(modalias, p ? p + 1 : compatible, len);
1232 return 0;
1233 }
1234 EXPORT_SYMBOL_GPL(of_modalias_node);
1235
1236
1237
1238
1239
1240
1241
1242
/*
 * of_find_node_by_phandle - Find a node given its phandle
 * @handle:	phandle to look up (0 never matches)
 *
 * Returns the node with its refcount incremented (use of_node_put()),
 * or NULL.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Fast path: the hashed cache slot may already hold this phandle */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	/* Slow path: scan every node and refill the cache slot */
	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1274
1275 void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1276 {
1277 int i;
1278 printk("%s %pOF", msg, args->np);
1279 for (i = 0; i < args->args_count; i++) {
1280 const char delim = i ? ',' : ':';
1281
1282 pr_cont("%c%08x", delim, args->args[i]);
1283 }
1284 pr_cont("\n");
1285 }
1286
1287 int of_phandle_iterator_init(struct of_phandle_iterator *it,
1288 const struct device_node *np,
1289 const char *list_name,
1290 const char *cells_name,
1291 int cell_count)
1292 {
1293 const __be32 *list;
1294 int size;
1295
1296 memset(it, 0, sizeof(*it));
1297
1298
1299
1300
1301
1302 if (cell_count < 0 && !cells_name)
1303 return -EINVAL;
1304
1305 list = of_get_property(np, list_name, &size);
1306 if (!list)
1307 return -ENOENT;
1308
1309 it->cells_name = cells_name;
1310 it->cell_count = cell_count;
1311 it->parent = np;
1312 it->list_end = list + size / sizeof(*list);
1313 it->phandle_end = list;
1314 it->cur = list;
1315
1316 return 0;
1317 }
1318 EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1319
/*
 * of_phandle_iterator_next - Advance @it to the next phandle entry
 *
 * Resolves the next phandle, determines its argument count from the
 * provider's cells property (or the iterator's fixed/fallback count)
 * and validates that the arguments fit in the remaining data. Returns
 * 0 on success, -ENOENT at the end of the list, -EINVAL on malformed
 * data.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Release the node held from the previous iteration */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name were
				 * given, fall back to cell_count when the
				 * cells property is absent.
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the
		 * remaining property data length.
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
				       it->parent, it->cells_name,
				       count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
				       it->parent, of_node_full_name(it->node),
				       count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1404
1405 int of_phandle_iterator_args(struct of_phandle_iterator *it,
1406 uint32_t *args,
1407 int size)
1408 {
1409 int i, count;
1410
1411 count = it->cur_count;
1412
1413 if (WARN_ON(size < count))
1414 count = size;
1415
1416 for (i = 0; i < count; i++)
1417 args[i] = be32_to_cpup(it->cur++);
1418
1419 return count;
1420 }
1421
/*
 * __of_parse_phandle_with_args - Resolve entry @index of a phandle list
 *
 * Iterates @np's @list_name until the requested index, then fills
 * @out_args (node reference transferred to out_args->np) or drops the
 * node when @out_args is NULL. Returns 0 on success, -EINVAL for a bad
 * index or malformed data, -ENOENT for an out-of-range index or an
 * empty (phandle == 0) entry.
 */
int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point the parsing is successful. If the requested
		 * index matches, fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Result at this point is one of:
	 * -ENOENT : index was larger than the number of entries
	 * -EINVAL : parsing error on data
	 */

err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
/*
 * of_parse_phandle_with_args_map - Parse a phandle list entry and remap
 * it through "<stem>-map" translation tables
 * @np:		node containing the @list_name property
 * @list_name:	property holding the phandle+args list
 * @stem_name:	stem for "#<stem>-cells", "<stem>-map",
 *		"<stem>-map-mask" and "<stem>-map-pass-thru" properties
 * @index:	index of the entry to parse
 * @out_args:	filled with the (possibly remapped) provider and args
 *
 * Follows map properties from provider to provider until a node
 * without a map is reached. Caller must of_node_put() out_args->np.
 * Returns 0 on success, -ENOMEM, -EINVAL or a parse error.
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No map means translation is complete */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy the new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677 int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1678 const char *cells_name)
1679 {
1680 struct of_phandle_iterator it;
1681 int rc, cur_index = 0;
1682
1683
1684
1685
1686
1687
1688
1689 if (!cells_name) {
1690 const __be32 *list;
1691 int size;
1692
1693 list = of_get_property(np, list_name, &size);
1694 if (!list)
1695 return -ENOENT;
1696
1697 return size / sizeof(*list);
1698 }
1699
1700 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
1701 if (rc)
1702 return rc;
1703
1704 while ((rc = of_phandle_iterator_next(&it)) == 0)
1705 cur_index += 1;
1706
1707 if (rc != -ENOENT)
1708 return rc;
1709
1710 return cur_index;
1711 }
1712 EXPORT_SYMBOL(of_count_phandle_with_args);
1713
1714
1715
1716
1717
1718
1719 int __of_add_property(struct device_node *np, struct property *prop)
1720 {
1721 struct property **next;
1722
1723 prop->next = NULL;
1724 next = &np->properties;
1725 while (*next) {
1726 if (strcmp(prop->name, (*next)->name) == 0)
1727
1728 return -EEXIST;
1729
1730 next = &(*next)->next;
1731 }
1732 *next = prop;
1733
1734 return 0;
1735 }
1736
1737
1738
1739
1740
1741
1742 int of_add_property(struct device_node *np, struct property *prop)
1743 {
1744 unsigned long flags;
1745 int rc;
1746
1747 mutex_lock(&of_mutex);
1748
1749 raw_spin_lock_irqsave(&devtree_lock, flags);
1750 rc = __of_add_property(np, prop);
1751 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1752
1753 if (!rc)
1754 __of_add_property_sysfs(np, prop);
1755
1756 mutex_unlock(&of_mutex);
1757
1758 if (!rc)
1759 of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
1760
1761 return rc;
1762 }
1763 EXPORT_SYMBOL_GPL(of_add_property);
1764
1765 int __of_remove_property(struct device_node *np, struct property *prop)
1766 {
1767 struct property **next;
1768
1769 for (next = &np->properties; *next; next = &(*next)->next) {
1770 if (*next == prop)
1771 break;
1772 }
1773 if (*next == NULL)
1774 return -ENODEV;
1775
1776
1777 *next = prop->next;
1778 prop->next = np->deadprops;
1779 np->deadprops = prop;
1780
1781 return 0;
1782 }
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794 int of_remove_property(struct device_node *np, struct property *prop)
1795 {
1796 unsigned long flags;
1797 int rc;
1798
1799 if (!prop)
1800 return -ENODEV;
1801
1802 mutex_lock(&of_mutex);
1803
1804 raw_spin_lock_irqsave(&devtree_lock, flags);
1805 rc = __of_remove_property(np, prop);
1806 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1807
1808 if (!rc)
1809 __of_remove_property_sysfs(np, prop);
1810
1811 mutex_unlock(&of_mutex);
1812
1813 if (!rc)
1814 of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
1815
1816 return rc;
1817 }
1818 EXPORT_SYMBOL_GPL(of_remove_property);
1819
1820 int __of_update_property(struct device_node *np, struct property *newprop,
1821 struct property **oldpropp)
1822 {
1823 struct property **next, *oldprop;
1824
1825 for (next = &np->properties; *next; next = &(*next)->next) {
1826 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1827 break;
1828 }
1829 *oldpropp = oldprop = *next;
1830
1831 if (oldprop) {
1832
1833 newprop->next = oldprop->next;
1834 *next = newprop;
1835 oldprop->next = np->deadprops;
1836 np->deadprops = oldprop;
1837 } else {
1838
1839 newprop->next = NULL;
1840 *next = newprop;
1841 }
1842
1843 return 0;
1844 }
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855 int of_update_property(struct device_node *np, struct property *newprop)
1856 {
1857 struct property *oldprop;
1858 unsigned long flags;
1859 int rc;
1860
1861 if (!newprop->name)
1862 return -EINVAL;
1863
1864 mutex_lock(&of_mutex);
1865
1866 raw_spin_lock_irqsave(&devtree_lock, flags);
1867 rc = __of_update_property(np, newprop, &oldprop);
1868 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1869
1870 if (!rc)
1871 __of_update_property_sysfs(np, newprop, oldprop);
1872
1873 mutex_unlock(&of_mutex);
1874
1875 if (!rc)
1876 of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
1877
1878 return rc;
1879 }
1880
/*
 * of_alias_add - Record one alias on the global aliases_lookup list.
 * @ap:		preallocated alias_prop with room for stem_len + 1 stem bytes
 * @np:		device node the alias resolves to
 * @id:		numeric suffix parsed from the alias name
 * @stem:	start of the alias name (not NUL-terminated at stem_len)
 * @stem_len:	number of leading bytes of @stem that form the stem
 */
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	/* Copy exactly the stem prefix, then terminate it explicitly. */
	strncpy(ap->stem, stem, stem_len);
	ap->stem[stem_len] = 0;
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902 void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1903 {
1904 struct property *pp;
1905
1906 of_aliases = of_find_node_by_path("/aliases");
1907 of_chosen = of_find_node_by_path("/chosen");
1908 if (of_chosen == NULL)
1909 of_chosen = of_find_node_by_path("/chosen@0");
1910
1911 if (of_chosen) {
1912
1913 const char *name = NULL;
1914
1915 if (of_property_read_string(of_chosen, "stdout-path", &name))
1916 of_property_read_string(of_chosen, "linux,stdout-path",
1917 &name);
1918 if (IS_ENABLED(CONFIG_PPC) && !name)
1919 of_property_read_string(of_aliases, "stdout", &name);
1920 if (name)
1921 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1922 if (of_stdout)
1923 of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
1924 }
1925
1926 if (!of_aliases)
1927 return;
1928
1929 for_each_property_of_node(of_aliases, pp) {
1930 const char *start = pp->name;
1931 const char *end = start + strlen(start);
1932 struct device_node *np;
1933 struct alias_prop *ap;
1934 int id, len;
1935
1936
1937 if (!strcmp(pp->name, "name") ||
1938 !strcmp(pp->name, "phandle") ||
1939 !strcmp(pp->name, "linux,phandle"))
1940 continue;
1941
1942 np = of_find_node_by_path(pp->value);
1943 if (!np)
1944 continue;
1945
1946
1947
1948 while (isdigit(*(end-1)) && end > start)
1949 end--;
1950 len = end - start;
1951
1952 if (kstrtoint(end, 10, &id) < 0)
1953 continue;
1954
1955
1956 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
1957 if (!ap)
1958 continue;
1959 memset(ap, 0, sizeof(*ap) + len + 1);
1960 ap->alias = start;
1961 of_alias_add(ap, np, id, start, len);
1962 }
1963 }
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 int of_alias_get_id(struct device_node *np, const char *stem)
1976 {
1977 struct alias_prop *app;
1978 int id = -ENODEV;
1979
1980 mutex_lock(&of_mutex);
1981 list_for_each_entry(app, &aliases_lookup, link) {
1982 if (strcmp(app->stem, stem) != 0)
1983 continue;
1984
1985 if (np == app->np) {
1986 id = app->id;
1987 break;
1988 }
1989 }
1990 mutex_unlock(&of_mutex);
1991
1992 return id;
1993 }
1994 EXPORT_SYMBOL_GPL(of_alias_get_id);
1995
1996
1997
1998
1999
2000
2001
2002
2003 int of_alias_get_highest_id(const char *stem)
2004 {
2005 struct alias_prop *app;
2006 int id = -ENODEV;
2007
2008 mutex_lock(&of_mutex);
2009 list_for_each_entry(app, &aliases_lookup, link) {
2010 if (strcmp(app->stem, stem) != 0)
2011 continue;
2012
2013 if (app->id > id)
2014 id = app->id;
2015 }
2016 mutex_unlock(&of_mutex);
2017
2018 return id;
2019 }
2020 EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033 bool of_console_check(struct device_node *dn, char *name, int index)
2034 {
2035 if (!dn || dn != of_stdout || console_set_on_cmdline)
2036 return false;
2037
2038
2039
2040
2041
2042 return !add_preferred_console(name, index, (char *)of_stdout_options);
2043 }
2044 EXPORT_SYMBOL_GPL(of_console_check);
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054 struct device_node *of_find_next_cache_node(const struct device_node *np)
2055 {
2056 struct device_node *child, *cache_node;
2057
2058 cache_node = of_parse_phandle(np, "l2-cache", 0);
2059 if (!cache_node)
2060 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2061
2062 if (cache_node)
2063 return cache_node;
2064
2065
2066
2067
2068 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2069 for_each_child_of_node(np, child)
2070 if (of_node_is_type(child, "cache"))
2071 return child;
2072
2073 return NULL;
2074 }
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
/**
 * of_find_last_cache_level - Find the level of the last (deepest) cache
 * @cpu: logical cpu whose cache chain is walked
 *
 * Follows the l2-cache/next-level-cache chain from the CPU node and reads
 * "cache-level" from the last node found.  Returns 0 if there is no cache
 * chain or the final node has no "cache-level" property (cache_level is
 * left at its initial 0 in that case).
 */
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		prev = np;
		/*
		 * NOTE(review): the reference is dropped before np is handed
		 * to of_find_next_cache_node() and before prev is dereferenced
		 * below; this presumably relies on the node not being removed
		 * concurrently (no OF_DYNAMIC teardown) — confirm.
		 */
		of_node_put(np);
		np = of_find_next_cache_node(np);
	}

	/* prev may be NULL here; of_property_read_u32() tolerates that. */
	of_property_read_u32(prev, "cache-level", &cache_level);

	return cache_level;
}
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
/**
 * of_map_id - Translate an ID through a downstream mapping
 * @np: root complex device node
 * @id: device ID to map
 * @map_name: property name of the mapping, e.g. "iommu-map" or "msi-map"
 * @map_mask_name: optional property name of the mask, e.g. "iommu-map-mask"
 * @target: optional; if it points at a non-NULL node, only map entries
 *          leading to that node match; if it points at NULL, it is filled
 *          with the first matched target node
 * @id_out: optional pointer receiving the translated ID
 *
 * Walks the (id-base, phandle, out-base, length) quadruplets of @map_name
 * and translates @id accordingly.  If the property is absent the ID is
 * passed through unchanged — unless a specific @target was requested.
 *
 * Return: 0 on success, or a negative errno.
 */
int of_map_id(struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	/* At least one output (a target node or a translated id) is required. */
	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* No map at all: only an error if the caller pinned a target. */
		if (target)
			return -ENODEV;

		/* Otherwise, no map implies no translation. */
		*id_out = id;
		return 0;
	}

	/* Each map entry is four u32 cells: id-base, phandle, out-base, length. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* Default mask selects every bit of the input ID. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by the "*-map-mask" property; if the read fails
	 * the default all-ones mask stays in effect.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* The mask must cover all of id-base, or the map is broken. */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		/* Is the masked ID inside this entry's window? */
		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* A preset *target restricts matches to that node. */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* No entry matched: pass the ID through untranslated. */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);