// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-42s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                  tag          avg         peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				u32 avg_bw = 0, peak_bw = 0;

				if (!r->dev)
					continue;

				if (r->enabled) {
					avg_bw = r->avg_bw;
					peak_bw = r->peak_bw;
				}

				seq_printf(s, " %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static void icc_graph_show_link(struct seq_file *s, int level,
				struct icc_node *n, struct icc_node *m)
{
	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
		   level == 2 ? "\t\t" : "\t",
		   n->id, n->name, m->id, m->name);
}

static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");
}

static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw links between nodes within the same provider */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw links between nodes belonging to different providers */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source node */
		depth++;

	} while (!list_empty(&traverse_list));

out:
	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}
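
/*
 * Bandwidth aggregation is provider specific: every consumer request on a
 * node is folded into the node totals via the provider's aggregate()
 * callback, optionally preceded by pre_aggregate().
 */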
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* until the synced state is reached, use the initial bandwidth as a floor */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}

static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/*
		 * Set constraints only between nodes of the same provider,
		 * unless the provider can set them across providers.
		 */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
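
/**
 * of_icc_xlate_onecell() - generic single-cell translation callback
 * @spec: OF phandle args to map into an interconnect node
 * @data: pointer to the provider's struct icc_onecell_data
 *
 * Translate callback for providers that describe their nodes with a single
 * cell: the cell is used as an index into the provider's node array.
 *
 * Returns the matching node, or ERR_PTR(-EINVAL) if the index is out of range.
 */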
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
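
/**
 * of_icc_get_from_provider() - look up a node from an OF phandle specifier
 * @spec: OF phandle args to use for lookup
 *
 * Walks the registered providers and asks the one matching @spec->np to
 * translate the specifier via its xlate() or xlate_extended() callback.
 *
 * Returns a struct icc_node_data on success, ERR_PTR(-EPROBE_DEFER) if the
 * provider is not registered yet, or another ERR_PTR() on error.
 */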
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (IS_ERR(node))
		return ERR_CAST(node);

	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);
		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);

static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}
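
/**
 * devm_of_icc_get() - device-managed version of of_icc_get()
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * Same as of_icc_get(), but the returned path is automatically released
 * with icc_put() when @dev is unbound.
 */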
struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
	struct icc_path **ptr, *path;

	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	path = of_icc_get(dev, name);
	if (!IS_ERR(path)) {
		*ptr = path;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);
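
/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * Looks up the @idx'th path in the consumer's "interconnects" DT property
 * and returns a handle for it. Returns NULL when the property is missing,
 * so setting constraints can simply be skipped in that case, and ERR_PTR()
 * on error. The returned path must be released with icc_put().
 */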
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects"
	 * property, return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * Each path is described by a pair of phandle+specifier entries:
	 * the source endpoint at (idx * 2) and the destination endpoint
	 * at (idx * 2 + 1).
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
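
/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name, or NULL for the first (or only) path
 *
 * Looks up the path in the consumer's "interconnects" DT property, using
 * "interconnect-names" to resolve @name to an index. Returns NULL when the
 * "interconnects" property is missing and ERR_PTR() on error. The returned
 * path must be released with icc_put().
 */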
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct device_node *np;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects"
	 * property, return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * If a path name was given, map it to an index via the optional
	 * "interconnect-names" property; otherwise use the first path.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);
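
/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path to tag
 * @tag: the tag value
 *
 * The tag is passed to the provider's aggregate() callback for every request
 * on the path, so providers can treat tagged requests differently.
 */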
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);
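
/**
 * icc_get_name() - get the name of an interconnect path
 * @path: interconnect path
 *
 * Returns the name of the path, or NULL if @path is NULL.
 */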
const char *icc_get_name(struct icc_path *path)
{
	if (!path)
		return NULL;

	return path->name;
}
EXPORT_SYMBOL_GPL(icc_get_name);
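
/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: interconnect path
 * @avg_bw: average bandwidth in kBps
 * @peak_bw: peak bandwidth in kBps
 *
 * Updates the consumer request on every node of the path, re-aggregates the
 * per-node totals and applies the new constraints. On failure the previous
 * request values are restored.
 *
 * Returns 0 on success or a negative errno on failure.
 */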
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		/* restore the previous bandwidth values */
		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);

static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;

	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}

int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
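
/**
 * icc_get() - get a path handle between two endpoints by node id
 * @dev: device pointer for the consumer device
 * @src_id: source node id
 * @dst_id: destination node id
 *
 * Searches for a path between the two endpoint node ids. The returned path
 * must be released with icc_put() when no longer needed.
 *
 * Returns the path, ERR_PTR(-EPROBE_DEFER) if an endpoint is not registered
 * yet, or another ERR_PTR() on error.
 */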
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);
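
/**
 * icc_put() - release a path handle
 * @path: interconnect path
 *
 * Drops the bandwidth request on every node of the path, removes the
 * requests from the node lists and frees the path.
 */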
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}
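
/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Returns a pointer to the node on success (an already existing node with
 * the same id is returned as-is), or ERR_PTR() on error.
 */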
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);
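
/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 *
 * Removes the node from the id lookup table and frees it. Destroying a node
 * that still has pending requests triggers a warning.
 */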
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
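
/**
 * icc_link_create() - create a link between two nodes
 * @node: source node
 * @dst_id: destination node id
 *
 * Creates a directed link from @node to the node with id @dst_id. If the
 * destination node does not exist yet, a placeholder node is created for it.
 *
 * Returns 0 on success or a negative errno on failure.
 */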
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);
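
/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: source node
 * @dst: destination node
 *
 * Returns 0 on success or a negative errno on failure.
 */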
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	src->links[slot] = src->links[--src->num_links];

	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;
	else
		ret = -ENOMEM;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);
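
/**
 * icc_node_add() - add an interconnect node to an interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */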
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (provider->pre_aggregate)
		provider->pre_aggregate(node);

	if (provider->aggregate)
		provider->aggregate(node, 0, node->init_avg, node->init_peak,
				    &node->avg_bw, &node->peak_bw);

	provider->set(node, node);
	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
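
/**
 * icc_node_del() - remove an interconnect node from its provider
 * @node: pointer to the interconnect node
 */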
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);
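
/**
 * icc_nodes_remove() - remove all previously added nodes from a provider
 * @provider: the interconnect provider
 *
 * Returns 0 on success or a negative errno on failure.
 */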
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);
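
/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider to add to the topology
 *
 * The provider must supply a set() callback and either xlate() or
 * xlate_extended(). Nodes are added to the provider afterwards with
 * icc_node_add().
 *
 * Returns 0 on success or a negative errno on failure.
 */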
int icc_provider_add(struct icc_provider *provider)
{
	if (WARN_ON(!provider->set))
		return -EINVAL;
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);

	INIT_LIST_HEAD(&provider->nodes);
	list_add_tail(&provider->provider_list, &icc_providers);

	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider added to topology\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);
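
/**
 * icc_provider_del() - delete a previously added interconnect provider
 * @provider: the interconnect provider to delete
 *
 * Fails with -EBUSY if the provider still has users or nodes.
 *
 * Returns 0 on success or a negative errno on failure.
 */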
int icc_provider_del(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	if (provider->users) {
		pr_warn("interconnect provider still has %d users\n",
			provider->users);
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	if (!list_empty(&provider->nodes)) {
		pr_warn("interconnect provider still has nodes\n");
		mutex_unlock(&icc_lock);
		return -EBUSY;
	}

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);

static int of_count_icc_providers(struct device_node *np)
{
	struct device_node *child;
	int count = 0;
	const struct of_device_id __maybe_unused ignore_list[] = {
		{ .compatible = "qcom,sc7180-ipa-virt" },
		{ .compatible = "qcom,sdx55-ipa-virt" },
		{}
	};

	for_each_available_child_of_node(np, child) {
		if (of_property_read_bool(child, "#interconnect-cells") &&
		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

	return count;
}

void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);

static int __init icc_init(void)
{
	struct device_node *root = of_find_node_by_path("/");

	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");