// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

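/*
 * Common support code for Qualcomm RPM-based interconnect providers
 * (NoC, BIMC and QNoC buses): QoS register programming, bandwidth
 * aggregation and bandwidth votes sent to the RPM over SMD.
 */
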
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)  (0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK    0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT   4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK    0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT   3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)       (0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)        (M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK  0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK   0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK       0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)   (0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK    0xc
#define NOC_QOS_PRIORITY_P0_MASK    0x3
#define NOC_QOS_PRIORITY_P1_SHIFT   0x2

#define NOC_QOS_MODEn_ADDR(n)       (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK      0x3

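/**
 * qcom_icc_set_qnoc_qos - program QNoC QoS settings for a node
 * @src: interconnect node whose QoS port is configured
 * @max_bw: aggregated bandwidth; not used by this QoS scheme
 *
 * Write the default priority and urgent-forwarding enable fields of the
 * node's QOS_MCTL_LOW register.
 *
 * Return: 0 on success or a negative errno from regmap_update_bits().
 */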
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
    struct icc_provider *provider = src->provider;
    struct qcom_icc_provider *qp = to_qcom_provider(provider);
    struct qcom_icc_node *qn = src->data;
    struct qcom_icc_qos *qos = &qn->qos;
    int rc;

    rc = regmap_update_bits(qp->regmap,
            qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
            QNOC_QOS_MCTL_DFLT_PRIO_MASK,
            qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
    if (rc)
        return rc;

    return regmap_update_bits(qp->regmap,
            qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
            QNOC_QOS_MCTL_URGFWD_EN_MASK,
            !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

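/**
 * qcom_icc_bimc_set_qos_health - program one M_BKE_HEALTH_CFG register
 * @qp: QCOM interconnect provider that owns the regmap
 * @qos: QoS parameters of the node being configured
 * @regnum: index of the HEALTH_CFG register (0..3)
 *
 * Set the priority level, the AREQPRIO field and, except for register 3,
 * the LIMITCMDS field for the node's QoS port.
 *
 * Return: 0 on success or a negative errno from regmap_update_bits().
 */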
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
                    struct qcom_icc_qos *qos,
                    int regnum)
{
    u32 val;
    u32 mask;

    val = qos->prio_level;
    mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

    val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
    mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

    /* LIMITCMDS is not present on M_BKE_HEALTH_3 */
    if (regnum != 3) {
        val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
        mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
    }

    return regmap_update_bits(qp->regmap,
                  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
                  mask, val);
}

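/**
 * qcom_icc_set_bimc_qos - apply BIMC QoS configuration for a node
 * @src: interconnect node whose QoS port is configured
 * @max_bw: aggregated bandwidth; not used by this QoS scheme
 *
 * When the node is not in bypass mode, program all four M_BKE_HEALTH_CFG
 * registers and enable the BKE (M_BKE_EN) for its QoS port; in bypass
 * mode the BKE is disabled.
 *
 * Return: 0 on success or a negative errno.
 */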
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
    struct qcom_icc_provider *qp;
    struct qcom_icc_node *qn;
    struct icc_provider *provider;
    u32 mode = NOC_QOS_MODE_BYPASS;
    u32 val = 0;
    int i, rc = 0;

    qn = src->data;
    provider = src->provider;
    qp = to_qcom_provider(provider);

    if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
        mode = qn->qos.qos_mode;

    /*
     * QoS priority: the QoS health parameters are only applied when the
     * node is NOT in bypass mode.
     */
    if (mode != NOC_QOS_MODE_BYPASS) {
        for (i = 3; i >= 0; i--) {
            rc = qcom_icc_bimc_set_qos_health(qp,
                              &qn->qos, i);
            if (rc)
                return rc;
        }

        /* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
        val = 1;
    }

    return regmap_update_bits(qp->regmap,
                  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
                  M_BKE_EN_EN_BMASK, val);
}

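/**
 * qcom_icc_noc_set_qos_priority - program NoC QoS priority for a node
 * @qp: QCOM interconnect provider that owns the regmap
 * @qos: QoS parameters of the node being configured
 *
 * Update the P1 and P0 fields of the node's QOS_PRIORITY register, one
 * field at a time, P1 first and P0 last.
 *
 * Return: 0 on success or a negative errno from regmap_update_bits().
 */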
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
                     struct qcom_icc_qos *qos)
{
    u32 val;
    int rc;

    /* Must be updated one at a time, P1 first, P0 last */
    val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
    rc = regmap_update_bits(qp->regmap,
                qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
                NOC_QOS_PRIORITY_P1_MASK, val);
    if (rc)
        return rc;

    return regmap_update_bits(qp->regmap,
                  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
                  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

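/**
 * qcom_icc_set_noc_qos - apply NoC QoS configuration for a node
 * @src: interconnect node whose QoS port is configured
 * @max_bw: aggregated bandwidth; not used by this QoS scheme
 *
 * Skip nodes without a QoS port (their vote is aggregated on the parent),
 * program the QoS priority when fixed mode is requested and finally write
 * the QoS mode register.
 *
 * Return: 0 on success or a negative errno.
 */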
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
    struct qcom_icc_provider *qp;
    struct qcom_icc_node *qn;
    struct icc_provider *provider;
    u32 mode = NOC_QOS_MODE_BYPASS;
    int rc = 0;

    qn = src->data;
    provider = src->provider;
    qp = to_qcom_provider(provider);

    if (qn->qos.qos_port < 0) {
        dev_dbg(src->provider->dev,
            "NoC QoS: Skipping %s: vote aggregated on parent.\n",
            qn->name);
        return 0;
    }

    if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
        mode = qn->qos.qos_mode;

    if (mode == NOC_QOS_MODE_FIXED) {
        dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
            qn->name);
        rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
        if (rc)
            return rc;
    } else if (mode == NOC_QOS_MODE_BYPASS) {
        dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
            qn->name);
    }

    return regmap_update_bits(qp->regmap,
                  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
                  NOC_QOS_MODEn_MASK, mode);
}

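/**
 * qcom_icc_qos_set - dispatch QoS programming based on the bus type
 * @node: interconnect node to configure
 * @sum_bw: aggregated bandwidth passed on to the bus-specific helper
 *
 * Return: 0 on success or a negative errno.
 */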
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
    struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
    struct qcom_icc_node *qn = node->data;

    dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

    switch (qp->type) {
    case QCOM_ICC_BIMC:
        return qcom_icc_set_bimc_qos(node, sum_bw);
    case QCOM_ICC_QNOC:
        return qcom_icc_set_qnoc_qos(node, sum_bw);
    default:
        return qcom_icc_set_noc_qos(node, sum_bw);
    }
}

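/**
 * qcom_icc_rpm_set - send bandwidth votes to the RPM over SMD
 * @mas_rpm_id: RPM id of the master port, or -1 to skip the master vote
 * @slv_rpm_id: RPM id of the slave port, or -1 to skip the slave vote
 * @sum_bw: aggregated bandwidth to vote for
 *
 * Return: 0 on success or a negative errno from qcom_icc_rpm_smd_send().
 */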
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
    int ret = 0;

    if (mas_rpm_id != -1) {
        ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
                        RPM_BUS_MASTER_REQ,
                        mas_rpm_id,
                        sum_bw);
        if (ret) {
            pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
                   mas_rpm_id, ret);
            return ret;
        }
    }

    if (slv_rpm_id != -1) {
        ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
                        RPM_BUS_SLAVE_REQ,
                        slv_rpm_id,
                        sum_bw);
        if (ret) {
            pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
                   slv_rpm_id, ret);
            return ret;
        }
    }

    return ret;
}

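/**
 * __qcom_icc_set - apply an aggregated bandwidth to one node
 * @n: interconnect node to update
 * @qn: QCOM-specific data of @n
 * @sum_bw: aggregated bandwidth
 *
 * RPM-owned nodes are voted for through the RPM, while AP-owned nodes
 * with a valid QoS mode are programmed directly from the AP.
 *
 * Return: 0 on success or a negative errno.
 */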
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
              u64 sum_bw)
{
    int ret;

    if (!qn->qos.ap_owned) {
        /* send bandwidth request message to the RPM processor */
        ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
        if (ret)
            return ret;
    } else if (qn->qos.qos_mode != -1) {
        /* set bandwidth directly from the AP */
        ret = qcom_icc_qos_set(n, sum_bw);
        if (ret)
            return ret;
    }

    return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clear bucket values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
    struct qcom_icc_node *qn;
    size_t i;

    qn = node->data;
    for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
        qn->sum_avg[i] = 0;
        qn->max_peak[i] = 0;
    }
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
                 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
    size_t i;
    struct qcom_icc_node *qn;

    qn = node->data;

    if (!tag)
        tag = QCOM_ICC_TAG_ALWAYS;

    for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
        if (tag & BIT(i)) {
            qn->sum_avg[i] += avg_bw;
            qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
        }
    }

    *agg_avg += avg_bw;
    *agg_peak = max_t(u32, *agg_peak, peak_bw);
    return 0;
}

/**
 * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_avg: an array for aggregated average bandwidth of buckets
 * @agg_peak: an array for aggregated peak bandwidth of buckets
 * @max_agg_avg: pointer to max value of aggregated average bandwidth
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
                   u64 *agg_avg, u64 *agg_peak,
                   u64 *max_agg_avg)
{
    struct icc_node *node;
    struct qcom_icc_node *qn;
    int i;

    /* Initialise aggregate values */
    for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
        agg_avg[i] = 0;
        agg_peak[i] = 0;
    }

    *max_agg_avg = 0;

    /*
     * Iterate nodes on the interconnect and aggregate bandwidth
     * requests for every bucket.
     */
    list_for_each_entry(node, &provider->nodes, node_list) {
        qn = node->data;
        for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
            agg_avg[i] += qn->sum_avg[i];
            agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
        }
    }

    /* Find maximum values across all buckets */
    for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
        *max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}

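/**
 * qcom_icc_set - interconnect ->set callback for RPM-based providers
 * @src: source node of the path being set
 * @dst: destination node of the path being set, may be NULL
 *
 * Aggregate bandwidth across the whole provider, apply the result to the
 * source and destination nodes, then scale the bus clocks: the "bus_a"
 * clock follows the WAKE bucket while the other clocks follow the SLEEP
 * bucket.
 *
 * Return: 0 on success or a negative errno.
 */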
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
    struct qcom_icc_provider *qp;
    struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
    struct icc_provider *provider;
    u64 sum_bw;
    u64 rate;
    u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
    u64 max_agg_avg;
    int ret, i;
    int bucket;

    src_qn = src->data;
    if (dst)
        dst_qn = dst->data;
    provider = src->provider;
    qp = to_qcom_provider(provider);

    qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);

    sum_bw = icc_units_to_bps(max_agg_avg);

    ret = __qcom_icc_set(src, src_qn, sum_bw);
    if (ret)
        return ret;
    if (dst_qn) {
        ret = __qcom_icc_set(dst, dst_qn, sum_bw);
        if (ret)
            return ret;
    }

    for (i = 0; i < qp->num_clks; i++) {
        /*
         * Use the WAKE bucket for the active clock ("bus_a") and the
         * SLEEP bucket for all other clocks.  If a platform doesn't set
         * interconnect path tags, the sleep bucket is used for all
         * clocks by default.
         *
         * Note, the AMC bucket is not supported yet.
         */
        if (!strcmp(qp->bus_clks[i].id, "bus_a"))
            bucket = QCOM_ICC_BUCKET_WAKE;
        else
            bucket = QCOM_ICC_BUCKET_SLEEP;

        rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
        do_div(rate, src_qn->buswidth);
        rate = min_t(u64, rate, LONG_MAX);

        if (qp->bus_clk_rate[i] == rate)
            continue;

        ret = clk_set_rate(qp->bus_clks[i].clk, rate);
        if (ret) {
            pr_err("%s clk_set_rate error: %d\n",
                   qp->bus_clks[i].id, ret);
            return ret;
        }
        qp->bus_clk_rate[i] = rate;
    }

    return 0;
}

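/* Default bus clock names, used when the platform descriptor lists none */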
static const char * const bus_clocks[] = {
    "bus", "bus_a",
};

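/**
 * qnoc_probe - common probe for RPM-based interconnect providers
 * @pdev: platform device carrying the provider description as match data
 *
 * Defer probing until the RPM SMD proxy is available.  Set up the QoS
 * regmap when the descriptor provides a regmap config, falling back to
 * the parent's regmap if the device has no register resource; acquire
 * and enable the bus clocks; optionally attach the bus power domain;
 * then register the interconnect provider and create its nodes and
 * links.  Child NoC devices, if any, are populated last.
 *
 * Return: 0 on success or a negative errno.
 */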
int qnoc_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    const struct qcom_icc_desc *desc;
    struct icc_onecell_data *data;
    struct icc_provider *provider;
    struct qcom_icc_node * const *qnodes;
    struct qcom_icc_provider *qp;
    struct icc_node *node;
    size_t num_nodes, i;
    const char * const *cds;
    int cd_num;
    int ret;

    /* wait for the RPM proxy */
    if (!qcom_icc_rpm_smd_available())
        return -EPROBE_DEFER;

    desc = of_device_get_match_data(dev);
    if (!desc)
        return -EINVAL;

    qnodes = desc->nodes;
    num_nodes = desc->num_nodes;

    if (desc->num_clocks) {
        cds = desc->clocks;
        cd_num = desc->num_clocks;
    } else {
        cds = bus_clocks;
        cd_num = ARRAY_SIZE(bus_clocks);
    }

    qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
    if (!qp)
        return -ENOMEM;

    qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
                    GFP_KERNEL);
    if (!qp->bus_clk_rate)
        return -ENOMEM;

    data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
                GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    for (i = 0; i < cd_num; i++)
        qp->bus_clks[i].id = cds[i];
    qp->num_clks = cd_num;

    qp->type = desc->type;
    qp->qos_offset = desc->qos_offset;

    if (desc->regmap_cfg) {
        struct resource *res;
        void __iomem *mmio;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
            /* Try parent's regmap */
            qp->regmap = dev_get_regmap(dev->parent, NULL);
            if (qp->regmap)
                goto regmap_done;
            return -ENODEV;
        }

        mmio = devm_ioremap_resource(dev, res);

        if (IS_ERR(mmio)) {
            dev_err(dev, "Cannot ioremap interconnect bus resource\n");
            return PTR_ERR(mmio);
        }

        qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
        if (IS_ERR(qp->regmap)) {
            dev_err(dev, "Cannot regmap interconnect bus resource\n");
            return PTR_ERR(qp->regmap);
        }
    }

regmap_done:
    ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
    if (ret)
        return ret;

    ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
    if (ret)
        return ret;

    if (desc->has_bus_pd) {
        ret = dev_pm_domain_attach(dev, true);
        if (ret)
            return ret;
    }

    provider = &qp->provider;
    INIT_LIST_HEAD(&provider->nodes);
    provider->dev = dev;
    provider->set = qcom_icc_set;
    provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
    provider->aggregate = qcom_icc_bw_aggregate;
    provider->xlate_extended = qcom_icc_xlate_extended;
    provider->data = data;

    ret = icc_provider_add(provider);
    if (ret) {
        dev_err(dev, "error adding interconnect provider: %d\n", ret);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
        return ret;
    }

    for (i = 0; i < num_nodes; i++) {
        size_t j;

        node = icc_node_create(qnodes[i]->id);
        if (IS_ERR(node)) {
            ret = PTR_ERR(node);
            goto err;
        }

        node->name = qnodes[i]->name;
        node->data = qnodes[i];
        icc_node_add(node, provider);

        for (j = 0; j < qnodes[i]->num_links; j++)
            icc_link_create(node, qnodes[i]->links[j]);

        data->nodes[i] = node;
    }
    data->num_nodes = num_nodes;

    platform_set_drvdata(pdev, qp);

    /* Populate child NoC devices if any */
    if (of_get_child_count(dev->of_node) > 0)
        return of_platform_populate(dev->of_node, NULL, NULL, dev);

    return 0;
err:
    icc_nodes_remove(provider);
    clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
    icc_provider_del(provider);

    return ret;
}
EXPORT_SYMBOL(qnoc_probe);

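/**
 * qnoc_remove - common remove for RPM-based interconnect providers
 * @pdev: platform device previously probed with qnoc_probe()
 *
 * Remove the provider's nodes, disable the bus clocks and unregister
 * the interconnect provider.
 *
 * Return: 0 on success or a negative errno from icc_provider_del().
 */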
int qnoc_remove(struct platform_device *pdev)
{
    struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

    icc_nodes_remove(&qp->provider);
    clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
    return icc_provider_del(&qp->provider);
}
EXPORT_SYMBOL(qnoc_remove);