// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>

#include <trace/events/scmi.h>

#include "protocols.h"
#include "notify.h"

#define MAX_OPPS		16

enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};

enum {
	PERF_FC_LEVEL,
	PERF_FC_LIMIT,
	PERF_FC_MAX,
};

struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x)	((x) & BIT(1))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x)	((x) & BIT(26))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};

struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;
};

struct scmi_perf_info {
	u32 version;
	int num_domains;
	enum scmi_power_scale power_scale;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};

static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

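/*
 * Retrieve the protocol-wide attributes: number of performance domains,
 * the power scale in use and the location/size of the optional statistics
 * shared memory region.
 */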
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);

		if (POWER_SCALE_IN_MILLIWATT(flags))
			pi->power_scale = SCMI_POWER_MILLIWATTS;
		if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
			if (POWER_SCALE_IN_MICROWATT(flags))
				pi->power_scale = SCMI_POWER_MICROWATTS;

		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

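/*
 * Retrieve the attributes of one performance domain: supported operations
 * and notifications, fastchannel availability, sustained frequency/level
 * (used to derive the level-to-frequency multiplier) and the domain name.
 */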
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				u32 domain, struct perf_dom_info *dom_info,
				u32 version)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				      sizeof(domain), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported, overwrite the short name with the extended one;
	 * on error just carry on and use the already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,
					    dom_info->name, SCMI_MAX_STR_SIZE);

	return ret;
}

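/* Sort helper: order OPPs by ascending performance level. */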
static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}

struct scmi_perf_ipriv {
	u32 domain;
	struct perf_dom_info *perf_dom;
};

static void iter_perf_levels_prepare_message(void *message,
					     unsigned int desc_index,
					     const void *priv)
{
	struct scmi_msg_perf_describe_levels *msg = message;
	const struct scmi_perf_ipriv *p = priv;

	msg->domain = cpu_to_le32(p->domain);
	/* Set the number of OPPs to be skipped/already read */
	msg->level_index = cpu_to_le32(desc_index);
}

static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
					 const void *response, void *priv)
{
	const struct scmi_msg_resp_perf_describe_levels *r = response;

	st->num_returned = le16_to_cpu(r->num_returned);
	st->num_remaining = le16_to_cpu(r->num_remaining);

	return 0;
}

static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
				  const void *response,
				  struct scmi_iterator_state *st, void *priv)
{
	struct scmi_opp *opp;
	const struct scmi_msg_resp_perf_describe_levels *r = response;
	struct scmi_perf_ipriv *p = priv;

	opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
	opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
	p->perf_dom->opp_count++;

	dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
		opp->perf, opp->power, opp->trans_latency_us);

	return 0;
}

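/*
 * Enumerate all the OPPs of a domain using the shared iterator helpers and
 * then sort them by ascending performance level.
 */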
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};
	struct scmi_perf_ipriv ppriv = {
		.domain = domain,
		.perf_dom = perf_dom,
	};

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (perf_dom->opp_count)
		sort(perf_dom->opp, perf_dom->opp_count,
		     sizeof(struct scmi_opp), opp_cmp_func, NULL);

	return ret;
}

static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

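/*
 * Set the performance limits of a domain, writing to the LIMITS fastchannel
 * when one was advertised and falling back to the regular message otherwise.
 */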
static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
		return -EINVAL;

	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
				   domain, min_perf, max_perf);
		iowrite32(max_perf, fci->set_addr);
		iowrite32(min_perf, fci->set_addr + 4);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
}

static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		*max_perf = ioread32(fci->get_addr);
		*min_perf = ioread32(fci->get_addr + 4);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
				   domain, *min_perf, *max_perf);
		return 0;
	}

	return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
}

static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

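/*
 * Request a performance level change, writing to the LEVEL fastchannel when
 * one was advertised and falling back to the regular message otherwise.
 */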
static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
				   domain, level, 0);
		iowrite32(level, fci->set_addr);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_mb_level_set(ph, domain, level, poll);
}

static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				      sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
				   domain, *level, 0);
		return 0;
	}

	return scmi_perf_mb_level_get(ph, domain, level, poll);
}

static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

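/*
 * Discover and map the LEVEL/LIMITS get/set fastchannels advertised by a
 * domain; entries that cannot be initialized are simply left unset.
 */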
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LEVEL_SET, 4, domain,
				   &fc[PERF_FC_LEVEL].set_addr,
				   &fc[PERF_FC_LEVEL].set_db);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LEVEL_GET, 4, domain,
				   &fc[PERF_FC_LEVEL].get_addr, NULL);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LIMITS_SET, 8, domain,
				   &fc[PERF_FC_LIMIT].set_addr,
				   &fc[PERF_FC_LIMIT].set_db);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LIMITS_GET, 8, domain,
				   &fc[PERF_FC_LIMIT].get_addr, NULL);

	*p_fc = fc;
}

/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}

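/*
 * Register all the OPPs of the device's performance domain with the OPP
 * core; on failure, remove the OPPs that were already added.
 */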
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = ph->get_priv(ph);

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}

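/*
 * Return the transition latency of the highest OPP, converted from
 * microseconds to nanoseconds.
 */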
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
				 struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = ph->get_priv(ph);
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
}

static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(ph, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

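/*
 * Walk the (ascending) OPP table and report the estimated power of the first
 * OPP whose frequency is at least *freq, updating *freq to that OPP's
 * frequency.
 */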
static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
				   u32 domain, unsigned long *freq,
				   unsigned long *power)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		opp_freq = opp->perf * dom->mult_factor;
		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
				      struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = ph->get_priv(ph);

	dom = pi->dom_info + scmi_dev_domain_id(dev);

	return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}

static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->power_scale;
}

static const struct scmi_perf_proto_ops perf_proto_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.power_scale_get = scmi_power_scale_get,
};

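/*
 * Notification plumbing: map an event id to the corresponding NOTIFY command
 * and enable/disable limits/level change notifications for a domain.
 */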
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	void *rep = NULL;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	default:
		break;
	}

	return rep;
}

static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	if (!pi)
		return -EINVAL;

	return pi->num_domains;
}

static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

static const struct scmi_event_ops perf_event_ops = {
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};

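/*
 * Protocol entry point: query protocol and per-domain attributes, enumerate
 * the OPPs of each domain and set up fastchannels where supported.
 */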
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
	int domain, ret;
	u32 version;
	struct scmi_perf_info *pinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	/* Populate the version early: attributes parsing depends on it. */
	pinfo->version = version;

	ret = scmi_perf_attributes_get(ph, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		scmi_perf_domain_attributes_get(ph, domain, dom, version);
		scmi_perf_describe_levels_get(ph, domain, dom);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
	}

	return ph->set_priv(ph, pinfo);
}

static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)