Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * System Control and Management Interface (SCMI) Clock Protocol
0004  *
0005  * Copyright (C) 2018-2022 ARM Ltd.
0006  */
0007 
0008 #include <linux/module.h>
0009 #include <linux/limits.h>
0010 #include <linux/sort.h>
0011 
0012 #include "protocols.h"
0013 #include "notify.h"
0014 
/* Clock protocol message IDs (protocol-specific commands). */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};
0025 
/* Response payload for PROTOCOL_ATTRIBUTES on the Clock protocol. */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;	/* number of clock domains exposed */
	u8 max_async_req;	/* max concurrent async rate-set requests */
	u8 reserved;
};
0031 
/* Response payload for CLOCK_ATTRIBUTES of a single clock domain. */
struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define CLOCK_ENABLE	BIT(0)
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	/* Only provided by SCMI v3.1+ platforms (see scmi_clock_attributes_get) */
	__le32 clock_enable_latency;
};
0041 
/* Request payload for CLOCK_CONFIG_SET. */
struct scmi_clock_set_config {
	__le32 id;		/* clock domain identifier */
	__le32 attributes;	/* config bits, e.g. CLOCK_ENABLE */
};
0046 
/* Request payload for CLOCK_DESCRIBE_RATES (paged by rate_index). */
struct scmi_msg_clock_describe_rates {
	__le32 id;		/* clock domain identifier */
	__le32 rate_index;	/* number of rates to skip (resume point) */
};
0051 
/*
 * Response payload for CLOCK_DESCRIBE_RATES: a flags word followed by a
 * variable number of (low, high) 32-bit rate pairs; either a discrete list
 * or a min/max/step triplet depending on bit 12 of num_rates_flags.
 */
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
/* Assemble a 64-bit rate from one little-endian (low, high) pair */
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
0067 
/* Request payload for CLOCK_RATE_SET. */
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;		/* clock domain identifier */
	__le32 value_low;	/* requested rate, low 32 bits */
	__le32 value_high;	/* requested rate, high 32 bits */
};
0078 
/* Delayed response payload for an asynchronous CLOCK_RATE_SET. */
struct scmi_msg_resp_set_rate_complete {
	__le32 id;		/* clock domain the completion refers to */
	__le32 rate_low;	/* final rate, low 32 bits */
	__le32 rate_high;	/* final rate, high 32 bits */
};
0084 
/* Request payload for CLOCK_RATE_NOTIFY / CLOCK_RATE_CHANGE_REQUESTED_NOTIFY. */
struct scmi_msg_clock_rate_notify {
	__le32 clk_id;		/* clock domain identifier */
	__le32 notify_enable;	/* BIT(0) set to enable notifications */
};
0089 
/* Notification payload delivered by the platform on rate-change events. */
struct scmi_clock_rate_notify_payld {
	__le32 agent_id;	/* agent that caused/requested the change */
	__le32 clock_id;	/* clock domain identifier */
	__le32 rate_low;	/* new/requested rate, low 32 bits */
	__le32 rate_high;	/* new/requested rate, high 32 bits */
};
0096 
/* Per-instance private state for the Clock protocol, stashed via set_priv(). */
struct clock_info {
	u32 version;		/* negotiated protocol version */
	int num_clocks;		/* number of clock domains */
	int max_async_req;	/* platform limit on in-flight async rate-sets */
	atomic_t cur_async_req;	/* current in-flight async rate-set count */
	struct scmi_clock_info *clk;	/* array of num_clocks descriptors */
};
0104 
/*
 * Map a notification event index (as passed to set_notify_enabled) to the
 * protocol command that enables/disables that notification.
 */
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
0109 
0110 static int
0111 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
0112                    struct clock_info *ci)
0113 {
0114     int ret;
0115     struct scmi_xfer *t;
0116     struct scmi_msg_resp_clock_protocol_attributes *attr;
0117 
0118     ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0119                       0, sizeof(*attr), &t);
0120     if (ret)
0121         return ret;
0122 
0123     attr = t->rx.buf;
0124 
0125     ret = ph->xops->do_xfer(ph, t);
0126     if (!ret) {
0127         ci->num_clocks = le16_to_cpu(attr->num_clocks);
0128         ci->max_async_req = attr->max_async_req;
0129     }
0130 
0131     ph->xops->xfer_put(ph, t);
0132     return ret;
0133 }
0134 
0135 static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
0136                      u32 clk_id, struct scmi_clock_info *clk,
0137                      u32 version)
0138 {
0139     int ret;
0140     u32 attributes;
0141     struct scmi_xfer *t;
0142     struct scmi_msg_resp_clock_attributes *attr;
0143 
0144     ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
0145                       sizeof(clk_id), sizeof(*attr), &t);
0146     if (ret)
0147         return ret;
0148 
0149     put_unaligned_le32(clk_id, t->tx.buf);
0150     attr = t->rx.buf;
0151 
0152     ret = ph->xops->do_xfer(ph, t);
0153     if (!ret) {
0154         u32 latency = 0;
0155         attributes = le32_to_cpu(attr->attributes);
0156         strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
0157         /* clock_enable_latency field is present only since SCMI v3.1 */
0158         if (PROTOCOL_REV_MAJOR(version) >= 0x2)
0159             latency = le32_to_cpu(attr->clock_enable_latency);
0160         clk->enable_latency = latency ? : U32_MAX;
0161     }
0162 
0163     ph->xops->xfer_put(ph, t);
0164 
0165     /*
0166      * If supported overwrite short name with the extended one;
0167      * on error just carry on and use already provided short name.
0168      */
0169     if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
0170         if (SUPPORTS_EXTENDED_NAMES(attributes))
0171             ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
0172                             clk->name,
0173                             SCMI_MAX_STR_SIZE);
0174 
0175         if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
0176             clk->rate_changed_notifications = true;
0177         if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
0178             clk->rate_change_requested_notifications = true;
0179     }
0180 
0181     return ret;
0182 }
0183 
0184 static int rate_cmp_func(const void *_r1, const void *_r2)
0185 {
0186     const u64 *r1 = _r1, *r2 = _r2;
0187 
0188     if (*r1 < *r2)
0189         return -1;
0190     else if (*r1 == *r2)
0191         return 0;
0192     else
0193         return 1;
0194 }
0195 
/* Private context threaded through the DESCRIBE_RATES iterator callbacks. */
struct scmi_clk_ipriv {
	struct device *dev;		/* device used for diagnostics */
	u32 clk_id;			/* clock domain being described */
	struct scmi_clock_info *clk;	/* descriptor being filled in */
};
0201 
0202 static void iter_clk_describe_prepare_message(void *message,
0203                           const unsigned int desc_index,
0204                           const void *priv)
0205 {
0206     struct scmi_msg_clock_describe_rates *msg = message;
0207     const struct scmi_clk_ipriv *p = priv;
0208 
0209     msg->id = cpu_to_le32(p->clk_id);
0210     /* Set the number of rates to be skipped/already read */
0211     msg->rate_index = cpu_to_le32(desc_index);
0212 }
0213 
0214 static int
0215 iter_clk_describe_update_state(struct scmi_iterator_state *st,
0216                    const void *response, void *priv)
0217 {
0218     u32 flags;
0219     struct scmi_clk_ipriv *p = priv;
0220     const struct scmi_msg_resp_clock_describe_rates *r = response;
0221 
0222     flags = le32_to_cpu(r->num_rates_flags);
0223     st->num_remaining = NUM_REMAINING(flags);
0224     st->num_returned = NUM_RETURNED(flags);
0225     p->clk->rate_discrete = RATE_DISCRETE(flags);
0226 
0227     /* Warn about out of spec replies ... */
0228     if (!p->clk->rate_discrete &&
0229         (st->num_returned != 3 || st->num_remaining != 0)) {
0230         dev_warn(p->dev,
0231              "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
0232              p->clk->name, st->num_returned, st->num_remaining,
0233              st->rx_len);
0234 
0235         /*
0236          * A known quirk: a triplet is returned but num_returned != 3
0237          * Check for a safe payload size and fix.
0238          */
0239         if (st->num_returned != 3 && st->num_remaining == 0 &&
0240             st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
0241             st->num_returned = 3;
0242             st->num_remaining = 0;
0243         } else {
0244             dev_err(p->dev,
0245                 "Cannot fix out-of-spec reply !\n");
0246             return -EPROTO;
0247         }
0248     }
0249 
0250     return 0;
0251 }
0252 
0253 static int
0254 iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
0255                    const void *response,
0256                    struct scmi_iterator_state *st, void *priv)
0257 {
0258     int ret = 0;
0259     struct scmi_clk_ipriv *p = priv;
0260     const struct scmi_msg_resp_clock_describe_rates *r = response;
0261 
0262     if (!p->clk->rate_discrete) {
0263         switch (st->desc_index + st->loop_idx) {
0264         case 0:
0265             p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
0266             break;
0267         case 1:
0268             p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
0269             break;
0270         case 2:
0271             p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
0272             break;
0273         default:
0274             ret = -EINVAL;
0275             break;
0276         }
0277     } else {
0278         u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
0279 
0280         *rate = RATE_TO_U64(r->rate[st->loop_idx]);
0281         p->clk->list.num_rates++;
0282     }
0283 
0284     return ret;
0285 }
0286 
0287 static int
0288 scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
0289                   struct scmi_clock_info *clk)
0290 {
0291     int ret;
0292     void *iter;
0293     struct scmi_iterator_ops ops = {
0294         .prepare_message = iter_clk_describe_prepare_message,
0295         .update_state = iter_clk_describe_update_state,
0296         .process_response = iter_clk_describe_process_response,
0297     };
0298     struct scmi_clk_ipriv cpriv = {
0299         .clk_id = clk_id,
0300         .clk = clk,
0301         .dev = ph->dev,
0302     };
0303 
0304     iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
0305                         CLOCK_DESCRIBE_RATES,
0306                         sizeof(struct scmi_msg_clock_describe_rates),
0307                         &cpriv);
0308     if (IS_ERR(iter))
0309         return PTR_ERR(iter);
0310 
0311     ret = ph->hops->iter_response_run(iter);
0312     if (ret)
0313         return ret;
0314 
0315     if (!clk->rate_discrete) {
0316         dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
0317             clk->range.min_rate, clk->range.max_rate,
0318             clk->range.step_size);
0319     } else if (clk->list.num_rates) {
0320         sort(clk->list.rates, clk->list.num_rates,
0321              sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
0322     }
0323 
0324     return ret;
0325 }
0326 
0327 static int
0328 scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
0329             u32 clk_id, u64 *value)
0330 {
0331     int ret;
0332     struct scmi_xfer *t;
0333 
0334     ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
0335                       sizeof(__le32), sizeof(u64), &t);
0336     if (ret)
0337         return ret;
0338 
0339     put_unaligned_le32(clk_id, t->tx.buf);
0340 
0341     ret = ph->xops->do_xfer(ph, t);
0342     if (!ret)
0343         *value = get_unaligned_le64(t->rx.buf);
0344 
0345     ph->xops->xfer_put(ph, t);
0346     return ret;
0347 }
0348 
/*
 * Set the rate of clock @clk_id, going asynchronous when the platform
 * advertised an async budget that is not yet exhausted, synchronous
 * otherwise.
 *
 * Return: 0 on success, negative error code otherwise (-EPROTO if an async
 * completion refers to a different clock).
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/*
	 * The counter is incremented whenever max_async_req is non-zero
	 * (even when the budget is exceeded and we stay synchronous) and
	 * decremented under the same condition below, so it stays balanced.
	 */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			resp = t->rx.buf;
			/* Delayed response must refer to the clock we set */
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}
0395 
0396 static int
0397 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
0398               u32 config, bool atomic)
0399 {
0400     int ret;
0401     struct scmi_xfer *t;
0402     struct scmi_clock_set_config *cfg;
0403 
0404     ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
0405                       sizeof(*cfg), 0, &t);
0406     if (ret)
0407         return ret;
0408 
0409     t->hdr.poll_completion = atomic;
0410 
0411     cfg = t->tx.buf;
0412     cfg->id = cpu_to_le32(clk_id);
0413     cfg->attributes = cpu_to_le32(config);
0414 
0415     ret = ph->xops->do_xfer(ph, t);
0416 
0417     ph->xops->xfer_put(ph, t);
0418     return ret;
0419 }
0420 
/* Enable clock @clk_id; may sleep waiting for the platform reply. */
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
0425 
/* Disable clock @clk_id; may sleep waiting for the platform reply. */
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, false);
}
0430 
/* Enable clock @clk_id from atomic context (polling, no sleep). */
static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
				    u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}
0436 
/* Disable clock @clk_id from atomic context (polling, no sleep). */
static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
				     u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, true);
}
0442 
/* Return the number of clock domains discovered at protocol init. */
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}
0449 
0450 static const struct scmi_clock_info *
0451 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
0452 {
0453     struct scmi_clock_info *clk;
0454     struct clock_info *ci = ph->get_priv(ph);
0455 
0456     if (clk_id >= ci->num_clocks)
0457         return NULL;
0458 
0459     clk = ci->clk + clk_id;
0460     if (!clk->name[0])
0461         return NULL;
0462 
0463     return clk;
0464 }
0465 
/* Clock protocol operations exposed to SCMI driver users. */
static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.enable_atomic = scmi_clock_enable_atomic,
	.disable_atomic = scmi_clock_disable_atomic,
};
0476 
0477 static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
0478                 u32 clk_id, int message_id, bool enable)
0479 {
0480     int ret;
0481     struct scmi_xfer *t;
0482     struct scmi_msg_clock_rate_notify *notify;
0483 
0484     ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
0485     if (ret)
0486         return ret;
0487 
0488     notify = t->tx.buf;
0489     notify->clk_id = cpu_to_le32(clk_id);
0490     notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
0491 
0492     ret = ph->xops->do_xfer(ph, t);
0493 
0494     ph->xops->xfer_put(ph, t);
0495     return ret;
0496 }
0497 
0498 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
0499                        u8 evt_id, u32 src_id, bool enable)
0500 {
0501     int ret, cmd_id;
0502 
0503     if (evt_id >= ARRAY_SIZE(evt_2_cmd))
0504         return -EINVAL;
0505 
0506     cmd_id = evt_2_cmd[evt_id];
0507     ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
0508     if (ret)
0509         pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
0510              evt_id, src_id, ret);
0511 
0512     return ret;
0513 }
0514 
0515 static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
0516                      u8 evt_id, ktime_t timestamp,
0517                      const void *payld, size_t payld_sz,
0518                      void *report, u32 *src_id)
0519 {
0520     const struct scmi_clock_rate_notify_payld *p = payld;
0521     struct scmi_clock_rate_notif_report *r = report;
0522 
0523     if (sizeof(*p) != payld_sz ||
0524         (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
0525          evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
0526         return NULL;
0527 
0528     r->timestamp = timestamp;
0529     r->agent_id = le32_to_cpu(p->agent_id);
0530     r->clock_id = le32_to_cpu(p->clock_id);
0531     r->rate = get_unaligned_le64(&p->rate_low);
0532     *src_id = r->clock_id;
0533 
0534     return r;
0535 }
0536 
/*
 * Notification-framework hook: number of notification sources, i.e. the
 * number of clock domains.
 */
static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	/* Protocol private data not yet initialized */
	if (!ci)
		return -EINVAL;

	return ci->num_clocks;
}
0546 
/* Notification events this protocol can deliver, with payload/report sizes. */
static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};
0559 
/* Callbacks wired into the SCMI notification core. */
static const struct scmi_event_ops clk_event_ops = {
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};
0565 
/* Event descriptor bundle registered with the notification core. */
static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};
0572 
0573 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
0574 {
0575     u32 version;
0576     int clkid, ret;
0577     struct clock_info *cinfo;
0578 
0579     ret = ph->xops->version_get(ph, &version);
0580     if (ret)
0581         return ret;
0582 
0583     dev_dbg(ph->dev, "Clock Version %d.%d\n",
0584         PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
0585 
0586     cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
0587     if (!cinfo)
0588         return -ENOMEM;
0589 
0590     ret = scmi_clock_protocol_attributes_get(ph, cinfo);
0591     if (ret)
0592         return ret;
0593 
0594     cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
0595                   sizeof(*cinfo->clk), GFP_KERNEL);
0596     if (!cinfo->clk)
0597         return -ENOMEM;
0598 
0599     for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
0600         struct scmi_clock_info *clk = cinfo->clk + clkid;
0601 
0602         ret = scmi_clock_attributes_get(ph, clkid, clk, version);
0603         if (!ret)
0604             scmi_clock_describe_rates_get(ph, clkid, clk);
0605     }
0606 
0607     cinfo->version = version;
0608     return ph->set_priv(ph, cinfo);
0609 }
0610 
/* Protocol descriptor registered with the SCMI core. */
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
};

/* Generates the module register/unregister entry points for this protocol */
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)