// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message SMC/HVC
 * Transport driver
 *
 * Copyright 2020 NXP
 */

#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"

/**
 * struct scmi_smc - Structure representing a SCMI smc transport
 *
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @shmem_lock: Lock to protect access to the Tx/Rx shared memory area.
 *		Used when NOT operating in atomic mode.
 * @inflight: Atomic flag marking an in-flight transfer.
 *	      Used when operating in atomic mode.
 * @func_id: smc/hvc call function id
 */
struct scmi_smc {
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	/* Protect access to shmem area */
	struct mutex shmem_lock;
#define INFLIGHT_NONE	MSG_TOKEN_MAX
	atomic_t inflight;
	u32 func_id;
};

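/*
 * Completion interrupt handler: the platform has signalled (via the optional
 * "a2p" interrupt) that the message in the shared memory area has been
 * processed, so notify the SCMI core with the header read back from shmem.
 */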
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
	struct scmi_smc *scmi_info = data;

	scmi_rx_callback(scmi_info->cinfo,
			 shmem_read_header(scmi_info->shmem), NULL);

	return IRQ_HANDLED;
}

static bool smc_chan_available(struct device *dev, int idx)
{
	struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
	if (!np)
		return false;

	of_node_put(np);
	return true;
}

static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_init(&scmi_info->shmem_lock);
}

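/*
 * Try to atomically claim the channel for this transfer: the claim succeeds
 * only if no other transfer is currently in flight, in which case the
 * transfer's sequence number is recorded as the current owner.
 */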
static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
	int ret;

	ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);

	return ret == INFLIGHT_NONE;
}

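/*
 * When built with CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE the channel is
 * guarded by busy-waiting on the inflight flag instead of sleeping on a
 * mutex, so that transmissions can also be issued from atomic context.
 */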
static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
			 struct scmi_xfer *xfer __maybe_unused)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
	else
		mutex_lock(&scmi_info->shmem_lock);
}

static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_unlock(&scmi_info->shmem_lock);
}

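/*
 * Set up the Tx channel: resolve the "shmem" phandle, ioremap the shared
 * memory area, read the "arm,smc-id" function identifier and, if present,
 * request the optional "a2p" completion interrupt.
 */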
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx)
{
	struct device *cdev = cinfo->dev;
	struct scmi_smc *scmi_info;
	resource_size_t size;
	struct resource res;
	struct device_node *np;
	u32 func_id;
	int ret, irq;

	if (!tx)
		return -ENODEV;

	scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
	if (!scmi_info)
		return -ENOMEM;

	np = of_parse_phandle(cdev->of_node, "shmem", 0);
	if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
		of_node_put(np);
		return -ENXIO;
	}

	ret = of_address_to_resource(np, 0, &res);
	of_node_put(np);
	if (ret) {
		dev_err(cdev, "failed to get SCMI Tx shared memory\n");
		return ret;
	}

	size = resource_size(&res);
	scmi_info->shmem = devm_ioremap(dev, res.start, size);
	if (!scmi_info->shmem) {
		dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
		return -EADDRNOTAVAIL;
	}

	ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
	if (ret < 0)
		return ret;

	/*
	 * If there is an interrupt named "a2p", then the service and
	 * completion of a message is signaled by an interrupt rather than
	 * by the return of the SMC call.
	 */
	irq = of_irq_get_byname(cdev->of_node, "a2p");
	if (irq > 0) {
		ret = devm_request_irq(dev, irq, smc_msg_done_isr,
				       IRQF_NO_SUSPEND,
				       dev_name(dev), scmi_info);
		if (ret) {
			dev_err(dev, "failed to setup SCMI smc irq\n");
			return ret;
		}
	} else {
		cinfo->no_completion_irq = true;
	}

	scmi_info->func_id = func_id;
	scmi_info->cinfo = cinfo;
	smc_channel_lock_init(scmi_info);
	cinfo->transport_info = scmi_info;

	return 0;
}

static int smc_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_smc *scmi_info = cinfo->transport_info;

	cinfo->transport_info = NULL;
	scmi_info->cinfo = NULL;

	scmi_free_channel(cinfo, data, id);

	return 0;
}

static int smc_send_message(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;
	struct arm_smccc_res res;

	/*
	 * The channel will be released only once the response has been fully
	 * retrieved, i.e. only after .mark_txdone() has run.
	 */
	smc_channel_lock_acquire(scmi_info, xfer);

	shmem_tx_prepare(scmi_info->shmem, xfer);

	arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);

	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
	if (res.a0) {
		smc_channel_lock_release(scmi_info);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void smc_fetch_response(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	shmem_fetch_response(scmi_info->shmem, xfer);
}

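/*
 * .mark_txdone() is called by the SCMI core only after the synchronous
 * response has been fully read from the shared memory, so this is the point
 * where the channel can safely be released for the next transfer.
 */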
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *__unused)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	smc_channel_lock_release(scmi_info);
}

static const struct scmi_transport_ops scmi_smc_ops = {
	.chan_available = smc_chan_available,
	.chan_setup = smc_chan_setup,
	.chan_free = smc_chan_free,
	.send_message = smc_send_message,
	.mark_txdone = smc_mark_txdone,
	.fetch_response = smc_fetch_response,
};

const struct scmi_desc scmi_smc_desc = {
	.ops = &scmi_smc_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	/*
	 * Setting .sync_cmds_completed_on_ret to true for this transport
	 * assumes that, once the SMC instruction has returned successfully,
	 * the issued SCMI command has already been fully processed by the
	 * SCMI platform firmware, so any response expected for the command
	 * is immediately ready to be fetched from the shared memory area.
	 */
	.sync_cmds_completed_on_ret = true,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};