// SPDX-License-Identifier: GPL-2.0-only
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct reset_controller_dev reset;

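	/* control access to the interconnect path */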
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

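/* Each bit configures cold/warm boot address for one of the 4 CPUs */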
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	if (IS_ERR(__scm->path))
		return -EINVAL;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (IS_ERR_OR_NULL(__scm->path))
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

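	/*
	 * Probe the ARM 64-bit SMC convention first. Only a single value
	 * argument is passed here, so no device is needed to DMA-map
	 * arguments for the secure world.
	 */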
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

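	/*
	 * Some firmware (e.g. on SC7180) does not implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL probe used above, so force the ARM
	 * 64-bit convention when the matching compatible is found.
	 */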
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL,
				    "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

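/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */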
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

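/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results of the SMC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */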
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
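			/* Apply to all CPUs in all affinity levels */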
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

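	/* Need a device for DMA of the additional arguments */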
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

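/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */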
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
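		/* Fallback to the old SCM call */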
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

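/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */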
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
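		/* Fallback to the old SCM call */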
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

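/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, the control would return from this function, otherwise, the
 * cpu jumps to the warm boot entry point set for this cpu upon reset.
 */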
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

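/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */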
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

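	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 */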
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret) {
		qcom_scm_clk_disable();
		goto out;
	}

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_bw_disable();
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

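/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */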
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL(qcom_scm_pas_metadata_release);

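/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */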
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret) {
		qcom_scm_clk_disable();
		return ret;
	}

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

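/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return 0 on success.
 */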
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret) {
		qcom_scm_clk_disable();
		return ret;
	}

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

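/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */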
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret) {
		qcom_scm_clk_disable();
		return ret;
	}

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_bw_disable();
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

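/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */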
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);

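/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */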
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

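	/* the pg table has been initialized already, ignore the error */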
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

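/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership need to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *            flag indicate a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */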
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

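	/* Fill source vmid detail */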
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

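	/* Fill details of mem buff to map */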
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
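	/* Fill details of next vmid detail */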
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);

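/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */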
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

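/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */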
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

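/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */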
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);

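/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */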
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

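/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * Return: 0 on success; -errno on failure.
 */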
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

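/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine).
 *
 * Return: 0 on success; -errno on failure.
 */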
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

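	/*
	 * The key is passed to the secure world by physical address, so copy
	 * it into a DMA-coherent bounce buffer. The buffer is explicitly
	 * zeroed after the call so the key material doesn't linger in memory.
	 */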
	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);

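/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */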
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

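/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */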
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

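/**
 * qcom_scm_is_available() - Checks if SCM is available
 */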
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

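	/* vote for max clk rate for highest performance */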
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

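	/*
	 * If "download mode" was requested via the module parameter, enable
	 * it now; from this point on a warm boot will enter download mode
	 * unless it is disabled again by a clean shutdown (see
	 * qcom_scm_shutdown()).
	 */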
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
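	/* Clean shutdown, disable download mode to allow normal restart */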
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");