// SPDX-License-Identifier: GPL-2.0-only
/*
 * dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:  Nicolas Pitre, May 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include "vexpress.h"

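/*
 * DCSCB register offsets. The two RST_HOLD registers gate the reset
 * lines for cluster 0 and cluster 1, and DCS_CFG_R reports how many
 * cores each cluster contains (see dcscb_init() below). The EAG/KFC
 * configuration registers presumably address the two clusters of the
 * RTSM big.LITTLE model (Eagle/Cortex-A15 and Kingfisher/Cortex-A7);
 * they and SYS_SWRESET are not used by this driver.
 */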
#define RST_HOLD0   0x0
#define RST_HOLD1   0x4
#define SYS_SWRESET 0x8
#define RST_STAT0   0xc
#define RST_STAT1   0x10
#define EAG_CFG_R   0x20
#define EAG_CFG_W   0x24
#define KFC_CFG_R   0x28
#define KFC_CFG_W   0x2c
#define DCS_CFG_R   0x30

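/*
 * dcscb_base maps the DCSCB register block; dcscb_allcpus_mask holds,
 * per cluster, one bit for each CPU present, as read from DCS_CFG_R
 * at init time.
 */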
static void __iomem *dcscb_base;
static int dcscb_allcpus_mask[2];

static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
    unsigned int rst_hold, cpumask = (1 << cpu);

    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
        return -EINVAL;

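    /*
     * Release this CPU from reset: clear its bit in the low nibble
     * and the shifted copy at bits [7:4], which appears to be a
     * companion per-CPU reset line in this register.
     */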
    rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
    rst_hold &= ~(cpumask | (cpumask << 4));
    writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
    return 0;
}

static int dcscb_cluster_powerup(unsigned int cluster)
{
    unsigned int rst_hold;

    pr_debug("%s: cluster %u\n", __func__, cluster);
    if (cluster >= 2)
        return -EINVAL;

    /* remove the cluster reset and assert the individual CPU resets */
    rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
    rst_hold &= ~(1 << 8);
    rst_hold |= dcscb_allcpus_mask[cluster];
    writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
    return 0;
}

static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
    unsigned int rst_hold;

    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));

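    /*
     * Assert this CPU's reset-hold bit; the CPU will then be caught
     * by reset once it has disabled its cache and executed WFI.
     */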
    rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
    rst_hold |= (1 << cpu);
    writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
{
    unsigned int rst_hold;

    pr_debug("%s: cluster %u\n", __func__, cluster);
    BUG_ON(cluster >= 2);

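    /* Assert the cluster-wide reset: the inverse of dcscb_cluster_powerup(). */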
    rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
    rst_hold |= (1 << 8);
    writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

static void dcscb_cpu_cache_disable(void)
{
    /* Disable and flush the local CPU cache. */
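    /*
     * "louis" limits the flush to the Level of Unification Inner
     * Shareable, i.e. only the cache levels private to this CPU.
     */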
    v7_exit_coherency_flush(louis);
}

static void dcscb_cluster_cache_disable(void)
{
    /* Flush all cache levels for this cluster. */
    v7_exit_coherency_flush(all);

    /*
     * A full outer cache flush could be needed at this point
     * on platforms with such a cache, depending on where the
     * outer cache sits. In some cases the notion of a "last
     * cluster standing" would need to be implemented if the
     * outer cache is shared across clusters. In any case, when
     * the outer cache needs flushing, there is no concurrent
     * access to the cache controller to worry about and no
     * special locking besides what is already provided by the
     * MCPM state machinery is needed.
     */
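    /*
     * As a hypothetical sketch only: on a platform with an L2x0-style
     * outer cache, the "last cluster standing" case discussed above
     * might call outer_flush_all() at this point.
     */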

    /*
     * Disable cluster-level coherency by masking
     * incoming snoops and DVM messages:
     */
    cci_disable_port_by_cpu(read_cpuid_mpidr());
}

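/*
 * MCPM backend callbacks: the *_powerup hooks run on a live CPU to
 * bring a peer CPU or cluster out of reset, while the
 * *_powerdown_prepare and *_cache_disable hooks run, in that order,
 * on the CPU that is going down.
 */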
static const struct mcpm_platform_ops dcscb_power_ops = {
    .cpu_powerup        = dcscb_cpu_powerup,
    .cluster_powerup    = dcscb_cluster_powerup,
    .cpu_powerdown_prepare  = dcscb_cpu_powerdown_prepare,
    .cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
    .cpu_cache_disable  = dcscb_cpu_cache_disable,
    .cluster_cache_disable  = dcscb_cluster_cache_disable,
};

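/*
 * Early power-up entry point, implemented in assembly (dcscb_setup.S);
 * MCPM invokes it out of reset to re-enable cluster coherency before
 * a full C environment is available.
 */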
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
    struct device_node *node;
    unsigned int cfg;
    int ret;

    if (!cci_probed())
        return -ENODEV;

    node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
    if (!node)
        return -ENODEV;
    dcscb_base = of_iomap(node, 0);
    of_node_put(node);
    if (!dcscb_base)
        return -EADDRNOTAVAIL;
    cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
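    /*
     * Bits [19:16] and [23:20] of DCS_CFG_R hold the number of cores
     * in cluster 0 and cluster 1 respectively; turn each count n into
     * an n-bit "all CPUs present" mask via (1 << n) - 1.
     */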
    dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
    dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

    ret = mcpm_platform_register(&dcscb_power_ops);
    if (!ret)
        ret = mcpm_sync_init(dcscb_power_up_setup);
    if (ret) {
        iounmap(dcscb_base);
        return ret;
    }

    pr_info("VExpress DCSCB support installed\n");

    /*
     * Future entries into the kernel can now go
     * through the cluster entry vectors.
     */
    vexpress_flags_set(__pa_symbol(mcpm_entry_point));

    return 0;
}

early_initcall(dcscb_init);