// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#define pr_fmt(fmt)     "uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC       0x0 /* Control Register */
#define    UNIPHIER_SSCC_BST            BIT(20) /* UCWG burst read */
#define    UNIPHIER_SSCC_ACT            BIT(19) /* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG            BIT(18) /* WT gathering on */
#define    UNIPHIER_SSCC_PRD            BIT(17) /* enable pre-fetch */
#define    UNIPHIER_SSCC_ON         BIT(0)  /* enable cache */
#define UNIPHIER_SSCLPDAWCR 0x30    /* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR 0x34    /* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID      0x0 /* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE     0x244   /* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV       0x0 /* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN     0x1 /* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH     0x2 /* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC      0x8 /* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH    0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM     0x248   /* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK       (0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE      (0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL        (0x1 << 17)
#define    UNIPHIER_SSCOQM_CE           BIT(15) /* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV       0x0 /* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN     0x1 /* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH     0x2 /* flush */
#define UNIPHIER_SSCOQAD    0x24c   /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ    0x250   /* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF 0x25c   /* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE       BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE       BIT(0)
#define UNIPHIER_SSCOLPQS   0x260   /* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF         BIT(2)
#define    UNIPHIER_SSCOLPQS_EST        BIT(1)
#define    UNIPHIER_SSCOLPQS_QST        BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
        ((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/**
 * struct uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of the active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
    void __iomem *ctrl_base;
    void __iomem *rev_base;
    void __iomem *op_base;
    void __iomem *way_ctrl_base;
    u32 way_mask;
    u32 nsets;
    u32 line_size;
    u32 range_op_max_size;
    struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken when accessing the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
    /* This sequence need not be atomic.  Do not disable IRQ. */
    writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
               data->op_base + UNIPHIER_SSCOPE);
    /* need a read back to confirm */
    readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
                      unsigned long start,
                      unsigned long size,
                      u32 operation)
{
    unsigned long flags;

    /*
     * No spin lock is necessary here because:
     *
     * [1] This outer cache controller is able to accept maintenance
     * operations from multiple CPUs at a time in an SMP system; if a
     * maintenance operation is under way and another operation is issued,
     * the new one is stored in the queue.  The controller performs one
     * operation after another.  If the queue is full, the status register,
     * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
     * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
     * different instances for each CPU, i.e. each CPU can track the status
     * of the maintenance operations triggered by itself.
     *
     * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
     * SSCOQWN}, are shared between multiple CPUs, but the hardware still
     * guarantees the registration sequence is atomic; write accesses to
     * them are arbitrated by the hardware.  The first accessor to the
     * register, UNIPHIER_SSCOQM, holds the access right and it is released
     * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
     * is holding the access right, other CPUs fail to register operations.
     * One CPU should not hold the access right for a long time, so local
     * IRQs should be disabled during the following sequence.
     */
    local_irq_save(flags);

    /* clear the complete notification flag */
    writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

    do {
        /* set cache operation */
        writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
                   data->op_base + UNIPHIER_SSCOQM);

        /* set address range if needed */
        if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
            writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
            writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
        }
    } while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
              (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

    /* wait until the operation is completed */
    while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
              UNIPHIER_SSCOLPQS_EF))
        cpu_relax();

    local_irq_restore(flags);
}

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
                       u32 operation)
{
    __uniphier_cache_maint_common(data, 0, 0,
                      UNIPHIER_SSCOQM_S_ALL | operation);

    __uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
                     unsigned long start, unsigned long end,
                     u32 operation)
{
    unsigned long size;

    /*
     * If the start address is not aligned,
     * perform a cache operation for the first cache-line
     */
    start = start & ~(data->line_size - 1);

    size = end - start;

    if (unlikely(size >= (unsigned long)(-data->line_size))) {
        /*
         * The range (nearly) covers the whole address space, so the
         * ALIGN() below could overflow; perform a cache operation on
         * the entire cache instead.
         */
        __uniphier_cache_maint_all(data, operation);
        return;
    }

    /*
     * If the end address is not aligned,
     * perform a cache operation for the last cache-line
     */
    size = ALIGN(size, data->line_size);

    while (size) {
        unsigned long chunk_size = min_t(unsigned long, size,
                         data->range_op_max_size);

        __uniphier_cache_maint_common(data, start, chunk_size,
                    UNIPHIER_SSCOQM_S_RANGE | operation);

        start += chunk_size;
        size -= chunk_size;
    }

    __uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
    u32 val = 0;

    if (on)
        val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

    writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
                    struct uniphier_cache_data *data)
{
    unsigned int cpu;

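    /* each possible CPU has its own 32-bit active way control register */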
    for_each_possible_cpu(cpu)
        writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
                       u32 operation)
{
    struct uniphier_cache_data *data;

    list_for_each_entry(data, &uniphier_cache_list, list)
        __uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
    struct uniphier_cache_data *data;

    list_for_each_entry(data, &uniphier_cache_list, list)
        __uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
    uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
    uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
    uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
    uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
    uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
    struct uniphier_cache_data *data;

    list_for_each_entry_reverse(data, &uniphier_cache_list, list)
        __uniphier_cache_enable(data, false);

    uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
    struct uniphier_cache_data *data;

    uniphier_cache_inv_all();

    list_for_each_entry(data, &uniphier_cache_list, list) {
        __uniphier_cache_enable(data, true);
        __uniphier_cache_set_active_ways(data);
    }
}

static void uniphier_cache_sync(void)
{
    struct uniphier_cache_data *data;

    list_for_each_entry(data, &uniphier_cache_list, list)
        __uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
    { .compatible = "socionext,uniphier-system-cache" },
    { /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
                    unsigned int *cache_level)
{
    struct uniphier_cache_data *data;
    u32 level, cache_size;
    struct device_node *next_np;
    int ret = 0;

    if (!of_match_node(uniphier_cache_match, np)) {
        pr_err("L%d: not compatible with uniphier cache\n",
               *cache_level);
        return -EINVAL;
    }

    if (of_property_read_u32(np, "cache-level", &level)) {
        pr_err("L%d: cache-level is not specified\n", *cache_level);
        return -EINVAL;
    }

    if (level != *cache_level) {
        pr_err("L%d: cache-level is unexpected value %d\n",
               *cache_level, level);
        return -EINVAL;
    }

    if (!of_property_read_bool(np, "cache-unified")) {
        pr_err("L%d: cache-unified is not specified\n", *cache_level);
        return -EINVAL;
    }

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
        !is_power_of_2(data->line_size)) {
        pr_err("L%d: cache-line-size is unspecified or invalid\n",
               *cache_level);
        ret = -EINVAL;
        goto err;
    }

    if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
        !is_power_of_2(data->nsets)) {
        pr_err("L%d: cache-sets is unspecified or invalid\n",
               *cache_level);
        ret = -EINVAL;
        goto err;
    }

    if (of_property_read_u32(np, "cache-size", &cache_size) ||
        cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
        pr_err("L%d: cache-size is unspecified or invalid\n",
               *cache_level);
        ret = -EINVAL;
        goto err;
    }

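    /*
     * Example with hypothetical numbers: a 1 MiB cache with 512 sets and
     * 128-byte lines has 1048576 / 512 / 128 = 16 ways, so way_mask
     * becomes GENMASK(15, 0) = 0xffff.
     */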
    data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
                 0);

    data->ctrl_base = of_iomap(np, 0);
    if (!data->ctrl_base) {
        pr_err("L%d: failed to map control register\n", *cache_level);
        ret = -ENOMEM;
        goto err;
    }

    data->rev_base = of_iomap(np, 1);
    if (!data->rev_base) {
        pr_err("L%d: failed to map revision register\n", *cache_level);
        ret = -ENOMEM;
        goto err;
    }

    data->op_base = of_iomap(np, 2);
    if (!data->op_base) {
        pr_err("L%d: failed to map operation register\n", *cache_level);
        ret = -ENOMEM;
        goto err;
    }

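    /* default active way control base; overridden below for some L2 revisions */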
    data->way_ctrl_base = data->ctrl_base + 0xc00;

    if (*cache_level == 2) {
        u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
        /*
         * The size of a range operation is limited to (1 << 22) or less
         * for PH-sLD8 or older SoCs.
         */
        if (revision <= 0x16)
            data->range_op_max_size = (u32)1 << 22;

        /*
         * Unfortunately, the offset address of the active way control
         * base varies from SoC to SoC.
         */
        switch (revision) {
        case 0x11:  /* sLD3 */
            data->way_ctrl_base = data->ctrl_base + 0x870;
            break;
        case 0x12:  /* LD4 */
        case 0x16:  /* sld8 */
            data->way_ctrl_base = data->ctrl_base + 0x840;
            break;
        default:
            break;
        }
    }
    data->range_op_max_size -= data->line_size;

    INIT_LIST_HEAD(&data->list);
    list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

    /*
     * OK, this level has been successfully initialized.  Look for the next
     * level cache.  Do not roll back even if the initialization of the
     * next level cache fails because we want to continue with available
     * cache levels.
     */
    next_np = of_find_next_cache_node(np);
    if (next_np) {
        (*cache_level)++;
        ret = __uniphier_cache_init(next_np, cache_level);
    }
    of_node_put(next_np);

    return ret;
err:
    iounmap(data->op_base);
    iounmap(data->rev_base);
    iounmap(data->ctrl_base);
    kfree(data);

    return ret;
}

int __init uniphier_cache_init(void)
{
    struct device_node *np = NULL;
    unsigned int cache_level;
    int ret = 0;

    /* look for level 2 cache */
    while ((np = of_find_matching_node(np, uniphier_cache_match)))
        if (!of_property_read_u32(np, "cache-level", &cache_level) &&
            cache_level == 2)
            break;

    if (!np)
        return -ENODEV;

    ret = __uniphier_cache_init(np, &cache_level);
    of_node_put(np);

    if (ret) {
        /*
         * Error out only if L2 initialization fails.  Continue with any
         * error on L3 or outer because they are optional.
         */
        if (cache_level == 2) {
            pr_err("failed to initialize L2 cache\n");
            return ret;
        }

        cache_level--;
        ret = 0;
    }

    outer_cache.inv_range = uniphier_cache_inv_range;
    outer_cache.clean_range = uniphier_cache_clean_range;
    outer_cache.flush_range = uniphier_cache_flush_range;
    outer_cache.flush_all = uniphier_cache_flush_all;
    outer_cache.disable = uniphier_cache_disable;
    outer_cache.sync = uniphier_cache_sync;

    uniphier_cache_enable();

    pr_info("enabled outer cache (cache level: %d)\n", cache_level);

    return ret;
}