// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Enable */
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
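
/*
 * For instance (values derived from the definitions above):
 * UNIPHIER_SSCOQM_S_IS_RANGE(UNIPHIER_SSCOQM_S_RANGE | UNIPHIER_SSCOQM_CM_FLUSH)
 * evaluates to true, while the same test on an "all" operation,
 * UNIPHIER_SSCOQM_S_ALL | UNIPHIER_SSCOQM_CM_FLUSH, evaluates to false.
 */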

/**
 * struct uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual address of the way control registers for this
 *	SoC revision
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);

	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another one is issued, the
	 * new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status
	 * register, UNIPHIER_SSCOPPQSEF, indicates that the queue
	 * registration has failed.  The status registers,
	 * UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have different instances for each
	 * CPU, i.e. each CPU can track the status of the maintenance
	 * operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD,
	 * SSCOQSZ}, are shared between multiple CPUs, but the hardware
	 * guarantees the registration sequence is atomic; write access to
	 * them is arbitrated by the hardware.  The first accessor to the
	 * register UNIPHIER_SSCOQM holds the access right, and it is
	 * released by reading the status register UNIPHIER_SSCOPPQSEF.
	 * While one CPU is holding the access right, other CPUs fail to
	 * register operations.  One CPU should not hold the access right
	 * for a long time, so local IRQs are disabled during the following
	 * sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line.
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means a cache operation for the entire range */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line.
	 */
	size = ALIGN(size, data->line_size);
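
	/*
	 * Worked example with hypothetical numbers: for a 128-byte line,
	 * start = 0x1010 and end = 0x3ff0 become start = 0x1000 and
	 * size = ALIGN(0x2ff0, 128) = 0x3000, which the loop below issues
	 * in chunks of at most range_op_max_size bytes.
	 */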

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					      UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
					struct uniphier_cache_data *data)
{
	unsigned int cpu;

	/* one 32-bit way-control register per CPU */
	for_each_possible_cpu(cpu)
		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_active_ways(data);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ }
};
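
/*
 * A minimal sketch of a matching device tree node, with illustrative values
 * (the three reg regions are the control, revision, and operation registers,
 * in that order; addresses and sizes vary per SoC):
 *
 *	l2: cache-controller@500c0000 {
 *		compatible = "socionext,uniphier-system-cache";
 *		reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
 *		      <0x506c0000 0x400>;
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-sets = <256>;
 *		cache-line-size = <128>;
 *		cache-level = <2>;
 *	};
 */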

static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
				 0);
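
	/*
	 * For example (illustrative numbers, not tied to a specific SoC):
	 * a 512 KB cache with 256 sets and a 128-byte line has
	 * 0x80000 / 256 / 128 = 16 ways, so way_mask = GENMASK(15, 0) = 0xffff.
	 */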

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of a range operation is limited to (1 << 22) or
		 * less for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset of the active way control base
		 * varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sld8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}

	data->range_op_max_size -= data->line_size;
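	/*
	 * Note: if range_op_max_size was never set above, it was still zero,
	 * and the u32 subtraction just performed wraps around to a huge
	 * value; the min_t() in __uniphier_cache_maint_range() then imposes
	 * no practical chunk limit.
	 */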

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list);	/* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the
	 * next level cache.  Do not roll back even if the initialization of
	 * the next level cache fails because we want to continue with
	 * available cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out if L2 initialization fails.  Continue with any
		 * error on L3 or outer because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}
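
/*
 * Usage sketch: uniphier_cache_init() is meant to be called once from the
 * SoC's early boot code, before any outer_cache operation is relied upon.
 * A hypothetical caller (the actual hook name in the mach-uniphier code may
 * differ):
 *
 *	static void __init uniphier_early_init(void)
 *	{
 *		uniphier_cache_init();
 *	}
 */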