/*
 * ARM PMSAv7 (protected memory system architecture) MPU supporting functions.
 */

#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>

#include "mm.h"

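/*
 * A candidate MPU region: a power-of-two sized, naturally aligned window
 * described by its base address and size, plus a bitmap of the eight equal
 * subregions that must be disabled to trim it back to the range that was
 * actually requested.
 */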
struct region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long subreg;
};

static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);

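/*
 * The PMSAv7 region registers are accessed either through CP15 c6
 * (ARMv7-A/R) or through the memory-mapped System Control Block (ARMv7-M).
 * The accessors below hide that difference so the rest of this file can use
 * a single set of helpers.
 */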
#ifndef CONFIG_CPU_V7M

#define DRBAR	__ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR	__ACCESS_CP15(c6, 0, c1, 1)
#define DRSR	__ACCESS_CP15(c6, 0, c1, 2)
#define IRSR	__ACCESS_CP15(c6, 0, c1, 3)
#define DRACR	__ACCESS_CP15(c6, 0, c1, 4)
#define IRACR	__ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR	__ACCESS_CP15(c6, 0, c2, 0)

/* Region number */
static inline void rgnr_write(u32 v)
{
	write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
	write_sysreg(v, DRACR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
	write_sysreg(v, DRSR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	write_sysreg(v, DRBAR);
}

static inline u32 drbar_read(void)
{
	return read_sysreg(DRBAR);
}

/* Optional instruction-side region attributes */

/* I-side region access control register */
static inline void iracr_write(u32 v)
{
	write_sysreg(v, IRACR);
}

/* I-side region size register */
static inline void irsr_write(u32 v)
{
	write_sysreg(v, IRSR);
}

/* I-side region base address register */
static inline void irbar_write(u32 v)
{
	write_sysreg(v, IRBAR);
}

static inline u32 irbar_read(void)
{
	return read_sysreg(IRBAR);
}

#else

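/*
 * On ARMv7-M the MPU is programmed through the memory-mapped RNR/RBAR/RASR
 * registers in the System Control Block. RASR holds both the size/enable
 * and the attribute fields, so dracr_write()/drsr_write() below each do a
 * read-modify-write to update only their half of the register.
 */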
/* Region number */
static inline void rgnr_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}

/* Region access control: the attribute half of RASR */
static inline void dracr_write(u32 v)
{
	u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);

	writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region size/enable: the lower half of RASR */
static inline void drsr_write(u32 v)
{
	u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);

	writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

static inline u32 drbar_read(void)
{
	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

/* ARMv7-M only supports a unified MPU, so the I-side operations are no-ops */
static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline unsigned long irbar_read(void) { return 0; }

#endif

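/*
 * PMSAv7 regions must be power-of-two sized and naturally aligned, but each
 * region is divided into eight equal subregions that can be disabled
 * individually. Try to describe [base, base + size) with a single region:
 * align the base down, round the size up to the next power of two, then
 * disable the leading/trailing subregions that fall outside the requested
 * range. This fails if the overhang is not a whole number of subregions, if
 * the subregions would be smaller than the minimum subregion size, or if
 * more subregions would need disabling than exist.
 */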
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
	unsigned long subreg, bslots, sslots;
	phys_addr_t abase = base & ~(size - 1);
	phys_addr_t asize = base + size - abase;
	phys_addr_t p2size = 1 << __fls(asize);
	phys_addr_t bdiff, sdiff;

	if (p2size != asize)
		p2size *= 2;

	bdiff = base - abase;
	sdiff = p2size - asize;
	subreg = p2size / PMSAv7_NR_SUBREGS;

	if ((bdiff % subreg) || (sdiff % subreg))
		return false;

	bslots = bdiff / subreg;
	sslots = sdiff / subreg;

	if (bslots || sslots) {
		int i;

		if (subreg < PMSAv7_MIN_SUBREG_SIZE)
			return false;

		if (bslots + sslots > PMSAv7_NR_SUBREGS)
			return false;

		/* Disable the leading subregions below the requested base ... */
		for (i = 0; i < bslots; i++)
			_set_bit(i, &region->subreg);

		/* ... and the trailing subregions above the requested end */
		for (i = 1; i <= sslots; i++)
			_set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
	}

	region->base = abase;
	region->size = p2size;

	return true;
}

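/*
 * Cover [base, base + size) with at most "limit" MPU regions. Each iteration
 * first tries to describe the remaining range with a single (possibly
 * subregion-trimmed) region; if that fails, a smaller aligned chunk is
 * chosen and the remainder is handled on subsequent iterations. Returns the
 * number of regions used.
 */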
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
				  unsigned int limit, struct region *regions)
{
	int count = 0;
	phys_addr_t diff = size;
	int attempts = MPU_MAX_REGIONS;

	while (diff) {
		/* Try to cover the region as is (maybe with help of subregions) */
		if (try_split_region(base, size, &regions[count])) {
			count++;
			base += size;
			diff -= size;
			size = diff;
		} else {
			/*
			 * The maximum aligned region might overflow phys_addr_t
			 * if "base" is 0, so keep everything below 4G until
			 * taking the smaller of the aligned region size
			 * ("asize") and the rounded region size ("p2size"),
			 * one of which is guaranteed to be smaller than the
			 * maximum physical address.
			 */
			phys_addr_t asize = (base - 1) ^ base;
			phys_addr_t p2size = (1 << __fls(diff)) - 1;

			size = asize < p2size ? asize + 1 : p2size + 1;
		}

		if (count > limit)
			break;

		if (!attempts)
			break;

		attempts--;
	}

	return count;
}

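/*
 * MPU initialisation: trim the memblock view of lowmem so that what remains
 * can be covered exactly by the MPU regions available for RAM, after
 * reserving slots for the background region, the vectors region (on
 * ARMv7-A/R) and, for XIP kernels, the ROM.
 */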
void __init pmsav7_adjust_lowmem_bounds(void)
{
	phys_addr_t specified_mem_size = 0, total_mem_size = 0;
	phys_addr_t mem_start;
	phys_addr_t mem_end;
	phys_addr_t reg_start, reg_end;
	unsigned int mem_max_regions;
	bool first = true;
	int num;
	u64 i;

	/* Free up PMSAv7_PROBE_REGION */
	mpu_min_region_order = __mpu_min_region_order();

	/* How many regions are supported */
	mpu_max_regions = __mpu_max_regions();

	mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

	/* We need to keep one slot for the background region */
	mem_max_regions--;

#ifndef CONFIG_CPU_V7M
	/* ... and one for vectors */
	mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
	/* ... plus some regions to cover the XIP ROM */
	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
			      mem_max_regions, xip);

	mem_max_regions -= num;
#endif

	for_each_mem_range(i, &reg_start, &reg_end) {
		if (first) {
			phys_addr_t phys_offset = PHYS_OFFSET;

			/* Initially only use memory contiguous from PHYS_OFFSET */
			if (reg_start != phys_offset)
				panic("First memory bank must be contiguous from PHYS_OFFSET");

			mem_start = reg_start;
			mem_end = reg_end;
			specified_mem_size = mem_end - mem_start;
			first = false;
		} else {
			/*
			 * memblock auto-merges contiguous blocks, so remove
			 * all blocks afterwards in one go (we can't remove
			 * blocks separately while iterating).
			 */
			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
				  &mem_end, &reg_start);
			memblock_remove(reg_start, 0 - reg_start);
			break;
		}
	}

	memset(mem, 0, sizeof(mem));
	num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

	for (i = 0; i < num; i++) {
		unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;

		total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

		pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
			 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
	}

	if (total_mem_size != specified_mem_size) {
		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
			&specified_mem_size, &total_mem_size);
		memblock_remove(mem_start + total_mem_size,
				specified_mem_size - total_mem_size);
	}
}

static int __init __mpu_max_regions(void)
{
	/*
	 * We don't support a different number of I/D side regions, so if we
	 * have separate instruction and data memory maps then return
	 * whichever side has the smaller number of supported regions.
	 */
	u32 dregions, iregions, mpuir;

	mpuir = read_cpuid_mputype();

	dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

	/* Check for separate d-side and i-side memory maps */
	if (mpuir & MPUIR_nU)
		iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

	/* Use the smaller of the two maxima */
	return min(dregions, iregions);
}

static int __init mpu_iside_independent(void)
{
	/* MPUIR.nU specifies whether there is *not* a unified memory map */
	return read_cpuid_mputype() & MPUIR_nU;
}

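/*
 * Probe the minimum supported region order (log2 of the smallest region
 * size). Writing an all-ones base address to DRBAR and reading it back
 * leaves the bits below the minimum supported alignment clear, so the
 * position of the lowest set bit in the read-back value is the minimum
 * region order.
 */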
static int __init __mpu_min_region_order(void)
{
	u32 drbar_result, irbar_result;

	/* We've kept a region free for this probing */
	rgnr_write(PMSAv7_PROBE_REGION);
	isb();

	/* Write an all-ones base address and see which bits stick */
	drbar_write(0xFFFFFFFC);
	drbar_result = irbar_result = drbar_read();
	drbar_write(0x0);

	/* If the MPU is non-unified, probe the I-side as well */
	if (mpu_iside_independent()) {
		irbar_write(0xFFFFFFFC);
		irbar_result = irbar_read();
		irbar_write(0x0);
	}
	isb(); /* Ensure that the MPU region operations have completed */

	/* Convert the read-back base address into a region order */
	return __ffs(max(drbar_result, irbar_result));
}

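/*
 * Program one MPU region: select it via the region number register, set the
 * base address and access attributes, then write the size/enable (and
 * subregion-disable) field last so the region only becomes live once it is
 * fully described. The I-side registers are mirrored when the MPU has an
 * independent instruction-side memory map.
 */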
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
				   unsigned int size_order, unsigned int properties,
				   unsigned int subregions, bool need_flush)
{
	u32 size_data;

	/* We kept a region free for probing the minimum region order */
	if (number > mpu_max_regions
	    || number >= MPU_MAX_REGIONS)
		return -ENOENT;

	if (size_order > 32)
		return -ENOMEM;

	if (size_order < mpu_min_region_order)
		return -ENOMEM;

	/* Writing N to the size field specifies a region size of 2^(N+1) */
	size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
	size_data |= subregions << PMSAv7_RSR_SD;

	if (need_flush)
		flush_cache_all();

	dsb(); /* Ensure all previous data accesses occur with old mappings */
	rgnr_write(number);
	isb();
	drbar_write(start);
	dracr_write(properties);
	isb();
	drsr_write(size_data);

	/* Check for independent I-side registers */
	if (mpu_iside_independent()) {
		irbar_write(start);
		iracr_write(properties);
		isb();
		irsr_write(size_data);
	}
	isb();

	/* Store region info (we treat the I/D sides the same, so only store D) */
	mpu_rgn_info.rgns[number].dracr = properties;
	mpu_rgn_info.rgns[number].drbar = start;
	mpu_rgn_info.rgns[number].drsr = size_data;

	mpu_rgn_info.used++;

	return 0;
}

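/*
 * Set up the default MPU regions: a background region covering the whole
 * address space, the XIP ROM (if any), the RAM regions computed by
 * pmsav7_adjust_lowmem_bounds(), and the vectors region on ARMv7-A/R.
 * The order matters, since higher-numbered regions take priority over
 * lower-numbered ones where they overlap.
 */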
void __init pmsav7_setup(void)
{
	int i, region = 0, err = 0;

	/* Background */
	err |= mpu_setup_region(region++, 0, 32,
				PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
				0, false);

#ifdef CONFIG_XIP_KERNEL
	/* ROM */
	for (i = 0; i < ARRAY_SIZE(xip); i++) {
		/*
		 * If we overwrite the cacheable RAM region set up by the
		 * early boot code, all subsequent data accesses until RAM
		 * is set up again below would go through the uncacheable
		 * background region, so clean and invalidate the cache
		 * first.
		 */
		bool need_flush = region == PMSAv7_RAM_REGION;

		if (!xip[i].size)
			continue;

		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
					PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
					xip[i].subreg, need_flush);
	}
#endif

	/* RAM */
	for (i = 0; i < ARRAY_SIZE(mem); i++) {
		if (!mem[i].size)
			continue;

		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
					PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
					mem[i].subreg, false);
	}

	/* Vectors */
#ifndef CONFIG_CPU_V7M
	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
				PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
				0, false);
#endif

	if (err) {
		panic("MPU region initialization failure! %d", err);
	} else {
		pr_info("Using ARMv7 PMSA Compliant MPU. Region independence: %s, Used %d of %d regions\n",
			mpu_iside_independent() ? "Yes" : "No",
			mpu_rgn_info.used, mpu_max_regions);
	}
}