[Code-viewer capture: arch/arm/include/asm/memory.h, rendered via the OSCL-LXR cross-referencer; the navigation chrome of the original page has been reduced to this note.]
0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *  arch/arm/include/asm/memory.h
0004  *
0005  *  Copyright (C) 2000-2002 Russell King
0006  *  modification for nommu, Hyok S. Choi, 2004
0007  *
0008  *  Note: this file should not be included by non-asm/.h files
0009  */
0010 #ifndef __ASM_ARM_MEMORY_H
0011 #define __ASM_ARM_MEMORY_H
0012 
0013 #include <linux/compiler.h>
0014 #include <linux/const.h>
0015 #include <linux/types.h>
0016 #include <linux/sizes.h>
0017 
0018 #ifdef CONFIG_NEED_MACH_MEMORY_H
0019 #include <mach/memory.h>
0020 #endif
0021 #include <asm/kasan_def.h>
0022 
0023 /*
0024  * PAGE_OFFSET: the virtual address of the start of lowmem, memory above
0025  *   the virtual address range for userspace.
0026  * KERNEL_OFFSET: the virtual address of the start of the kernel image.
0027  *   we may further offset this with TEXT_OFFSET in practice.
0028  */
0029 #define PAGE_OFFSET     UL(CONFIG_PAGE_OFFSET)
0030 #define KERNEL_OFFSET       (PAGE_OFFSET)
0031 
0032 #ifdef CONFIG_MMU
0033 
0034 /*
0035  * TASK_SIZE - the maximum size of a user space task.
0036  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
0037  */
0038 #ifndef CONFIG_KASAN
0039 #define TASK_SIZE       (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
0040 #else
0041 #define TASK_SIZE       (KASAN_SHADOW_START)
0042 #endif
0043 #define TASK_UNMAPPED_BASE  ALIGN(TASK_SIZE / 3, SZ_16M)
0044 
0045 /*
0046  * The maximum size of a 26-bit user space task.
0047  */
0048 #define TASK_SIZE_26        (UL(1) << 26)
0049 
0050 /*
0051  * The module space lives between the addresses given by TASK_SIZE
0052  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
0053  */
0054 #ifndef CONFIG_THUMB2_KERNEL
0055 #define MODULES_VADDR       (PAGE_OFFSET - SZ_16M)
0056 #else
0057 /* smaller range for Thumb-2 symbols relocation (2^24)*/
0058 #define MODULES_VADDR       (PAGE_OFFSET - SZ_8M)
0059 #endif
0060 
0061 #if TASK_SIZE > MODULES_VADDR
0062 #error Top of user space clashes with start of module space
0063 #endif
0064 
0065 /*
0066  * The highmem pkmap virtual space shares the end of the module area.
0067  */
0068 #ifdef CONFIG_HIGHMEM
0069 #define MODULES_END     (PAGE_OFFSET - PMD_SIZE)
0070 #else
0071 #define MODULES_END     (PAGE_OFFSET)
0072 #endif
0073 
0074 /*
0075  * The XIP kernel gets mapped at the bottom of the module vm area.
0076  * Since we use sections to map it, this macro replaces the physical address
0077  * with its virtual address while keeping offset from the base section.
0078  */
0079 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
0080 
/*
 * Fixed virtual window for the flattened device tree: two sections
 * starting at 0xff800000.  FDT_VIRT_BASE() ORs the physical base's
 * offset within its section ((physbase) % SECTION_SIZE) into the fixed
 * virtual base, so the blob keeps its sub-section offset under the
 * fixed mapping (mapping is presumably section-granular -- confirm
 * against the early FDT mapping code).
 */
0081 #define FDT_FIXED_BASE      UL(0xff800000)
0082 #define FDT_FIXED_SIZE      (2 * SECTION_SIZE)
0083 #define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
0084 
0085 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
0086 /*
0087  * Allow 16MB-aligned ioremap pages
0088  */
0089 #define IOREMAP_MAX_ORDER   24
0090 #endif
0091 
0092 #define VECTORS_BASE        UL(0xffff0000)
0093 
0094 #else /* CONFIG_MMU */
0095 
0096 #ifndef __ASSEMBLY__
0097 extern unsigned long setup_vectors_base(void);
0098 extern unsigned long vectors_base;
0099 #define VECTORS_BASE        vectors_base
0100 #endif
0101 
0102 /*
0103  * The limitation of user task size can grow up to the end of free ram region.
0104  * It is difficult to define and perhaps will never meet the original meaning
0105  * of this define that was meant to.
0106  * Fortunately, there is no reference for this in noMMU mode, for now.
0107  */
0108 #define TASK_SIZE       UL(0xffffffff)
0109 
0110 #ifndef TASK_UNMAPPED_BASE
0111 #define TASK_UNMAPPED_BASE  UL(0x00000000)
0112 #endif
0113 
0114 #ifndef END_MEM
0115 #define END_MEM             (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
0116 #endif
0117 
0118 /*
0119  * The module can be at any place in ram in nommu mode.
0120  */
0121 #define MODULES_END     (END_MEM)
0122 #define MODULES_VADDR       PAGE_OFFSET
0123 
0124 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
0125 #define FDT_VIRT_BASE(physbase)  ((void *)(physbase))
0126 
0127 #endif /* !CONFIG_MMU */
0128 
0129 #ifdef CONFIG_XIP_KERNEL
0130 #define KERNEL_START        _sdata
0131 #else
0132 #define KERNEL_START        _stext
0133 #endif
0134 #define KERNEL_END      _end
0135 
0136 /*
0137  * We fix the TCM memories max 32 KiB ITCM resp DTCM at these
0138  * locations
0139  */
0140 #ifdef CONFIG_HAVE_TCM
0141 #define ITCM_OFFSET UL(0xfffe0000)
0142 #define DTCM_OFFSET UL(0xfffe8000)
0143 #endif
0144 
0145 /*
0146  * Convert a page to/from a physical address
0147  */
0148 #define page_to_phys(page)  (__pfn_to_phys(page_to_pfn(page)))
0149 #define phys_to_page(phys)  (pfn_to_page(__phys_to_pfn(phys)))
0150 
0151 /*
0152  * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
0153  * memory.  This is used for XIP and NoMMU kernels, and on platforms that don't
0154  * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
0155  * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
0156  */
0157 #define PLAT_PHYS_OFFSET    UL(CONFIG_PHYS_OFFSET)
0158 
0159 #ifndef __ASSEMBLY__
0160 
0161 /*
0162  * Physical start and end address of the kernel sections. These addresses are
0163  * 2MB-aligned to match the section mappings placed over the kernel. We use
0164  * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
0165  */
0166 extern u64 kernel_sec_start;
0167 extern u64 kernel_sec_end;
0168 
0169 /*
0170  * Physical vs virtual RAM address space conversion.  These are
0171  * private definitions which should NOT be used outside memory.h
0172  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
0173  *
0174  * PFNs are used to describe any physical page; this means
0175  * PFN 0 == physical address 0.
0176  */
0177 
0178 #if defined(CONFIG_ARM_PATCH_PHYS_VIRT)
0179 
0180 /*
0181  * Constants used to force the right instruction encodings and shifts
0182  * so that all we need to do is modify the 8-bit constant field.
0183  */
0184 #define __PV_BITS_31_24 0x81000000
0185 #define __PV_BITS_23_16 0x810000
0186 #define __PV_BITS_7_0   0x81
0187 
0188 extern unsigned long __pv_phys_pfn_offset;
0189 extern u64 __pv_offset;
0190 extern void fixup_pv_table(const void *, unsigned long);
0191 extern const void *__pv_table_begin, *__pv_table_end;
0192 
0193 #define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
0194 #define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
0195 
0196 #ifndef CONFIG_THUMB2_KERNEL
/*
 * __pv_stub(from, to, instr): apply the phys<->virt delta with two
 * 'instr' (add or sub) operations whose 8-bit rotated immediates
 * (__PV_BITS_31_24 and __PV_BITS_23_16) are placeholders.  The
 * "1b - ., 2b - ." entries emitted into the .pv_table section record
 * both instruction addresses so the immediates can be rewritten at
 * boot with the real offset (presumably via fixup_pv_table(),
 * declared above -- confirm in arch/arm/kernel).
 */
0197 #define __pv_stub(from,to,instr)            \
0198     __asm__("@ __pv_stub\n"             \
0199     "1: " instr "   %0, %1, %2\n"       \
0200     "2: " instr "   %0, %0, %3\n"       \
0201     "   .pushsection .pv_table,\"a\"\n"     \
0202     "   .long   1b - ., 2b - .\n"       \
0203     "   .popsection\n"              \
0204     : "=r" (to)                 \
0205     : "r" (from), "I" (__PV_BITS_31_24),        \
0206       "I"(__PV_BITS_23_16))
0207 

/*
 * 64-bit (LPAE) variant: y = x + offset, where y is a 64-bit value
 * (%Q0 = low word, %R0 = high word).  The movw at 0: and the mov at 1:
 * are the patch sites recorded in .pv_table; the adds/adc pair
 * propagates the carry from the low word into the high word.
 */
0208 #define __pv_add_carry_stub(x, y)           \
0209     __asm__("@ __pv_add_carry_stub\n"       \
0210     "0: movw    %R0, #0\n"          \
0211     "   adds    %Q0, %1, %R0, lsl #20\n"    \
0212     "1: mov %R0, %2\n"          \
0213     "   adc %R0, %R0, #0\n"         \
0214     "   .pushsection .pv_table,\"a\"\n"     \
0215     "   .long   0b - ., 1b - .\n"       \
0216     "   .popsection\n"              \
0217     : "=&r" (y)                 \
0218     : "r" (x), "I" (__PV_BITS_7_0)          \
0219     : "cc")
0220 
0221 #else
/*
 * Thumb-2 encoding of __pv_stub: a movw placeholder (recorded in
 * .pv_table for boot-time patching) is shifted left by 21 bits to
 * rebuild the offset, so only a single add/sub is needed per stub.
 */
0222 #define __pv_stub(from,to,instr)            \
0223     __asm__("@ __pv_stub\n"             \
0224     "0: movw    %0, #0\n"           \
0225     "   lsl %0, #21\n"          \
0226     "   " instr " %0, %1, %0\n"         \
0227     "   .pushsection .pv_table,\"a\"\n"     \
0228     "   .long   0b - .\n"           \
0229     "   .popsection\n"              \
0230     : "=&r" (to)                    \
0231     : "r" (from))
0232 

/*
 * Thumb-2 64-bit (LPAE) carry variant (%Q0 = low word, %R0 = high
 * word).  Both the movw at 0: and the mvn at 1: are patch sites listed
 * in .pv_table; the final adc folds the low-word carry into the high
 * word.
 */
0233 #define __pv_add_carry_stub(x, y)           \
0234     __asm__("@ __pv_add_carry_stub\n"       \
0235     "0: movw    %R0, #0\n"          \
0236     "   lsls    %R0, #21\n"         \
0237     "   adds    %Q0, %1, %R0\n"         \
0238     "1: mvn %R0, #0\n"          \
0239     "   adc %R0, %R0, #0\n"         \
0240     "   .pushsection .pv_table,\"a\"\n"     \
0241     "   .long   0b - ., 1b - .\n"       \
0242     "   .popsection\n"              \
0243     : "=&r" (y)                 \
0244     : "r" (x)                   \
0245     : "cc")
0246 #endif
0247 
/*
 * virt -> phys without the CONFIG_DEBUG_VIRTUAL sanity check.  The
 * arithmetic is performed by the run-time patched stub instructions
 * above; a 64-bit (LPAE) phys_addr_t needs the carry-propagating
 * variant.
 */
0248 static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
0249 {
0250     phys_addr_t t;
0251 
	/* sizeof() is a compile-time constant, so one branch folds away. */
0252     if (sizeof(phys_addr_t) == 4) {
0253         __pv_stub(x, t, "add");
0254     } else {
0255         __pv_add_carry_stub(x, t);
0256     }
0257     return t;
0258 }
0259 
/*
 * phys -> virt for the run-time patched (ARM_PATCH_PHYS_VIRT) case:
 * subtract the patched offset.  Only the low 32 bits of the physical
 * address participate, since lowmem virtual addresses are 32-bit.
 */
0260 static inline unsigned long __phys_to_virt(phys_addr_t x)
0261 {
0262     unsigned long t;
0263 
0264     /*
0265      * The 'unsigned long' cast discards the upper word when
0266      * phys_addr_t is 64 bit, and makes sure that the inline
0267      * assembler expression receives a 32 bit argument
0268      * in place where an 'r' 32 bit operand is expected.
0269      */
0270     __pv_stub((unsigned long) x, t, "sub");
0271     return t;
0272 }
0273 
0274 #else
0275 
0276 #define PHYS_OFFSET PLAT_PHYS_OFFSET
0277 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
0278 
0279 static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
0280 {
0281     return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
0282 }
0283 
0284 static inline unsigned long __phys_to_virt(phys_addr_t x)
0285 {
0286     return x - PHYS_OFFSET + PAGE_OFFSET;
0287 }
0288 
0289 #endif
0290 
/* Lowmem virtual address -> page frame number of the backing page. */
0291 #define virt_to_pfn(kaddr) \
0292     ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
0293      PHYS_PFN_OFFSET)
0294 
/*
 * Kernel symbol addresses use the same linear conversion as the direct
 * map, so __pa_symbol() can simply reuse __virt_to_phys_nodebug().
 */
0295 #define __pa_symbol_nodebug(x)  __virt_to_phys_nodebug((x))
0296 
0297 #ifdef CONFIG_DEBUG_VIRTUAL
0298 extern phys_addr_t __virt_to_phys(unsigned long x);
0299 extern phys_addr_t __phys_addr_symbol(unsigned long x);
0300 #else
0301 #define __virt_to_phys(x)   __virt_to_phys_nodebug(x)
0302 #define __phys_addr_symbol(x)   __pa_symbol_nodebug(x)
0303 #endif
0304 
0305 /*
0306  * These are *only* valid on the kernel direct mapped RAM memory.
0307  * Note: Drivers should NOT use these.  They are the wrong
0308  * translation for translating DMA addresses.  Use the driver
0309  * DMA support - see dma-mapping.h.
0310  */
0311 #define virt_to_phys virt_to_phys
0312 static inline phys_addr_t virt_to_phys(const volatile void *x)
0313 {
0314     return __virt_to_phys((unsigned long)(x));
0315 }
0316 
0317 #define phys_to_virt phys_to_virt
0318 static inline void *phys_to_virt(phys_addr_t x)
0319 {
0320     return (void *)__phys_to_virt(x);
0321 }
0322 
0323 /*
0324  * Drivers should NOT use these either.
0325  */
0326 #define __pa(x)         __virt_to_phys((unsigned long)(x))
0327 #define __pa_symbol(x)      __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
0328 #define __va(x)         ((void *)__phys_to_virt((phys_addr_t)(x)))
0329 #define pfn_to_kaddr(pfn)   __va((phys_addr_t)(pfn) << PAGE_SHIFT)
0330 
0331 extern long long arch_phys_to_idmap_offset;
0332 
0333 /*
0334  * These are for systems that have a hardware interconnect supported alias
0335  * of physical memory for idmap purposes.  Most cases should leave these
0336  * untouched.  Note: this can only return addresses less than 4GiB.
0337  */
0338 static inline bool arm_has_idmap_alias(void)
0339 {
0340     return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
0341 }
0342 
0343 #define IDMAP_INVALID_ADDR ((u32)~0)
0344 
0345 static inline unsigned long phys_to_idmap(phys_addr_t addr)
0346 {
0347     if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
0348         addr += arch_phys_to_idmap_offset;
0349         if (addr > (u32)~0)
0350             addr = IDMAP_INVALID_ADDR;
0351     }
0352     return addr;
0353 }
0354 
0355 static inline phys_addr_t idmap_to_phys(unsigned long idmap)
0356 {
0357     phys_addr_t addr = idmap;
0358 
0359     if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
0360         addr -= arch_phys_to_idmap_offset;
0361 
0362     return addr;
0363 }
0364 
0365 static inline unsigned long __virt_to_idmap(unsigned long x)
0366 {
0367     return phys_to_idmap(__virt_to_phys(x));
0368 }
0369 
0370 #define virt_to_idmap(x)    __virt_to_idmap((unsigned long)(x))
0371 
0372 /*
0373  * Virtual <-> DMA view memory address translations
0374  * Again, these are *only* valid on the kernel direct mapped RAM
0375  * memory.  Use of these is *deprecated* (and that doesn't mean
0376  * use the __ prefixed forms instead.)  See dma-mapping.h.
0377  */
0378 #ifndef __virt_to_bus
0379 #define __virt_to_bus   __virt_to_phys
0380 #define __bus_to_virt   __phys_to_virt
0381 #endif
0382 
0383 /*
0384  * Conversion between a struct page and a physical address.
0385  *
0386  *  page_to_pfn(page)   convert a struct page * to a PFN number
0387  *  pfn_to_page(pfn)    convert a _valid_ PFN number to struct page *
0388  *
0389  *  virt_to_page(k) convert a _valid_ virtual address to struct page *
0390  *  virt_addr_valid(k)  indicates whether a virtual address is valid
0391  */
0392 #define ARCH_PFN_OFFSET     PHYS_PFN_OFFSET
0393 
0394 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
0395 #define virt_addr_valid(kaddr)  (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
0396                     && pfn_valid(virt_to_pfn(kaddr)))
0397 
0398 #endif
0399 
0400 #include <asm-generic/memory_model.h>
0401 
0402 #endif