/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                  with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT           18
#define PMD_SIZE            (1UL << PMD_SHIFT)
#define PMD_MASK            (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)   (((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT         24
#define PGDIR_SIZE          (1UL << PGDIR_SHIFT)
#define PGDIR_MASK          (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
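
/*
 * Worked example (illustrative, not from the original source): with
 * PMD_SHIFT = 18 a PMD maps 256 KiB, and with PGDIR_SHIFT = 24 a PGD
 * entry maps 16 MiB.  ~PMD_MASK equals PMD_SIZE-1, so the ALIGN macros
 * round an address up to the next boundary, e.g.:
 *
 *     PMD_ALIGN(0x40001)     == 0x80000    (next 256 KiB boundary)
 *     PGDIR_ALIGN(0x1000000) == 0x1000000  (already aligned)
 */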

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE        64
#define PTRS_PER_PMD        64
#define PTRS_PER_PGD        256
#define USER_PTRS_PER_PGD   (PAGE_OFFSET / PGDIR_SIZE)
#define PTE_SIZE            (PTRS_PER_PTE*4)
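
/*
 * Illustrative note (not in the original source): these sizes give the
 * classic SRMMU three-level split of a 32-bit virtual address:
 *
 *     31       24 23     18 17     12 11          0
 *    +-----------+---------+---------+-------------+
 *    |  pgd (8)  | pmd (6) | pte (6) | offset (12) |
 *    +-----------+---------+---------+-------------+
 *
 * 256 PGD entries * 64 PMD entries * 64 PTE entries * 4 KiB pages
 * covers the full 4 GiB address space.
 */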

#define PAGE_NONE       SRMMU_PAGE_NONE
#define PAGE_SHARED     SRMMU_PAGE_SHARED
#define PAGE_COPY       SRMMU_PAGE_COPY
#define PAGE_READONLY   SRMMU_PAGE_RDONLY
#define PAGE_KERNEL     SRMMU_PAGE_KERNEL

/* Top-level page directory - a dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general, all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
    __asm__ __volatile__("swap [%2], %0" :
            "=&r" (value) : "0" (value), "r" (addr) : "memory");
    return value;
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
    srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
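
/*
 * Hypothetical usage sketch (not from the original source): a caller
 * updating a PTE goes through set_pte()/set_pte_at() so the update is
 * a single atomic swap rather than a plain store, e.g.:
 *
 *     set_pte_at(vma->vm_mm, address, ptep, mk_pte(page, PAGE_SHARED));
 */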

static inline int srmmu_device_memory(unsigned long x)
{
    return ((x & 0xF0000000) != 0);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
    return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}
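
/*
 * Illustrative note (not in the original source): an SRMMU page table
 * descriptor stores the physical address right-shifted by 4, so with
 * PAGE_SHIFT = 12 the pfn is recovered by shifting the masked entry a
 * further (12 - 4) = 8 bits; equivalently, (entry << 4) >> PAGE_SHIFT.
 */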

static inline struct page *pmd_page(pmd_t pmd)
{
    if (srmmu_device_memory(pmd_val(pmd)))
        BUG();
    return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
    unsigned long v;

    if (srmmu_device_memory(pmd_val(pmd)))
        BUG();

    v = pmd_val(pmd) & SRMMU_PTD_PMASK;
    return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
    unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
    return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
    if (srmmu_device_memory(pud_val(pud))) {
        return (pmd_t *)~0;
    } else {
        unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
        return (pmd_t *)__nocache_va(v << 4);
    }
}

static inline int pte_present(pte_t pte)
{
    return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}
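
/*
 * Illustrative note (not in the original source): the SRMMU entry-type
 * (ET) field in the low two bits distinguishes entries; it is what
 * pte_present() above and the pmd/pud _present()/_bad() tests below
 * key off:
 *
 *     ET == 0  invalid
 *     ET == 1  page table descriptor (SRMMU_ET_PTD)
 *     ET == 2  page table entry      (SRMMU_ET_PTE)
 */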

static inline int pte_none(pte_t pte)
{
    return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
    set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
    __pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
    return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
    return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
    return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
    set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
    return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
    return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
    return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
    set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
    return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
    return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
    return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
    return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
    return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
    return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
    return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
    return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
    return __pte(pte_val(pte) | SRMMU_REF);
}
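
/*
 * Hypothetical usage sketch (not from the original source): the
 * accessors compose, since each returns a new pte_t value, e.g. to
 * write-protect and age an entry before installing it:
 *
 *     pte_t pte = *ptep;
 *     pte = pte_mkold(pte_wrprotect(pte));
 *     set_pte_at(mm, addr, ptep, pte);
 */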

#define pfn_pte(pfn, prot)      mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
    if (srmmu_device_memory(pte_val(pte))) {
        /* Just return something that will cause
         * pfn_valid() to return false.  This makes
         * copy_one_pte() just copy the PTE over
         * directly.
         */
        return ~0UL;
    }
    return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
    return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
    return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
    return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
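
/*
 * Worked example (illustrative, not from the original source): physical
 * address 0x12345000 is stored by mk_pte_phys() right-shifted by 4,
 * i.e. as 0x01234500, with the protection bits OR'd into the low byte.
 * mk_pte_io() additionally places the 4-bit I/O space number in bits
 * 31:28 of the entry.
 */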

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
    pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
    return prot;
}
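
/*
 * Hypothetical usage sketch (not from the original source): a driver
 * mapping device registers would typically clear the cacheable bit
 * before installing the mapping, e.g.:
 *
 *     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *     io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */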

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
    return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
        pgprot_val(newprot));
}

/* Only used by the huge vmap code; should never be called. */
#define pud_page(pud)           NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and decode a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
    return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
    return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
    return (swp_entry_t) {
        (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
        | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })
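
/*
 * Hypothetical round-trip sketch (not from the original source),
 * showing how the encode/decode helpers invert each other:
 *
 *     swp_entry_t e = __swp_entry(type, offset);
 *     __swp_type(e)   == type    (masked to SRMMU_SWP_TYPE_MASK)
 *     __swp_offset(e) == offset  (masked to SRMMU_SWP_OFF_MASK)
 *
 * __pte_to_swp_entry()/__swp_entry_to_pte() just reinterpret the same
 * 32-bit value as a pte_t or swp_entry_t.
 */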

static inline unsigned long
__get_phys (unsigned long addr)
{
    switch (sparc_cpu_model) {
    case sun4m:
    case sun4d:
        return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
    default:
        return 0;
    }
}

static inline int
__get_iospace (unsigned long addr)
{
    switch (sparc_cpu_model) {
    case sun4m:
    case sun4d:
        return (srmmu_get_pte(addr) >> 28);
    default:
        return -1;
    }
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
    (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)  (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)            (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)                (pfn & 0x0fffffffUL)
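
/*
 * Worked example (illustrative, not from the original source): on
 * sparc32, BITS_PER_LONG is 32, so MK_IOSPACE_PFN() places the space
 * number in bits 31:28 of the pfn:
 *
 *     MK_IOSPACE_PFN(0x2, 0x12345) == 0x20012345
 *     GET_IOSPACE(0x20012345)      == 0x2
 *     GET_PFN(0x20012345)          == 0x12345
 */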

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
            unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                     unsigned long from, unsigned long pfn,
                     unsigned long size, pgprot_t prot)
{
    unsigned long long offset, space, phys_base;

    offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
    space = GET_IOSPACE(pfn);
    phys_base = offset | (space << 32ULL);

    return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
    int __changed = !pte_same(*(__ptep), __entry);                        \
    if (__changed) {                                                      \
        set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);         \
        flush_tlb_page(__vma, __address);                                 \
    }                                                                     \
    __changed;                                                            \
})
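
/*
 * Hypothetical caller sketch (not from the original source): the fault
 * path can use this to upgrade a PTE's access/dirty bits.  It only
 * writes and flushes the TLB when the entry actually changed, and
 * evaluates to whether it did, e.g.:
 *
 *     if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *         update_mmu_cache(vma, address, ptep);
 */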

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)    ((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */