/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

/*
 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
 * table containing PTEs, together with a set of 16 segment registers,
 * to define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hash_low_32.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

#define _PAGE_PRESENT   0x001   /* software: pte contains a translation */
#define _PAGE_HASHPTE   0x002   /* hash_page has made an HPTE for this pte */
#define _PAGE_USER  0x004   /* usermode access allowed */
#define _PAGE_GUARDED   0x008   /* G: prohibit speculative access */
#define _PAGE_COHERENT  0x010   /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020   /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040   /* W: cache write-through */
#define _PAGE_DIRTY 0x080   /* C: page changed */
#define _PAGE_ACCESSED  0x100   /* R: page referenced */
#define _PAGE_EXEC  0x200   /* software: exec allowed */
#define _PAGE_RW    0x400   /* software: user write access allowed */
#define _PAGE_SPECIAL   0x800   /* software: Special page */

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK  (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK  _PAGE_HASHPTE
#endif

#define _PMD_PRESENT    0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD    (~PAGE_MASK)

/* And here we include common definitions */

#define _PAGE_KERNEL_RO     0
#define _PAGE_KERNEL_ROX    (_PAGE_EXEC)
#define _PAGE_KERNEL_RW     (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX    (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
    return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT   (PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

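/*
 * Illustrative values, assuming the usual 4K pages (PAGE_SHIFT == 12,
 * defined elsewhere):
 *   32-bit PTEs: PTE_RPN_MASK == ~0xfffUL  == 0xfffff000
 *   64-bit PTEs: PTE_RPN_MASK == ~0xfffULL == 0xfffffffffffff000ULL
 * With 64-bit PTEs the RPN field can hold physical addresses wider than
 * 32 bits, hence MAX_POSSIBLE_PHYSMEM_BITS is 36 rather than 32.
 */
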
/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
             _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE  (_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE   __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
                 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

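/*
 * Illustrative expansions of the masks above (flag values as defined at
 * the top of this file; _PAGE_BASE includes _PAGE_COHERENT):
 *   PAGE_KERNEL     = _PAGE_BASE | _PAGE_DIRTY | _PAGE_RW  = 0x591
 *   PAGE_KERNEL_ROX = _PAGE_BASE | _PAGE_EXEC              = 0x311
 *   PAGE_SHARED     = _PAGE_BASE | _PAGE_USER | _PAGE_RW   = 0x515
 * i.e. every mapping carries _PAGE_PRESENT and _PAGE_ACCESSED, kernel
 * writable mappings are pre-dirtied, and only user mappings set _PAGE_USER.
 */
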
/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
    defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT    PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT    PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC    PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP        (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS     (PTE_TABLE_SIZE - 1)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)

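/*
 * Illustrative numbers, assuming 4K pages (PAGE_SHIFT == 12) and the
 * PTE_SHIFT value set up elsewhere for each PTE width:
 *   32-bit PTEs: PTE_INDEX_SIZE == 10 -> PGDIR_SHIFT == 22,
 *     PGDIR_SIZE == 4MB, PTRS_PER_PGD == 1024, PTRS_PER_PTE == 1024
 *   64-bit PTEs: PTE_INDEX_SIZE == 9  -> PGDIR_SHIFT == 21,
 *     PGDIR_SIZE == 2MB, PTRS_PER_PGD == 2048, PTRS_PER_PTE == 512
 * which matches the 1024/1024 and 2048/512 split described above.
 */
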
#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START   VMALLOC_START
#define IOREMAP_END VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif

#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR   (MODULES_END - SZ_256M)

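/*
 * Illustrative layout, assuming the common PAGE_OFFSET of 0xc0000000
 * (a Kconfig choice, not fixed by this header):
 *   MODULES_END   == 0xc0000000 (PAGE_OFFSET rounded down to 256MB)
 *   MODULES_VADDR == 0xb0000000 (a 256MB window just below the linear map)
 * VMALLOC_START is computed at runtime from high_memory plus the 16MB
 * VMALLOC_OFFSET, aligned to a 16MB boundary, and VMALLOC_END follows
 * ioremap_bot as it moves down.
 */
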
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS     0

#define pte_ERROR(e) \
    pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
        (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
    pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
    do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)       (!pmd_val(pmd))
#define pmd_bad(pmd)        (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)    (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
    *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
              unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
    if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
        unsigned long ptephys = __pa(ptep) & PAGE_MASK;

        flush_hash_pages(mm->context.id, addr, ptephys, 1);
    }
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                     unsigned long clr, unsigned long set, int huge)
{
    pte_basic_t old;

    if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
        unsigned long tmp;

        asm volatile(
#ifndef CONFIG_PTE_64BIT
    "1: lwarx   %0, 0, %3\n"
    "   andc    %1, %0, %4\n"
#else
    "1: lwarx   %L0, 0, %3\n"
    "   lwz %0, -4(%3)\n"
    "   andc    %1, %L0, %4\n"
#endif
    "   or  %1, %1, %5\n"
    "   stwcx.  %1, 0, %3\n"
    "   bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
        : "r" (p),
#else
        : "b" ((unsigned long)(p) + 4),
#endif
          "r" (clr), "r" (set), "m" (*p)
        : "cc" );
    } else {
        old = pte_val(*p);

        *p = __pte((old & ~(pte_basic_t)clr) | set);
    }

    return old;
}

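/*
 * Notes on the sequence above: the lwarx/stwcx. loop is an atomic
 * read-modify-write of the PTE word that clears the bits in 'clr' and
 * sets the bits in 'set', retrying if the reservation is lost. With
 * 64-bit PTEs only the low word (at p + 4 in this big-endian layout) is
 * updated atomically; the high word is fetched with a plain lwz and is
 * never modified here. The atomic path is only needed when the hash MMU
 * is active, since hash_page can concurrently update a valid PTE.
 *
 * Typical use, as in the helpers below: pte_update(mm, addr, ptep,
 * _PAGE_RW, 0, 0) atomically removes write permission while returning
 * the old value, which callers such as __ptep_test_and_clear_young()
 * use to check _PAGE_HASHPTE.
 */
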
/*
 * 2.6 calls this without flushing the TLB entry; that is wrong
 * for our hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                          unsigned long addr, pte_t *ptep)
{
    unsigned long old;
    old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
    if (old & _PAGE_HASHPTE)
        flush_hash_entry(mm, ptep, addr);

    return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
    __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                       pte_t *ptep)
{
    return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep)
{
    pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                       pte_t *ptep, pte_t entry,
                       unsigned long address,
                       int psize)
{
    unsigned long set = pte_val(entry) &
        (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

    pte_update(vma->vm_mm, address, ptep, 0, set, 0);

    flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd)        (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)       pfn_to_page(pmd_pfn(pmd))

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)       ((entry).val & 0x1f)
#define __swp_offset(entry)     ((entry).val >> 5)
#define __swp_entry(type, offset)   ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val << 3 })

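/*
 * Swap entry layout implied by the macros above: the swp_entry value keeps
 * the type in bits 0-4 and the offset from bit 5 upward; shifting the whole
 * value left by 3 when storing it in a PTE leaves bits 0-2 clear, so
 * _PAGE_PRESENT (0x001) and _PAGE_HASHPTE (0x002) can never be set in a
 * swap PTE. Example: __swp_entry(5, 100) has val 0xc85 and is stored as
 * the PTE value 0x6428.
 */
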
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)      { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)       { return 1; }
static inline int pte_dirty(pte_t pte)      { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)      { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)    { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)       { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)      { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
    return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
    return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
    return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
    return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
    /*
     * Access is gated by the _PAGE_USER bit; there is no separate
     * read-permission bit on this platform, so pte_read() is always
     * true. Write access additionally requires _PAGE_RW.
     */
    if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
        return false;

    if (write && !pte_write(pte))
        return false;

    return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
    return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
             pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
    return pte_val(pte) >> PTE_RPN_SHIFT;
}

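/*
 * Example, assuming 4K pages (PTE_RPN_SHIFT == 12): pfn_pte(0x12345,
 * PAGE_KERNEL) builds a PTE whose RPN field is 0x12345000 with the
 * PAGE_KERNEL protection bits in the low 12 bits, and pte_pfn() recovers
 * 0x12345 by shifting the protection bits back out.
 */
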
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
    return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
    return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
    return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
    return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
    return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
    return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
    return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
    return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
    return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spreading it around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
    /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
     * helper pte_update() which does an atomic update. We need to do that
     * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
     * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
     * the hash bits instead (ie, same as the non-SMP case)
     */
    if (percpu)
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                  | (pte_val(pte) & ~_PAGE_HASHPTE));
    else
        pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
    /* Second case is 32-bit with 64-bit PTE.  In this case, we
     * can just store as long as we do the two halves in the right order
     * with a barrier in between. This is possible because we take care,
     * in the hash code, to pre-invalidate if the PTE was already hashed,
     * which synchronizes us with any concurrent invalidation.
     * In the percpu case, we also fall back to the simple update preserving
     * the hash bits
     */
    if (percpu) {
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                  | (pte_val(pte) & ~_PAGE_HASHPTE));
        return;
    }
    if (pte_val(*ptep) & _PAGE_HASHPTE)
        flush_hash_entry(mm, ptep, addr);
    __asm__ __volatile__("\
        stw%X0 %2,%0\n\
        eieio\n\
        stw%X1 %L2,%1"
    : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
    : "r" (pte) : "memory");

#else
    /* Third case is 32-bit hash table in UP mode. We need to preserve
     * the _PAGE_HASHPTE bit since we may not have invalidated the previous
     * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
     * so we need to keep track that this PTE needs invalidating.
     */
    *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
              | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
             _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
            _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
            _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
            _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
            _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
    return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
    return pgprot_noncached_wc(prot);
}

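/*
 * Example: pgprot_noncached(PAGE_KERNEL) strips the cache-control bits
 * (_PAGE_CACHE_CTL, including _PAGE_COHERENT) and sets _PAGE_NO_CACHE |
 * _PAGE_GUARDED, turning the 0x591 value shown earlier into 0x5a9:
 * present, accessed, dirty, writable, cache-inhibited and guarded, the
 * attribute combination normally wanted for ioremap()ed device memory.
 */
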
#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */