Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
0004  */
0005 
/*
 * Page table flags for the software walked/managed MMUv3 (ARC700) and
 * MMUv4 (HS). These correspond to the equivalent bits in a TLB entry.
 */
0010 
#ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
#define _ASM_ARC_PGTABLE_BITS_ARCV2_H

/*
 * Per-PTE flag bits.
 * (H) marks bits that mirror a field in the hardware TLB entry;
 * (s) marks software-only bits maintained by the kernel.
 */
#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_CACHEABLE     (1 << 0)  /* Cached (H) */
#else
/* Caching of pages disabled by config: flag compiles away to nothing */
#define _PAGE_CACHEABLE     0
#endif

#define _PAGE_EXECUTE       (1 << 1)  /* User Execute  (H) */
#define _PAGE_WRITE     (1 << 2)  /* User Write    (H) */
#define _PAGE_READ      (1 << 3)  /* User Read     (H) */
#define _PAGE_ACCESSED      (1 << 4)  /* Accessed      (s) */
#define _PAGE_DIRTY     (1 << 5)  /* Modified      (s) */
#define _PAGE_SPECIAL       (1 << 6)  /* pte_special() marker (s) */
#define _PAGE_GLOBAL        (1 << 8)  /* ASID agnostic (H) */
#define _PAGE_PRESENT       (1 << 9)  /* PTE/TLB Valid (H) */

#ifdef CONFIG_ARC_MMU_V4
#define _PAGE_HW_SZ     (1 << 10)  /* Normal/super (H) */
#else
/* MMUv3 has no hardware super-page bit */
#define _PAGE_HW_SZ     0
#endif
0034 
/* Defaults for every user page: must be present, cached if possible */
#define ___DEF      (_PAGE_PRESENT | _PAGE_CACHEABLE)

/*
 * Set of bits not changed in pte_modify: the physical frame number plus
 * the software-tracked state (accessed/dirty/special) survive a
 * protection change.
 */
#define _PAGE_CHG_MASK  (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
                               _PAGE_SPECIAL)

/* More abbreviated helpers: user protections built on top of ___DEF */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF \
                | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
/* Kernel mappings are additionally GLOBAL (not tagged with an ASID) */
#define PAGE_KERNEL     __pgprot(___DEF | _PAGE_GLOBAL \
                | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)

#define PAGE_SHARED PAGE_U_W_R

/* Uncached variant of a protection: simply drop the cacheable bit */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
0055 
0056 /*
0057  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
0058  *
0059  * Certain cases have 1:1 mapping
0060  *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
0061  *       which directly corresponds to  PAGE_U_X_R
0062  *
0063  * Other rules which cause the divergence from 1:1 mapping
0064  *
0065  *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
0067  *     keep things consistent with other archs:
0068  *      -Write implies Read:   W => R
0069  *      -Execute implies Read: X => R
0070  *
0071  *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
0072  *     This is to enable COW mechanism
0073  */
    /* xwr */
#ifndef __ASSEMBLY__

/* Query helpers: each tests a single flag bit in the pte */
#define pte_write(pte)      (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)      (pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)      (pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)    (pte_val(pte) & _PAGE_SPECIAL)

/*
 * Generator for the pte_mk*()/pte_wrprotect() family: each expands to a
 * static inline taking a pte by value, applying the compound-assignment
 * @op to its raw value, and returning the modified copy (the original
 * pte is not touched).
 */
#define PTE_BIT_FUNC(fn, op) \
    static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,     &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,   |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,   &= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,   |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,   |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
/* _PAGE_HW_SZ is 0 on MMUv3, so pte_mkhuge() is a no-op there */
PTE_BIT_FUNC(mkhuge,    |= (_PAGE_HW_SZ));
0094 
0095 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
0096 {
0097     return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
0098 }
0099 
/*
 * Install a pte into a page table slot. ARC needs no per-set side work
 * beyond the plain store, so @mm and @addr are unused here; the generic
 * signature is kept for the common set_pte_at() interface.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                  pte_t *ptep, pte_t pteval)
{
    set_pte(ptep, pteval);
}

/* Arch hook called after a pte is installed (defined in mm code) */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
              pte_t *ptep);
0108 
/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)      ((swp_entry_t) \
                    { ((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)   (((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)

/* swp_entry_t and pte_t share the same raw-word representation */
#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)       ((pte_t) { (x).val })

/* All kernel addresses are considered valid */
#define kern_addr_valid(addr)   (1)
0124 
0125 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0126 #include <asm/hugepage.h>
0127 #endif
0128 
0129 #endif /* __ASSEMBLY__ */
0130 
0131 #endif