Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  *  S390 version
0004  *    Copyright IBM Corp. 1999, 2000
0005  *    Author(s): Hartmut Penner (hp@de.ibm.com)
0006  */
0007 
0008 #ifndef _S390_PAGE_H
0009 #define _S390_PAGE_H
0010 
0011 #include <linux/const.h>
0012 #include <asm/types.h>
0013 
/* Base page is 4K: shift, size and mask used throughout the VM code. */
#define _PAGE_SHIFT 12
#define _PAGE_SIZE  (_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK  (~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT  _PAGE_SHIFT
#define PAGE_SIZE   _PAGE_SIZE
#define PAGE_MASK   _PAGE_MASK
#define PAGE_DEFAULT_ACC    0
/* storage-protection override */
#define PAGE_SPO_ACC        9
/* Access-control bits occupy the top nibble of the storage key. */
#define PAGE_DEFAULT_KEY    (PAGE_DEFAULT_ACC << 4)
0026 
/* Huge pages are 1M (shift 20); order relative to the 4K base page. */
#define HPAGE_SHIFT 20
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE     2

/* Arch-specific hugetlb hooks provided by this architecture. */
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
0039 
0040 #include <asm/setup.h>
0041 #ifndef __ASSEMBLY__
0042 
0043 void __storage_key_init_range(unsigned long start, unsigned long end);
0044 
0045 static inline void storage_key_init_range(unsigned long start, unsigned long end)
0046 {
0047     if (PAGE_DEFAULT_KEY != 0)
0048         __storage_key_init_range(start, end);
0049 }
0050 
/* Zero-fill one 4K page. */
#define clear_page(page)    memset((page), 0, PAGE_SIZE)
0052 
/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
    union register_pair dst, src;

    dst.even = (unsigned long) to;
    dst.odd  = 0x1000;          /* destination length: one 4K page */
    src.even = (unsigned long) from;
    src.odd  = 0xb0001000;      /* pad byte 0xb0 in the top byte, length 4K below */

    /* mvcl clobbers both register pairs and the condition code. */
    asm volatile(
        "   mvcl    %[dst],%[src]"
        : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
        : : "memory", "cc");
}
0072 
/* The user variants ignore the vaddr/page hints and use the plain ops. */
#define clear_user_page(page, vaddr, pg)    clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/* Allocate a zeroed, movable highmem page for a userspace mapping. */
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
    alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
0079 
/*
 * Single-member wrapper structs so the compiler catches accidental
 * mixing of the different page-table entry levels; a bare unsigned
 * long would convert silently.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)   ((x).pgprot)
#define pgste_val(x)    ((x).pgste)

/* Accessors: unwrap the raw entry value from each typed wrapper. */

static inline unsigned long pte_val(pte_t entry)
{
    return entry.pte;
}

static inline unsigned long pmd_val(pmd_t entry)
{
    return entry.pmd;
}

static inline unsigned long pud_val(pud_t entry)
{
    return entry.pud;
}

static inline unsigned long p4d_val(p4d_t entry)
{
    return entry.p4d;
}

static inline unsigned long pgd_val(pgd_t entry)
{
    return entry.pgd;
}
0120 
/* Constructors: wrap a raw value into the matching typed struct. */
#define __pgste(x)  ((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)    ((pud_t) { (x) } )
#define __p4d(x)    ((p4d_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )
0128 
/*
 * Set the storage key of the 4K page at @addr to @skey.
 * If @mapped is zero, the .insn form of SSKE (opcode 0xb22b) is used
 * with the m3 field set to 8 -- NOTE(review): presumably the
 * nonquiescing variant, confirm against the Principles of Operation.
 * Otherwise a plain SSKE is issued.
 */
static inline void page_set_storage_key(unsigned long addr,
                    unsigned char skey, int mapped)
{
    if (!mapped)
        asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
                 : : "d" (skey), "a" (addr));
    else
        asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
0138 
/*
 * Read the storage key of the 4K page at @addr via the ISKE
 * (insert storage key extended) instruction.
 */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
    unsigned char skey;

    asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
    return skey;
}
0146 
/*
 * Reset the referenced bit of the page at @addr with RRBE and return
 * the resulting condition code (0-3): ipm captures the PSW condition
 * code into the high bits, srl shifts it down to the low bits.
 */
static inline int page_reset_referenced(unsigned long addr)
{
    int cc;

    asm volatile(
        "   rrbe    0,%1\n"
        "   ipm %0\n"
        "   srl %0,28\n"
        : "=d" (cc) : "a" (addr) : "cc");
    return cc;
}
0158 
/* Bits in the storage key */
#define _PAGE_CHANGED       0x02    /* HW changed bit       */
#define _PAGE_REFERENCED    0x04    /* HW referenced bit        */
#define _PAGE_FP_BIT        0x08    /* HW fetch protection bit  */
#define _PAGE_ACC_BITS      0xf0    /* HW access control bits   */
0164 
struct page;
/* Per-page arch hooks, implemented elsewhere in arch code. */
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);
0169 
/*
 * Access to physical memory via /dev/mem is never permitted here,
 * no matter which pfn is asked about.
 */
static inline int devmem_is_allowed(unsigned long pfn)
{
    (void)pfn;
    return 0;
}
0174 
/* Tell the core mm that the arch_*_page hooks above exist. */
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
0182 
0183 #endif /* !__ASSEMBLY__ */
0184 
/*
 * The kernel uses an identity mapping: virtual address == physical
 * address, so PAGE_OFFSET is 0 and __pa/__va are plain casts.
 */
#define __PAGE_OFFSET       0x0UL
#define PAGE_OFFSET     0x0UL

#define __pa(x)         ((unsigned long)(x))
#define __va(x)         ((void *)(unsigned long)(x))

/* Conversions between physical addresses, pfns, pages and kernel vaddrs. */
#define phys_to_pfn(phys)   ((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)    ((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)  pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)  pfn_to_phys(page_to_pfn(page))

#define pfn_to_virt(pfn)    __va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr)  (phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn)   pfn_to_virt(pfn)

#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)  pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))

#define VM_DATA_DEFAULT_FLAGS   VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
0210 
0211 #endif /* _S390_PAGE_H */