#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

#include <asm-generic/pgtable-nop4d.h>

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#include <linux/sizes.h>
#endif
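
/*
 * Common bits between hash and radix page table formats.
 */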
#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004 /* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
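
/*
 * Software bits: ignored by the MMU, available to the OS.
 */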
#define _RPAGE_SW0		0x2000000000000000UL
#define _RPAGE_SW1		0x00800
#define _RPAGE_SW2		0x00400
#define _RPAGE_SW3		0x00200
#define _RPAGE_RSV1		0x00040UL
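
/* Memory protection key bits */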
#define _RPAGE_PKEY_BIT4	0x1000000000000000UL
#define _RPAGE_PKEY_BIT3	0x0800000000000000UL
#define _RPAGE_PKEY_BIT2	0x0400000000000000UL
#define _RPAGE_PKEY_BIT1	0x0200000000000000UL
#define _RPAGE_PKEY_BIT0	0x0100000000000000UL

#define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */
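
/*
 * Clearing _PAGE_PRESENT to invalidate a pte (for example while
 * splitting a huge pmd) would make it look like a swap pte, so a
 * software bit is used to mark such temporarily invalid entries.
 */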
#define _PAGE_INVALID		_RPAGE_SW0
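
/*
 * Top and bottom bits of RPN which can be used by hash
 * translation mode, because we expect them to be zero
 * otherwise.
 */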
#define _RPAGE_RPN0		0x01000
#define _RPAGE_RPN1		0x02000
#define _RPAGE_RPN43		0x0080000000000000UL
#define _RPAGE_RPN42		0x0040000000000000UL
#define _RPAGE_RPN41		0x0020000000000000UL
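
/* Max physical address bit as per radix table */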
#define _RPAGE_PA_MAX		56
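
/*
 * Max physical address bit we will use for now: mostly a hardware
 * limitation, since current CPUs implement fewer physical address
 * bits than the radix pte format could hold.
 */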
#define _PAGE_PA_MAX		53

#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
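
/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, add a define which maps to
 * _PAGE_TOLERANT.
 */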
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
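
/*
 * We support _RPAGE_PA_MAX bit real address in pte. On the linux side
 * we are limited by _PAGE_PA_MAX: clear everything above _PAGE_PA_MAX
 * and everything below PAGE_SHIFT.
 */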
#define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
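
/*
 * Set of bits not changed in pmd_modify. Even though we have hash
 * specific bits here, we need to define this mask because THP code
 * depends on it.
 */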
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)

#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
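
/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes.
 */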
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
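
/* Base protection bits present in every valid pte */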
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC)
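
/*
 * Permission masks used to generate the __P and __S table.
 *
 * Write permissions imply read permissions for now.
 */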
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
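
/* Permission masks used for kernel mappings */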
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
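
/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */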
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__
/*
 * page table defines
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pud_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size

/* pmd table use page table fragments */
#define PMD_CACHE_INDEX	0
#define PUD_CACHE_INDEX	__pud_cache_index
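
/*
 * Because of use of pte fragments and THP, size of page table
 * is not always derived from the index sizes above.
 */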
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size

extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS	__pmd_val_bits
#define PUD_VAL_BITS	__pud_val_bits
#define PGD_VAL_BITS	__pgd_val_bits

extern unsigned long __pte_frag_nr;
#define PTE_FRAG_NR	__pte_frag_nr
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT	__pte_frag_size_shift
#define PTE_FRAG_SIZE	(1UL << PTE_FRAG_SIZE_SHIFT)

extern unsigned long __pmd_frag_nr;
#define PMD_FRAG_NR	__pmd_frag_nr
extern unsigned long __pmd_frag_size_shift;
#define PMD_FRAG_SIZE_SHIFT	__pmd_frag_size_shift
#define PMD_FRAG_SIZE	(1UL << PMD_FRAG_SIZE_SHIFT)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#define MAX_PTRS_PER_PTE	((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
#define MAX_PTRS_PER_PMD	((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
#define MAX_PTRS_PER_PUD	((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
#define MAX_PTRS_PER_PGD	(1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
				       H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
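
/* PMD_SHIFT determines what a second-level page table entry can map */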
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
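
/* PUD_SHIFT determines what a third-level page table entry can map */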
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
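
/* PGDIR_SHIFT determines what a fourth-level page table entry can map */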
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
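
/* Bits to mask out from a PMD/PUD/P4D to get to the page table page */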
#define PMD_MASKED_BITS		0xc0000000000000ffUL
#define PUD_MASKED_BITS		0xc0000000000000ffUL
#define P4D_MASKED_BITS		0xc0000000000000ffUL

enum pgtable_index {
	PTE_INDEX = 0,
	PMD_INDEX,
	PUD_INDEX,
	PGD_INDEX,
	/*
	 * Below are used with 4k page size and hugetlb
	 */
	HTLB_16M_INDEX,
	HTLB_16G_INDEX,
};

extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START	__vmalloc_start
#define VMALLOC_END	__vmalloc_end

static inline unsigned int ioremap_max_order(void)
{
	if (radix_enabled())
		return PUD_SHIFT;
	return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
}
#define IOREMAP_MAX_ORDER ioremap_max_order()

extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_io_start;
extern unsigned long __kernel_io_end;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_IO_START	__kernel_io_start
#define KERN_IO_END	__kernel_io_end

extern struct page *vmemmap;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
#include <asm/book3s/64/radix.h>

#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
#define MAX_PHYSMEM_BITS	H_MAX_PHYSMEM_BITS
#else
#define MAX_PHYSMEM_BITS	R_MAX_PHYSMEM_BITS
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/pgtable-64k.h>
#else
#include <asm/book3s/64/pgtable-4k.h>
#endif

#include <asm/barrier.h>
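
/*
 * The IO region is carved into the PIO region (ISA and PHB IO space)
 * and the ioremap space:
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G onwards
 */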
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_START	(ioremap_bot)
#define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE	SZ_32M

#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#define __real_pte(e, p, o)	((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r, index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;

#define pte_iterate_hashed_end() } while(0)
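
/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */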
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */

static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set, int huge)
{
	if (radix_enabled())
		return radix__pte_update(mm, addr, ptep, clr, set, huge);
	return hash__pte_update(mm, addr, ptep, clr, set, huge);
}
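
/*
 * For hash, even if we have _PAGE_ACCESSED = 0, we do a pte_update.
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
 * function for both hash and radix.
 */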
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep);	\
})
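
/*
 * On Book3S CPUs, clearing the accessed bit without a TLB flush
 * doesn't cause data corruption. [ It could cause incorrect
 * page aging and the (mistaken) reclaim of hot pages, but the
 * chance of that should be relatively low. ]
 *
 * So as a performance optimization don't flush the TLB when
 * clearing the accessed bit, it will eventually be flushed by
 * a context switch or a VM operation anyway. [ In the rare
 * event of it not getting flushed for a long time the delay
 * shouldn't really matter because there's no real memory
 * pressure for swapout to react to. ]
 */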
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young ptep_test_and_clear_young

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#define pmdp_clear_flush_young pmdp_test_and_clear_young

static inline int __pte_write(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}

#ifdef CONFIG_NUMA_BALANCING
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	/*
	 * Saved-write ptes are prot-none ptes that don't have the
	 * privileged bit set. We mark prot none as one which has
	 * present and privileged bit set and RWX cleared. To mark
	 * protnone which used to have _PAGE_WRITE set we clear
	 * the privileged bit.
	 */
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#else
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_write(pte_t pte)
{
	return __pte_write(pte) || pte_savedwrite(pte);
}

static inline int pte_read(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/*
	 * We should not find protnone for hugetlb, but this completes the
	 * interface.
	 */
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full && radix_enabled()) {
		/*
		 * We know that this is a full mm pte clear and
		 * hence can be sure there is no parallel set_pte.
		 */
		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
	}
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

static inline int pte_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
}

static inline int pte_young(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
}

static inline int pte_special(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}

static inline bool pte_exec(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

#define pte_mk_savedwrite pte_mk_savedwrite
static inline pte_t pte_mk_savedwrite(pte_t pte)
{
	/*
	 * Used by Autonuma subsystem to preserve the write bit
	 * while marking the pte PROT_NONE. Only allow this
	 * on PROT_NONE pte
	 */
	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}

#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	/*
	 * Used by KSM subsystem to make a protnone pte readonly.
	 */
	VM_BUG_ON(!pte_protnone(pte));
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	VM_WARN_ON(1);
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline bool pte_hw_valid(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

static inline int pte_present(pte_t pte)
{
	/*
	 * A pte is considered present if _PAGE_PRESENT is set.
	 * We also need to consider the pte present which is marked
	 * invalid during ptep_set_access_flags. Hence we look for
	 * _PAGE_INVALID if we find _PAGE_PRESENT cleared.
	 */
	if (pte_hw_valid(pte))
		return true;
	return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
		cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
}

#ifdef CONFIG_PPC_MEM_KEYS
extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
#else
static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	return true;
}
#endif /* CONFIG_PPC_MEM_KEYS */

static inline bool pte_user(pte_t pte)
{
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return arch_pte_access_permitted(pte_val(pte), write, 0);
}
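
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */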
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
	VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);

	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}
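
/* Generic modifiers for PTE bits */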
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}
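
/*
 * This is potentially called with a pmd as the argument, in which case
 * it's not safe to check _PAGE_DEVMAP unless we also confirm that it
 * has _PAGE_PTE set in it, which it might not have (e.g. a swap entry
 * or a pmd migration entry).
 */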
static inline int pte_devmap(pte_t pte)
{
	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);

	return (pte_raw(pte) & mask) == mask;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this need to be a conditional */
	return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
			 cpu_to_be64(pgprot_val(newprot)));
}
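
/* Encode and de-code a swap entry */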
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS;	\
	 * we filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE); \
	} while (0)

#define SWP_TYPE_BITS 5
#define SWP_TYPE_MASK		((1UL << SWP_TYPE_BITS) - 1)
#define __swp_type(x)		((x).val & SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				(type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
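
/*
 * swp_entry_t must be independent of pte bits. We have to avoid
 * _PAGE_PTE being part of the swap entry.
 */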
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
#define __pmd_to_swp_entry(pmd)	(__pte_to_swp_entry(pmd_pte(pmd)))
#define __swp_entry_to_pmd(x)	(pte_pmd(__swp_entry_to_pte(x)))

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_SOFT_DIRTY
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#define _PAGE_SWP_EXCLUSIVE	_PAGE_NON_IDEMPOTENT

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
}

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This check for _PAGE_RWX and _PAGE_PRESENT bits
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This check for access to privilege space
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
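
/*
 * Generic functions with hash/radix callbacks
 */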
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	if (radix_enabled())
		return radix__ptep_set_access_flags(vma, ptep, entry,
						    address, psize);
	return hash__ptep_set_access_flags(ptep, entry);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	if (radix_enabled())
		return radix__pte_same(pte_a, pte_b);
	return hash__pte_same(pte_a, pte_b);
}

static inline int pte_none(pte_t pte)
{
	if (radix_enabled())
		return radix__pte_none(pte);
	return hash__pte_none(pte);
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
	/*
	 * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
	 * in all the callers.
	 */
	pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));

	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}

#define _PAGE_CACHE_CTL	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
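
/*
 * check a pte mapping have cache inhibited property
 */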
static inline bool pte_ci(pte_t pte)
{
	__be64 pte_v = pte_raw(pte);

	if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
	    ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
		return true;
	return false;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
		/*
		 * Don't use this if we can possibly have a hash page table
		 * entry mapping this.
		 */
		WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
	}
	*pmdp = __pmd(0);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_raw(pmd);
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * A pmd is considered present if _PAGE_PRESENT is set.
	 * We also need to consider the pmd present which is marked
	 * invalid during a split. Hence we look for _PAGE_INVALID
	 * if we find _PAGE_PRESENT cleared.
	 */
	if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
		return true;

	return false;
}

static inline int pmd_is_serializing(pmd_t pmd)
{
	/*
	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
	 *
	 * This condition may also occur when flushing a pmd while flushing
	 * it (see ptep_modify_prot_start), so callers must ensure this
	 * case is fine as well.
	 */
	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
	    cpu_to_be64(_PAGE_INVALID))
		return true;

	return false;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_bad(pmd);
	return hash__pmd_bad(pmd);
}

static inline void pud_clear(pud_t *pudp)
{
	if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
		/*
		 * Don't use this if we can possibly have a hash page table
		 * entry mapping this.
		 */
		WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
	}
	*pudp = __pud(0);
}

static inline int pud_none(pud_t pud)
{
	return !pud_raw(pud);
}

static inline int pud_present(pud_t pud)
{
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
}

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte_raw(pud_raw(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud_raw(pte_raw(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))

static inline int pud_bad(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_bad(pud);
	return hash__pud_bad(pud);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return pte_access_permitted(pud_pte(pud), write);
}

#define __p4d_raw(x)	((p4d_t) { __pgd_raw(x) })
static inline __be64 p4d_raw(p4d_t x)
{
	return pgd_raw(x.pgd);
}

#define p4d_write(p4d)		pte_write(p4d_pte(p4d))

static inline void p4d_clear(p4d_t *p4dp)
{
	*p4dp = __p4d(0);
}

static inline int p4d_none(p4d_t p4d)
{
	return !p4d_raw(p4d);
}

static inline int p4d_present(p4d_t p4d)
{
	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte_raw(p4d_raw(p4d));
}

static inline p4d_t pte_p4d(pte_t pte)
{
	return __p4d_raw(pte_raw(pte));
}

static inline int p4d_bad(p4d_t p4d)
{
	if (radix_enabled())
		return radix__p4d_bad(p4d);
	return hash__p4d_bad(p4d);
}

#define p4d_access_permitted p4d_access_permitted
static inline bool p4d_access_permitted(p4d_t p4d, bool write)
{
	return pte_access_permitted(p4d_pte(p4d), write);
}

extern struct page *p4d_page(p4d_t p4d);

/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;

		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
		return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
	}
	return hash__map_kernel_page(ea, pa, prot);
}

void unmap_kernel_page(unsigned long va);

static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
	if (radix_enabled())
		return radix__vmemmap_create_mapping(start, page_size, phys);
	return hash__vmemmap_create_mapping(start, page_size, phys);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static inline void vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size)
{
	if (radix_enabled())
		return radix__vmemmap_remove_mapping(start, page_size);
	return hash__vmemmap_remove_mapping(start, page_size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte_raw(pmd_raw(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd_raw(pte_raw(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)	pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)	pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define pmd_swp_mksoft_dirty(pmd)	pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
#define pmd_swp_soft_dirty(pmd)		pte_swp_soft_dirty(pmd_pte(pmd))
#define pmd_swp_clear_soft_dirty(pmd)	pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	/*
	 * pmdp_invalidate sets this combination (which is not caught by
	 * !pte_present() check in pte_access_permitted), to prevent
	 * lock-free lookups, as part of the serialize_against_pte_lookup()
	 * synchronisation.
	 *
	 * This also catches the case where the PTE's hardware PRESENT bit is
	 * cleared while TLB is flushed, which is suboptimal but should not
	 * be frequent.
	 */
	if (pmd_is_serializing(pmd))
		return false;

	return pte_access_permitted(pmd_pte(pmd), write);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmd)
{
}

extern int hash__has_transparent_hugepage(void);
static inline int has_transparent_hugepage(void)
{
	if (radix_enabled())
		return radix__has_transparent_hugepage();
	return hash__has_transparent_hugepage();
}
#define has_transparent_hugepage has_transparent_hugepage

static inline unsigned long
pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
		    unsigned long clr, unsigned long set)
{
	if (radix_enabled())
		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}
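
/*
 * returns true for pmd migration entries, THP, devmap, hugetlb
 * But compile time dependent on THP config
 */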
static inline int pmd_large(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
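
/*
 * For radix we should always find H_PAGE_HASHPTE zero. Hence
 * the below will work for radix too
 */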
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if (__pmd_write((*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
	else if (unlikely(pmd_savedwrite(*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
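
/*
 * Only returns true for a THP. False for pmd migration entry.
 * We also need to return true when we come across a pte that
 * is in between a thp split. While splitting THP, we mark the
 * pmd invalid (pmdp_invalidate()) before we set it with the pte
 * page address, so a pmd_trans_huge() check against a pmd entry
 * during that time should return true. We should not call this
 * on a hugetlb entry; check for a hugetlb vma using vma->vm_flags.
 */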
static inline int pmd_trans_huge(pmd_t pmd)
{
	if (!pmd_present(pmd))
		return false;

	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	if (radix_enabled())
		return radix__pmd_same(pmd_a, pmd_b);
	return hash__pmd_same(pmd_a, pmd_b);
}

static inline pmd_t __pmd_mkhuge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkhuge(pmd);
	return hash__pmd_mkhuge(pmd);
}
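
/*
 * pmd_mkhuge() should only be called on a pmd that already carries the
 * huge-page bits (_PAGE_PTE, and H_PAGE_THP_HUGE for hash), as set up
 * by pfn_pmd()/__pmd_mkhuge(); the DEBUG_VM checks below enforce that.
 */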
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	if (radix_enabled())
		WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)) == 0);
	else
		WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)) !=
			cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE));
#endif
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_collapse_flush(vma, address, pmdp);
	return hash__pmdp_collapse_flush(vma, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmdp, int full);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
					      pmd_t *pmdp, pgtable_t pgtable)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
}

#define __HAVE_ARCH_PGTABLE_WITHDRAW
static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
						    pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
				  struct spinlock *old_pmd_ptl,
				  struct vm_area_struct *vma);
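
/*
 * Hash translation mode uses the deposited table to store hash pte
 * slot information.
 */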
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{
	if (radix_enabled())
		return false;
	return true;
}
extern void serialize_against_pte_lookup(struct mm_struct *mm);

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkdevmap(pmd);
	return hash__pmd_mkdevmap(pmd);
}

static inline int pmd_devmap(pmd_t pmd)
{
	return pte_devmap(pmd_pte(pmd));
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pud_pfn(pud_t pud)
{
	/*
	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
	 * check so this should never be used. If it grows another user we
	 * want to know about it.
	 */
	BUILD_BUG();
	return 0;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);
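
/*
 * Returns true for a R -> RW upgrade of pte
 */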
static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val)
{
	if (!(old_val & _PAGE_READ))
		return false;

	if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE))
		return true;

	return false;
}
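
/*
 * Like pmd_huge() and pmd_large(), but works regardless of config options
 */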
#define pmd_is_leaf pmd_is_leaf
#define pmd_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

#define pud_is_leaf pud_is_leaf
#define pud_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

#define p4d_is_leaf p4d_is_leaf
#define p4d_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE));
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */