0001
0002 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
0003 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
0004
0005 #include <asm-generic/pgtable-nopmd.h>
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
/*
 * PTE bit definitions for the 32-bit hash MMU.  The low 12 bits of a PTE
 * hold flags; the bits from PTE_RPN_SHIFT up hold the physical page number.
 * G/M/I/W mirror the hardware WIMG storage-attribute bits; the others are
 * software-defined.
 */
#define _PAGE_PRESENT 0x001	/* software: PTE contains a translation */
#define _PAGE_HASHPTE 0x002	/* software: a HPTE exists for this PTE */
#define _PAGE_USER 0x004	/* software: user-mode access allowed */
#define _PAGE_GUARDED 0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010	/* M: enforce memory coherence (SMP) */
#define _PAGE_NO_CACHE 0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040	/* W: cache write-through */
#define _PAGE_DIRTY 0x080	/* C: page changed */
#define _PAGE_ACCESSED 0x100	/* R: page referenced */
#define _PAGE_EXEC 0x200	/* software: exec allowed */
#define _PAGE_RW 0x400		/* software: write access allowed */
#define _PAGE_SPECIAL 0x800	/* software: special page */

#ifdef CONFIG_PTE_64BIT
/*
 * With 64-bit PTEs the whole high word plus _PAGE_HASHPTE is ignored when
 * testing for an empty PTE (pte_none()): a stale hash entry may linger in
 * an otherwise cleared PTE.
 */
#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif

/* PMD level is folded: a PMD entry is "present" when it holds a non-zero
 * page-aligned pointer, and "bad" when any low (non-PAGE_MASK) bit is set. */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)

/* Permission sets used for kernel mappings; RO needs no extra bits. */
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

/* Bits owned by the hash-table management code. */
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
0053
0054 #ifndef __ASSEMBLY__
0055
0056 static inline bool pte_user(pte_t pte)
0057 {
0058 return pte_val(pte) & _PAGE_USER;
0059 }
0060 #endif
0061
0062
0063
0064
0065
0066
/* Position of the real (physical) page number within a PTE. */
#define PTE_RPN_SHIFT (PAGE_SHIFT)

/*
 * Mask selecting the RPN bits.  With CONFIG_PTE_64BIT the PTE is 64 bits
 * wide and can hold up to 36 physical address bits; otherwise the physical
 * address space matches the 32-bit virtual one.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

/*
 * Bits preserved across a protection change (see pte_modify()): the page
 * frame, the hash state, and the referenced/changed/special tracking bits.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			_PAGE_ACCESSED | _PAGE_SPECIAL)

/* Flags common to every valid mapping; _PAGE_BASE additionally enforces
 * coherence (M bit), _PAGE_BASE_NC is the non-coherent variant. */
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
0096
0097
0098
0099
0100
0101
0102
0103
/*
 * User mapping permission combinations.  Note there is no write-only
 * variant: any writable mapping is also readable.
 */
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Kernel (supervisor-only) mappings in various cacheability flavours. */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Kernel text is normally mapped read-only + exec, but debuggers and
 * runtime code patchers (kgdb, xmon, kprobes, dynamic ftrace, ...) need
 * it writable so they can insert breakpoints/patches.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
#endif

/* Executable kernel mappings (e.g. for modules). */
#define PAGE_KERNEL_EXEC PAGE_KERNEL_X

/* AGP/DRM requires uncached mappings. */
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

/*
 * Two-level table geometry: PMD and PUD are folded (index size 0), the
 * PGD covers the remaining high bits of the 32-bit address.
 */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Low bits of a PMD entry that are not part of the PTE-page pointer. */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
/* Virtual address span covered by one PGD entry. */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Number of PGD slots covering user space. */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif

#include <asm/fixmap.h>

/*
 * ioremap space grows downward from the top of the kernel virtual area:
 * below the pkmap window when highmem is configured, otherwise directly
 * below the fixmap region.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* ioremap mappings live inside the vmalloc range. */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END

/*
 * vmalloc space starts above lowmem, rounded up to VMALLOC_OFFSET so a
 * guard gap separates it from the directly-mapped memory, and ends at
 * ioremap_bot (the current bottom of the ioremap allocations).
 */
#define VMALLOC_OFFSET (0x1000000)

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
/* Keep the end aligned so a whole KASAN shadow page maps the tail. */
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif

/* Module area: the 256MB immediately below the kernel linear mapping. */
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
0236
0237 #ifndef __ASSEMBLY__
0238 #include <linux/sched.h>
0239 #include <linux/threads.h>
0240
0241
/* A PGD entry points directly at a PTE page; no extra bits to mask. */
#define PGD_MASKED_BITS 0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Clearing a PTE deliberately preserves _PAGE_HASHPTE: the hash-table
 * entry (if any) is still live and must be invalidated by the flush
 * code rather than silently forgotten.
 */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

/* PMD entries hold a page-aligned pointer to a PTE page, or 0 if empty. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
/* Empty a PMD slot (the folded 2-level layout stores a plain pointer). */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
0264
0265
0266
0267
0268
0269
0270 extern int flush_hash_pages(unsigned context, unsigned long va,
0271 unsigned long pmdval, int count);
0272
0273
0274 extern void add_hash_page(unsigned context, unsigned long va,
0275 unsigned long pmdval);
0276
0277
0278 static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
0279 {
0280 if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
0281 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
0282
0283 flush_hash_pages(mm->context.id, addr, ptephys, 1);
0284 }
0285 }
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
/*
 * Atomically clear then set bits in a PTE and return its previous value.
 *
 * @mm, @addr: the address space / virtual address (not used by this
 *             implementation, kept for API symmetry with other platforms)
 * @p:    PTE to modify
 * @clr:  bits to clear
 * @set:  bits to set afterwards
 * @huge: huge-page flag (not used on this 2-level layout)
 *
 * When a hash table is present the update must use lwarx/stwcx. so a
 * concurrent hash-fault handler setting _PAGE_HASHPTE cannot be lost.
 * With 64-bit PTEs the flag bits live in the low 32-bit word (big
 * endian), so the reservation is taken on the low word at p+4 while the
 * high word is fetched with a plain lwz.  Without a hash table a plain
 * read-modify-write is sufficient.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		unsigned long tmp;

		asm volatile(
#ifndef CONFIG_PTE_64BIT
			"1: lwarx %0, 0, %3\n"
			"   andc %1, %0, %4\n"
#else
			"1: lwarx %L0, 0, %3\n"
			"   lwz %0, -4(%3)\n"
			"   andc %1, %L0, %4\n"
#endif
			"   or %1, %1, %5\n"
			"   stwcx. %1, 0, %3\n"
			"   bne- 1b"
			: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
			: "r" (p),
#else
			: "b" ((unsigned long)(p) + 4),
#endif
			  "r" (clr), "r" (set), "m" (*p)
			: "cc" );
	} else {
		old = pte_val(*p);

		*p = __pte((old & ~(pte_basic_t)clr) | set);
	}

	return old;
}
0333
0334
0335
0336
0337
0338 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
0339 static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
0340 unsigned long addr, pte_t *ptep)
0341 {
0342 unsigned long old;
0343 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
0344 if (old & _PAGE_HASHPTE)
0345 flush_hash_entry(mm, ptep, addr);
0346
0347 return (old & _PAGE_ACCESSED) != 0;
0348 }
0349 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
0350 __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
0351
0352 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
0353 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
0354 pte_t *ptep)
0355 {
0356 return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
0357 }
0358
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/* Remove write permission from a PTE, atomically via pte_update(). */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
0365
0366 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
0367 pte_t *ptep, pte_t entry,
0368 unsigned long address,
0369 int psize)
0370 {
0371 unsigned long set = pte_val(entry) &
0372 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
0373
0374 pte_update(vma->vm_mm, address, ptep, 0, set, 0);
0375
0376 flush_tlb_page(vma, address);
0377 }
0378
#define __HAVE_ARCH_PTE_SAME
/* Compare PTEs ignoring _PAGE_HASHPTE, which the hash code flips freely. */
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/*
 * Swap entry layout in a non-present PTE: 5 bits of type, then the
 * offset.  The whole value is shifted left by 3 so bits 0-2
 * (_PAGE_PRESENT, _PAGE_HASHPTE, _PAGE_USER) remain clear.
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
0396
0397
0398 static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
0399 static inline int pte_read(pte_t pte) { return 1; }
0400 static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
0401 static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
0402 static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
0403 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
0404 static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
0405
0406 static inline int pte_present(pte_t pte)
0407 {
0408 return pte_val(pte) & _PAGE_PRESENT;
0409 }
0410
0411 static inline bool pte_hw_valid(pte_t pte)
0412 {
0413 return pte_val(pte) & _PAGE_PRESENT;
0414 }
0415
0416 static inline bool pte_hashpte(pte_t pte)
0417 {
0418 return !!(pte_val(pte) & _PAGE_HASHPTE);
0419 }
0420
0421 static inline bool pte_ci(pte_t pte)
0422 {
0423 return !!(pte_val(pte) & _PAGE_NO_CACHE);
0424 }
0425
0426
0427
0428
0429
0430 #define pte_access_permitted pte_access_permitted
0431 static inline bool pte_access_permitted(pte_t pte, bool write)
0432 {
0433
0434
0435
0436
0437 if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
0438 return false;
0439
0440 if (write && !pte_write(pte))
0441 return false;
0442
0443 return true;
0444 }
0445
0446
0447
0448
0449
0450
0451
0452 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
0453 {
0454 return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
0455 pgprot_val(pgprot));
0456 }
0457
0458 static inline unsigned long pte_pfn(pte_t pte)
0459 {
0460 return pte_val(pte) >> PTE_RPN_SHIFT;
0461 }
0462
0463
0464 static inline pte_t pte_wrprotect(pte_t pte)
0465 {
0466 return __pte(pte_val(pte) & ~_PAGE_RW);
0467 }
0468
0469 static inline pte_t pte_exprotect(pte_t pte)
0470 {
0471 return __pte(pte_val(pte) & ~_PAGE_EXEC);
0472 }
0473
0474 static inline pte_t pte_mkclean(pte_t pte)
0475 {
0476 return __pte(pte_val(pte) & ~_PAGE_DIRTY);
0477 }
0478
0479 static inline pte_t pte_mkold(pte_t pte)
0480 {
0481 return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
0482 }
0483
0484 static inline pte_t pte_mkexec(pte_t pte)
0485 {
0486 return __pte(pte_val(pte) | _PAGE_EXEC);
0487 }
0488
0489 static inline pte_t pte_mkpte(pte_t pte)
0490 {
0491 return pte;
0492 }
0493
0494 static inline pte_t pte_mkwrite(pte_t pte)
0495 {
0496 return __pte(pte_val(pte) | _PAGE_RW);
0497 }
0498
0499 static inline pte_t pte_mkdirty(pte_t pte)
0500 {
0501 return __pte(pte_val(pte) | _PAGE_DIRTY);
0502 }
0503
0504 static inline pte_t pte_mkyoung(pte_t pte)
0505 {
0506 return __pte(pte_val(pte) | _PAGE_ACCESSED);
0507 }
0508
0509 static inline pte_t pte_mkspecial(pte_t pte)
0510 {
0511 return __pte(pte_val(pte) | _PAGE_SPECIAL);
0512 }
0513
0514 static inline pte_t pte_mkhuge(pte_t pte)
0515 {
0516 return pte;
0517 }
0518
0519 static inline pte_t pte_mkprivileged(pte_t pte)
0520 {
0521 return __pte(pte_val(pte) & ~_PAGE_USER);
0522 }
0523
0524 static inline pte_t pte_mkuser(pte_t pte)
0525 {
0526 return __pte(pte_val(pte) | _PAGE_USER);
0527 }
0528
0529 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
0530 {
0531 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
0532 }
0533
0534
0535
0536
0537
0538
0539
0540
/*
 * Install a PTE value, always preserving the current _PAGE_HASHPTE bit,
 * which is maintained asynchronously by the hash-fault code.
 *
 * @percpu: the mapping is private to this CPU (e.g. per-CPU data), so no
 *          other CPU's hash handler can race with us and a plain store
 *          is sufficient.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/*
	 * Case 1: 32-bit PTEs on SMP.  Use the atomic pte_update() so a
	 * concurrent hash fault setting _PAGE_HASHPTE is never lost;
	 * percpu mappings can take the cheap non-atomic path.
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
	/*
	 * Case 2: 64-bit PTEs.  A single atomic 64-bit store is not
	 * possible, so flush any existing hash entry first, then write
	 * the high word before the flag-carrying low word (%L2), with an
	 * eieio between the two stores to order them.  NOTE(review): the
	 * second operand addresses ptep+4, i.e. the big-endian low word.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%X0 %2,%0\n\
		eieio\n\
		stw%X1 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/*
	 * Case 3: 32-bit PTEs on UP.  No other CPU exists, so a plain
	 * store (still preserving _PAGE_HASHPTE) is always safe.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
0590
0591
0592
0593
0594
/* All PTE bits that control cacheability (the WIMG storage attributes). */
#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

/* Cache-inhibited and guarded: for device/MMIO mappings. */
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

/* Cache-inhibited but not guarded: write-combining-style mappings. */
#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

/* Cacheable and coherent. */
#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

/* Cacheable, coherent, write-through. */
#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

/* Cacheable but non-coherent: all WIMG bits cleared. */
#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

/* Write-combine maps to non-cached (no guard) on this MMU. */
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
0637
0638 #endif
0639
0640 #endif