/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (weigand@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

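/*
 * Counters for the page sizes used in the kernel direct mapping; with
 * CONFIG_PROC_FS they feed arch_report_meminfo() (the DirectMap lines
 * in /proc/meminfo).
 */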
enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
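/*
 * Note: zero_page_mask selects one of several zero pages, so reads of
 * the shared zero page are spread over multiple cache "colors";
 * __HAVE_COLOR_ZERO_PAGE tells common code that ZERO_PAGE(vaddr)
 * depends on the address.
 */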

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default. At the top
 * of the vmalloc area a 2GB area is reserved for modules (MODULES_LEN
 * below), so that branches between modules and the kernel stay within a
 * 2GB frame.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit pagetable entry of S390 has following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * Segment and region table entries hold the origin of the next lower
 * table together with an invalid bit (I), a protection bit (P) and the
 * table type/length fields (TT/TL); a storage key consists of the
 * access-control bits (ACC), the fetch-protection bit (F) and the
 * referenced (R) and changed (C) bits.
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * With the pte bits above this gives (see the query functions below):
 * pte_none is true for a pte that contains only _PAGE_INVALID,
 * pte_swap is true if _PAGE_PROTECT is set but _PAGE_PRESENT is not,
 * and pte_present is true whenever _PAGE_PRESENT is set.
 */

/* Bits in the segment/region table address-space-control element */
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048
#define _PAGE_ENTRIES	256

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding: the hardware protect
 * and invalid bits are derived from the software write/dirty and
 * read/young bits, so that a clean or old large mapping takes an
 * exception on the first store or reference; dirty and young state can
 * then be tracked in software (see pmd_mkdirty()/pmd_mkyoung() below).
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alternate-space-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

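/*
 * csp() and cspg() wrap the COMPARE AND SWAP AND PURGE instructions,
 * which atomically replace a 4 resp. 8 byte table entry and purge the
 * TLBs of all CPUs; the even/odd register pair carries the expected
 * old value and the new value.
 */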
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

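/*
 * crdte() wraps COMPARE AND REPLACE DAT TABLE ENTRY: it exchanges a
 * table entry of the type given by dtt and, unlike csp(), only purges
 * TLB entries formed through the replaced entry, identified by the
 * address/asce pair.
 */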
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * The PCI MIO write-back bit, set at boot; assumed to be the
 * declaration that was lost with the stripped comments, pte_pgprot()
 * below needs it.
 */
extern unsigned long mio_wb_bit_mask;

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/p4d/pud/pmd/pte modification functions. All stores go through
 * WRITE_ONCE() so that lockless page table walkers never observe a
 * torn entry.
 */
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * The invalid bit that came with newprot is only cleared for
	 * young, readable ptes; otherwise the first reference faults
	 * and the young bit gets updated there.
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * Likewise, the protection bit is only cleared for writable,
	 * dirty ptes, so that a clean pte takes a protection fault on
	 * the first store and the dirty bit can be tracked.
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

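/*
 * INVALIDATE PAGE TABLE ENTRY: sets the invalid bit in the pte and
 * purges the matching TLB entries. The m4 field selects a local
 * (IPTE_LOCAL, current CPU only) or global flush; IPTE_NODAT and
 * IPTE_GUEST_ASCE are option bits passed in the r3 register together
 * with the ASCE origin, see __ptep_ipte() below.
 */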
static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

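/*
 * The range form of IPTE used below invalidates consecutive ptes; the
 * CPU may stop after doing only part of the range, updating the
 * address and count registers, so the do/while loop reissues the
 * instruction until completion (apparently signalled by the count
 * byte wrapping to 255 once the last pte has been invalidated).
 */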
static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be done is to have
 * ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range is
 * a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do for non-protected, non-present ptes */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead.
	 */
	uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		set_pte(ptep, entry);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	if (!MACHINE_HAS_NX)
		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * pgd_offset_raw() derives the shift to use from the type of the first
 * entry of the top-level table, so it indexes correctly no matter how
 * many levels (region-first down to segment) this mm actually uses.
 * The *_offset_lockless() helpers below only add their index when the
 * entry really is of the corresponding higher-level type; for a folded
 * hierarchy they just pass the pointer through.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

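/*
 * gup_fast walks page tables without taking locks. Limiting it to
 * addresses below the asce_limit keeps the walk within the table
 * levels that actually exist for this mm (the hierarchy may be folded,
 * see the mm_*_folded() helpers above).
 */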
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX;
	 * convert to the matching segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long mask;

	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
	mask |= _SEGMENT_ENTRY_DIRTY;
	mask |= _SEGMENT_ENTRY_YOUNG;
	mask |= _SEGMENT_ENTRY_LARGE;
	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd = __pmd(pmd_val(pmd) & mask);
	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

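/*
 * INVALIDATE DAT TABLE ENTRY: the counterpart of IPTE for segment and
 * region table entries. r1 carries the table origin (plus the region
 * type for region entries), which is why __pmdp_idte()/__pudp_idte()
 * first compute the origin from the entry address and index.
 */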
static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

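/*
 * pmd/pud counterparts of ptep_xchg_direct()/ptep_xchg_lazy(); like
 * those they combine the entry update with the required TLB flush and
 * are implemented in arch/s390/mm/pgtable.c.
 */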
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

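/*
 * Transparent huge pages need EDAT1 (enhanced DAT facility 1), which
 * provides the 1 MB large-page segment table entry format.
 */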
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format, built from the pte bit definitions above:
 * a swap pte has _PAGE_INVALID and _PAGE_PROTECT set and _PAGE_PRESENT
 * clear, the swap type in the bits selected by __SWP_TYPE_SHIFT/MASK
 * and the swap offset in the page frame address bits (12 and up).
 * _PAGE_SWP_EXCLUSIVE and _PAGE_SOFT_DIRTY keep their usual positions
 * and survive the swap encoding; pte_swap() above recognizes the
 * pattern (pte & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT.
 */
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

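/*
 * For illustration: type 3, offset 0x1234 yields
 *	(0x1234 << 12) | (3 << 2) | _PAGE_INVALID | _PAGE_PROTECT
 *	= 0x1234000 | 0xc | 0x600,
 * a pte that is neither pte_present() nor pte_none(), but pte_swap().
 */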
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */