// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Called when relaxing access to a huge page (e.g. setting the accessed
 * or dirty bit): returns whether the entry actually changed, and flushes
 * the old translation if it did.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

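/*
 * Test and clear the accessed (young) bit on a huge PMD mapping.
 */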
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mm cpumask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against find_current_mm_pte(), which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t is different
 * from pmd_t, we want to prevent transit from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd to possibly replace it with a page
 * table pointer in different code paths, so make sure we wait for the
 * parallel find_current_mm_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If this is not a fullmm flush, a parallel page fault could
	 * possibly convert this PMD entry into regular level-0 PTEs.
	 * Make sure we flush the tlb in that case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of pmd_mkhuge() and
 * mk_huge_pmd() once all the other archs mark the pmd huge themselves.
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

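/*
 * Change the protection of a huge PMD: keep only the bits covered by
 * _HPAGE_CHG_MASK (the PFN and tracking bits) and apply newprot on top.
 */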
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec: tear down MMU state before handing over control */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
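/* Create/tear down linear-map coverage for a hot-plugged memory section. */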
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

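/*
 * Flush all translations for a partition (LPID). Radix uses the
 * dedicated LPID flush helpers; hash issues a single tlbie (RIC=2,
 * invalidate all) for the LPID, bracketed by the required
 * synchronisation.
 */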
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* Wait for the invalidation to complete */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When ultravisor is enabled, the partition table is stored in
	 * secure memory and can only be accessed via an ultravisor call.
	 * However, we maintain a copy of the partition table in normal
	 * memory to allow Nest MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of
	 * whether we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If ultravisor is enabled, we do an ultravisor call to register
	 * the partition table entry (PATE), which also does a global flush
	 * of TLBs and partition table caches for the lpid. Otherwise, just
	 * do the flush. The type of flush (hash or radix) depends on what
	 * the previous use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and
		 * each CPU does a tlbiel_all() before being switched on,
		 * which flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

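/*
 * PMD fragment allocator: each backing page is carved into PMD_FRAG_NR
 * fragments of PMD_FRAG_SIZE bytes. A partially used page is cached in
 * mm->context.pmd_frag, and pt_frag_refcount tracks the outstanding
 * fragments.
 */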
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the cache
		 * empty so the next allocation grabs a fresh page.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody else refilled the fragment cache while we were in the
	 * allocator, publish the remaining fragments of this page and
	 * raise the refcount accordingly; otherwise hand the page out as
	 * a single fragment.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

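/* Allocate a PMD page table, preferring a cached fragment. */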
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

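/* Drop one fragment reference; free the backing page on the last put. */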
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

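/*
 * Defer freeing of a page-table page until after the TLB flush. The
 * table kind is stashed in the low bits of the pointer (page tables are
 * aligned well beyond MAX_PGTABLE_INDEX_SIZE) and recovered in
 * __tlb_remove_table() once it is safe to free the page.
 */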
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
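/* Pages in the linear mapping, per page size, for /proc/meminfo. */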
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size, mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

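/*
 * Start/commit pair for changing protection bits on a PTE. start()
 * moves the PTE to an invalid-but-software-present state so hardware
 * cannot update it concurrently; commit() installs the final PTE.
 */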
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep pte_present() true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash
 * slot information, stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table
 * in the pmd page. Hence if we have a different pmd page we need to
 * withdraw during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or
 * not. With radix we use it only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should the kernel use tlbie for its own TLB flushing. When this is
 * clear, global flushes fall back to IPIs plus local tlbiel. It can be
 * toggled through the debugfs file created below.
 */
bool tlbie_enabled __read_mostly = true;

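/* "disable_tlbie" on the kernel command line forces the IPI fallback. */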
static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking versus TLB flushing when changing this
	 * value. A flush in flight may observe either setting, which is
	 * harmless: both tlbie and the IPI-based fallback are always
	 * correct, this knob only selects between them.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    arch_debugfs_dir,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the linear mapping uses a single page size
 * selected at boot, so device memory must be aligned to at least that
 * size to avoid demoting the linear mapping. Radix can use the generic
 * SUBSECTION_SIZE alignment.
 */
unsigned long memremap_compat_align(void)
{
	if (!radix_enabled()) {
		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
		return max(SUBSECTION_SIZE, 1UL << shift);
	}

	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

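/*
 * Build the page protection for a VMA from its flags, adding the
 * PowerPC-specific SAO and protection-key bits on top of the generic
 * protection_map entry.
 */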
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	unsigned long prot = pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_SAO)
		prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
	prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);