// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 SLB support.
 */

#include <asm/interrupt.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

#include "internal.h"


static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

bool stress_slb_enabled __initdata;

static int __init parse_stress_slb(char *p)
{
	stress_slb_enabled = true;
	return 0;
}
early_param("stress_slb", parse_stress_slb);

__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);

static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

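	/*
	 * slbfee. probes the SLB for the segment containing the given
	 * address, so clear the segment offset bits and present just the
	 * 256M-aligned ESID.
	 */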
	ea &= ~((1UL << SID_SHIFT) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

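	/*
	 * Clear the old ESID first, so the entry is never valid while it
	 * is being updated.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */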
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
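	/*
	 * Updating the shadow buffer before writing the SLB ensures we
	 * don't get a stale entry here if we get preempted by the
	 * hypervisor between these two statements.
	 */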
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

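/*
 * Insert the bolted entries into an empty SLB.
 * This does not reset the SLB cache or allocation tracking in the paca;
 * slb_restore_bolted_realmode() below does that on top of this.
 */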
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

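	/* No isync needed because we run in real mode. */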
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}

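/*
 * Insert the bolted entries into an empty SLB and reset the SLB cache and
 * allocation bitmaps to cover just the bolted entries.
 */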
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

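/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */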
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
{
	struct slb_shadow *p = get_slb_shadow();
	unsigned long ksp_esid_data, ksp_vsid_data;
	u32 ih;

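	/*
	 * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
	 * information created with Class=0 entries, which we use for kernel
	 * SLB entries (the SLB entries themselves are still invalidated).
	 *
	 * Older processors will ignore this optimisation. Over-invalidation
	 * is fine because we never rely on lookaside information existing.
	 */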
	if (preserve_kernel_lookaside)
		ih = 1;
	else
		ih = 0;

	ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
	ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

	asm volatile(PPC_SLBIA(%0)" \n"
		     "slbmte %1, %2 \n"
		     :: "i" (ih),
			"r" (ksp_vsid_data),
			"r" (ksp_esid_data)
		     : "memory");
}

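/*
 * This flushes non-bolted entries, it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */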
void slb_flush_and_restore_bolted(void)
{
	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

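	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */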
	hard_irq_disable();

	isync();
	__slb_flush_and_restore_bolted(false);
	isync();

	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx %s\n", i, e, v,
		       (e & SLB_ESID_V) ? "VALID" : "NOT VALID");

		if (!(e & SLB_ESID_V))
			continue;

		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}

	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
		pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr);

		pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
		pr_err("Valid SLB cache entries:\n");
		n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
		for (i = 0; i < n; i++)
			pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
		pr_err("Rest of SLB cache entries:\n");
		for (i = n; i < SLB_CACHE_ENTRIES; i++)
			pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	}
}

void slb_vmalloc_update(void)
{
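	/*
	 * vmalloc is not bolted, so just have to flush non-bolted.
	 */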
	slb_flush_and_restore_bolted();
}

static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}

static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
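		/* EAs are stored >> 28 so 256MB segments don't need clearing */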
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}

static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}

void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

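	/*
	 * preload cache can only be used to determine whether a SLB
	 * entry exists if it does not start to overflow.
	 */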
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

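	/*
	 * preload some userspace segments into the SLB.
	 *
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */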
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

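	/* Libraries and mmaps. */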
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

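	/* Userspace entry address. */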
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

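	/* Top of stack, grows down. */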
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

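	/* Bottom of heap, grows up. */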
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	asm volatile("isync" : : : "memory");

	local_irq_enable();
}

static void slb_cache_slbie_kernel(unsigned int index)
{
	unsigned long slbie_data = get_paca()->slb_cache[index];
	unsigned long ksp = get_paca()->kstack;

	slbie_data <<= SID_SHIFT;
	slbie_data |= 0xc000000000000000ULL;
	if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
		return;
	slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;

	asm volatile("slbie %0" : : "r" (slbie_data));
}

static void slb_cache_slbie_user(unsigned int index)
{
	unsigned long slbie_data = get_paca()->slb_cache[index];

	slbie_data <<= SID_SHIFT;
	slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
	slbie_data |= SLBIE_C;

	asm volatile("slbie %0" : : "r" (slbie_data));
}

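/* Flush all user entries from the segment table of the current processor. */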
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

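	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB
	 * miss, which would update the slb_cache/slb_cache_ptr fields in
	 * the PACA.
	 */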
	hard_irq_disable();
	isync();
	if (stress_slb()) {
		__slb_flush_and_restore_bolted(false);
		isync();
		get_paca()->slb_cache_ptr = 0;
		get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;

	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
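		/*
		 * SLBIA IH=3 invalidates all Class=1 SLB entries and their
		 * associated lookaside structures, while leaving the bolted
		 * kernel (Class=0) entries in place, so nothing has to be
		 * re-bolted afterwards.
		 */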
		asm volatile(PPC_SLBIA(3));

	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
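			/*
			 * The SLB cache holds the ESIDs of the user entries
			 * inserted since the last switch, so each of them can
			 * be invalidated individually.
			 */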
			for (i = 0; i < offset; i++)
				slb_cache_slbie_user(i);

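			/* Workaround POWER5 < DD2.1 issue */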
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				slb_cache_slbie_user(0);

		} else {
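			/* Flush but retain kernel lookaside information */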
			__slb_flush_and_restore_bolted(true);
			isync();

			get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
		}

		get_paca()->slb_cache_ptr = 0;
	}
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	copy_mm_to_paca(mm);

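	/*
	 * We gradually age out SLBs after a number of context switches to
	 * reduce reload overhead of unused entries (like we do with FP/VEC
	 * reload). Each time the load_slb counter wraps, retire one entry
	 * from the preload cache and preload the current NIP instead.
	 */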
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

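	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */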
	isync();
}

void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

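	/* Prepare our SLB miss handler based on our page size */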
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

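	/* Invalidate the entire SLB (even entry 0) & all the ERATs */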
	asm volatile("isync":::"memory");
	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

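	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */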
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	if (stress_slb())
		return;

	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
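		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */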
		local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
		local_paca->slb_cache_ptr++;
	} else {
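		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */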
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

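	/*
	 * The first 32 SLB entries are tracked in slb_used_bitmap and handed
	 * out from there; anything beyond 32 is allocated with stab_rr only.
	 * POWER7/8/9 have 32 SLB entries, this could be expanded if a future
	 * CPU has more.
	 */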
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
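		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */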
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}

static long slb_insert_entry(unsigned long ea, unsigned long context,
			     unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

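	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * the slbmte here, or the allocation bitmaps could get out of whack
	 * with the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */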
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

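	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */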
	assert_slb_presence(false, ea);
	if (stress_slb()) {
		int slb_cache_index = local_paca->slb_cache_ptr;

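		/*
		 * stress_slb() does not use the slb cache, so repurpose it as
		 * a cache of inserted (non-bolted) kernel SLB entries. All
		 * non-bolted kernel entries are flushed on any user fault,
		 * or if there are already 3 non-bolted kernel entries.
		 */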
		BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
		if (!kernel || slb_cache_index == 3) {
			int i;

			for (i = 0; i < slb_cache_index; i++)
				slb_cache_slbie_kernel(i);
			slb_cache_index = 0;
		}

		if (kernel)
			local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
		local_paca->slb_cache_ptr = slb_cache_index;
	}
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}

static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == LINEAR_MAP_REGION_ID) {

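		/* We only support up to H_MAX_PHYSMEM_BITS of the linear mapping. */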
		if ((ea & EA_MASK) > (1UL << H_MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if (ea >= H_VMEMMAP_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if (ea >= H_VMALLOC_END)
			return -EFAULT;

		flags = local_paca->vmalloc_sllp;

	} else if (id == IO_REGION_ID) {

		if (ea >= H_KERN_IO_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;

	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);

	return slb_insert_entry(ea, context, flags, ssize, true);
}

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

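	/*
	 * Consider this a bad access if we take an SLB miss on an address
	 * above the addr limit.
	 */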
	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}

DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
{
	unsigned long ea = regs->dar;
	unsigned long id = get_region_id(ea);

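	/* IRQs are not reconciled here, so can't check irqs_disabled(). */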
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (regs_is_unrecoverable(regs))
		return -EINVAL;

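	/*
	 * SLB kernel faults must be very careful not to touch anything that is
	 * not bolted. E.g., PACA and global variables are okay, mm->context
	 * stuff is not. SLB user faults may access all of memory (and induce
	 * one recursive SLB kernel fault), so the kernel fault must not
	 * trample on the user fault state at those points.
	 *
	 * This is a raw interrupt handler, for performance, so that
	 * fast_interrupt_return can be used. The handler must not touch local
	 * irq state, or schedule.
	 */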
	if (id >= LINEAR_MAP_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
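		/* Catch recursive kernel SLB faults. */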
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}