// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

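/*
 * Build the slice mask covering the address range [start, start + len):
 * every low and high slice the range touches gets its bit set, all
 * other bits are cleared.
 *
 * Worked example (an illustration assuming book3s64 geometry, where
 * SLICE_LOW_SHIFT == 28, i.e. 256MB low slices): start = 0x10000000,
 * len = 0x20000000 covers 0x10000000..0x2fffffff and sets low_slices
 * bits 1 and 2 (mask 0x6).
 */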
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

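/*
 * Return 1 if [addr, addr + len) fits below the address-space limit
 * and intersects no VMA (accounting for stack guard gaps via
 * vm_start_gap()), 0 otherwise. Requires mmap_lock, as find_vma() does.
 */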
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/*
	 * Hack: so that each address is controlled by exactly one of the
	 * high or low area bitmaps, the first high area starts at
	 * SLICE_LOW_TOP, not at 0.
	 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

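/*
 * Compute the mask of slices containing no VMAs at all, up to
 * high_limit. These are the candidates for conversion to a new
 * page size.
 */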
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

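/*
 * Return true if every slice touched by [start, start + len) is set
 * in 'available', i.e. the range fits entirely within that slice mask.
 */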
static bool slice_check_range_fits(struct mm_struct *mm,
				   const struct slice_mask *available,
				   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

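/*
 * IPI handler: on the CPUs currently running this mm, re-copy the
 * context into the paca and flush the non-bolted SLB entries so no
 * stale segment mappings survive a slice conversion.
 */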
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

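/*
 * Convert every slice set in 'mask' to 'psize': rewrite the packed
 * low/high psize arrays and move the affected bits between the cached
 * per-size slice masks. Coprocessor SLBs are flushed here; flushing
 * CPU SLBs is left to the callers (see slice_flush_segments()).
 */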
static void slice_convert(struct mm_struct *mm,
			  const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/*
	 * We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

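/*
 * Bottom-up search: walk upward from 'addr', coalesce each run of
 * available slices into a [low_limit, high_limit) window and ask
 * vm_unmapped_area() for a fit inside it.
 */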
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long addr, unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	/*
	 * Check until the allowed max value for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

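/*
 * Top-down counterpart of the above: walk downward from 'addr',
 * coalescing runs of available slices and searching each window with
 * VM_UNMAPPED_AREA_TOPDOWN, with a bottom-up fallback (see below).
 */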
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long addr, unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only apply this to
	 * requests whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
				   const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
				 const struct slice_mask *src1,
				 const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
				     const struct slice_mask *src1,
				     const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

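/*
 * Main address-selection entry point for the hash MMU. Find (or, for
 * MAP_FIXED, validate) a range whose slices either already use 'psize'
 * or are free and can be converted to it, converting and flushing
 * segments as required before returning the address.
 */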
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * the slice mask cache to be recalculated because it should
		 * already be initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore the hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/*
	 * First make up a "good" mask of slices that already have the
	 * right size.
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free
	 */

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check the hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can fit in the new potential area */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

convert:
	/*
	 * Try to allocate the context before we do the slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
	    (SLICE_NUM_HIGH &&
	     !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	if (radix_enabled())
		return generic_get_unmapped_area(filp, addr, len, pgoff, flags);

	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	if (radix_enabled())
		return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);

	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}

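/*
 * Look up the page-size index of the slice containing 'addr'. Each
 * byte of the psize arrays packs two 4-bit entries (even slice in the
 * low nibble, odd slice in the high nibble); for example, a byte of
 * 0x54 means the even-numbered slice uses psize 4 and the next one
 * psize 5.
 */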
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

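/*
 * Initialise the slice state of a freshly exec'd mm: default SLB
 * address limit, the default page size on every slice, and a fully
 * populated slice-mask cache entry for that size.
 */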
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}

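/*
 * Force every slice covering [start, start + len) to 'psize',
 * bypassing the fit/convert logic of slice_get_unmapped_area().
 * Callers are expected to have dealt with any existing mappings in
 * the range.
 */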
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the vma */
	if (radix_enabled())
		return vma_kernel_pagesize(vma);

	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
}

static int file_to_psize(struct file *file)
{
	struct hstate *hstate = hstate_file(file);

	return shift_to_mmu_psize(huge_page_shift(hstate));
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (radix_enabled())
		return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);

	return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
}
#endif