/*
 * Secure pages management: migration of pages between normal and secure
 * memory of KVM guests on Ultravisor-enabled POWER platforms.
 */
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <linux/of.h>
#include <linux/memremap.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
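
/*
 * Each memslot registered for a secure guest gets a kvmppc_uvmem_slot
 * carrying one word of state per GFN.  The top bits of that word encode
 * the GFN's current state:
 *
 *  - KVMPPC_GFN_UVMEM_PFN: secure GFN backed by a device (secure) PFN;
 *    the remaining bits hold that PFN.
 *  - KVMPPC_GFN_MEM_PFN:   secure GFN backed by normal memory.
 *  - KVMPPC_GFN_SHARED:    GFN shared between hypervisor and Ultravisor.
 *  - no flag set:          normal GFN.
 *
 * kvm->arch.uvmem_lock serializes updates to these per-GFN state words
 * and the page transitions; kvmppc_uvmem_bitmap_lock protects the
 * allocation bitmap of device PFNs.
 */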
#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * The bitmap is allocated only when an Ultravisor is present and
	 * the device memory was set up successfully in kvmppc_uvmem_init().
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

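/*
 * Free the per-GFN state array set up by kvmppc_uvmem_slot_init() for
 * this memslot.
 */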
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

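/*
 * Record the given state flag for a GFN in its memslot's state array;
 * for KVMPPC_GFN_UVMEM_PFN the backing device PFN is stored alongside.
 */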
static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
		unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as secure and backed by a device (uvmem) PFN */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
		unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as secure and backed by normal memory */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as shared */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a normal GFN (clear all state flags) */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/*
 * Return true if @gfn is a secure GFN backed by a device PFN; if so and
 * @uvmem_pfn is non-NULL, return that device PFN through it.
 */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
		unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else
				return false;
		}
	}
	return false;
}

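/*
 * Starting from *gfn, search the memslot for the next GFN that has no
 * state flag set (i.e. is still a normal GFN) and return it in *gfn.
 * Returns false when no such GFN remains in the memslot.
 * Must be called with kvm->arch.uvmem_lock held.
 */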
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
		struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;
	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

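/*
 * Mark the memslot's backing VMAs MADV_UNMERGEABLE before the memory
 * turns secure (and MADV_MERGEABLE again on cleanup) so that KSM does
 * not merge pages that are being transitioned to secure pages.
 */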
static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

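/*
 * H_SVM_INIT_START: begin the transition of the guest to a secure guest:
 * mark the VM as transitioning, unmerge its memory from KSM and register
 * every memslot with the Ultravisor.
 */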
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx, bkt;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register the memslots */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	/* on failure, undo the memslots that were already registered */
	if (ret) {
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, bkt, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

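/*
 * Move a page from secure (device) memory back to normal memory: allocate
 * a destination page, have the Ultravisor copy the contents out (unless
 * skip_page_out is set) and let migrate_vma replace the device mapping.
 * Called with kvm->arch.uvmem_lock held.
 */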
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* The requested page is already paged-out, return success */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * Skip the UV_PAGE_OUT call when the page contents are not needed
	 * (e.g. the memslot or the VM is going away); otherwise have the
	 * Ultravisor copy the secure page out into the destination page.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn);
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}

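/*
 * Drop the device pages of a memslot: page every secure, device-backed
 * GFN back to normal memory (skipping the copy when @skip_page_out is
 * set, e.g. on VM teardown) and clear the recorded GFN state.  Used on
 * memslot removal, H_SVM_INIT_ABORT and VM teardown.
 */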
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
		struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = vma_lookup(kvm->mm, addr);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Not device-backed: just drop any recorded state */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}

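/*
 * H_SVM_INIT_ABORT: abort the transition to secure mode.  Page all secure
 * pages back to normal memory and reset the VM's secure state so it can
 * continue running as a normal guest.
 */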
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx, bkt;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE has completed, use the normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

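/*
 * Allocate a free device PFN from the bitmap, set up its private data
 * (owning kvm and gpa) and mark the GFN as secure and device-backed.
 * Returns the device page locked, or NULL if no device PFN is free or
 * the private data cannot be allocated.
 */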
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

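/*
 * Migrate the page at [start, end) from normal memory into a device page
 * representing secure memory.  When @pagein is true the page contents are
 * also copied into secure memory via UV_PAGE_IN; otherwise only the GPA
 * range is claimed for the secure guest.
 * Called with kvm->arch.uvmem_lock held.
 */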
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					 gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage));
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

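/*
 * Migrate every GFN of the memslot that is still in the normal state into
 * device (secure) memory.  Used by H_SVM_INIT_DONE and when a new memslot
 * is added to an already secure guest.
 */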
static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the cpu if needed */
		cond_resched();
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);
	return ret;
}

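/*
 * H_SVM_INIT_DONE: complete the transition to a secure guest by migrating
 * all remaining normal GFNs into secure memory.
 */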
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any not-yet-transitioned pages to secure memory */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * Some pages could not be migrated; report H_STATE
			 * and leave the guest in its current, partially
			 * transitioned state.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

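/*
 * Share the page with the hypervisor, making it a normal page.
 *
 * - If the page is currently secure, a fresh normal page is provisioned
 *   for it (the device page is released via the migrate_to_ram handler
 *   when gfn_to_pfn() faults it back in).
 * - If it is already a normal page, the existing page is shared.
 *
 * The resulting page is then registered with the Ultravisor via
 * UV_PAGE_IN so that it is visible to both HV and UV.
 */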
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * Do not drop the GFN: it remains a valid GFN that is
		 * transitioning to the shared state.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false;
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

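/*
 * H_SVM_PAGE_IN: move a page from normal memory to secure memory.  With
 * the H_PAGE_IN_SHARED flag the page is instead shared, i.e. the same
 * memory stays visible to both the hypervisor and the Ultravisor.
 */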
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
			       true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

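/*
 * Fault handler of the device-private pages: the hypervisor (or QEMU)
 * touched a page that currently lives in secure memory, so page it back
 * out to normal memory with UV_PAGE_OUT.
 */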
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

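/*
 * Release the device PFN back to the pool when the device page is freed,
 * and update the GFN state: remove the GFN if requested, otherwise record
 * that the secure GFN is now backed by normal memory.
 */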
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

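/*
 * H_SVM_PAGE_OUT: move a page from secure memory to normal memory.
 */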
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

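/*
 * Return the total amount of secure memory exported by the platform, as
 * described in the device tree.
 */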
static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes which supersede the
	 * secure-memory-ranges property.  If some are found, don't bother
	 * with the deprecated property.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if the
		 * platform exports no secure memory: normal (non-secure)
		 * guests can still run on such a system.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	kfree(kvmppc_uvmem_bitmap);
}