// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Book3S support for sPAPR TCE (virtual IOMMU) tables: the VM ioctl
 * that creates in-kernel guest TCE tables and the H_PUT_TCE family of
 * hypercall handlers.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

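/* Look up the guest's TCE table by its logical IO bus number (LIOBN). */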
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}

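/*
 * Number of host pages needed to store the shadow TCE table for
 * @iommu_pages TCE entries (one u64 per entry).
 */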
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

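/*
 * Total locked-memory footprint of a TCE table, in pages: the TCE pages
 * themselves plus the kvmppc_spapr_tce_table descriptor with its page array.
 */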
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

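/* RCU callback: drop the hardware table reference and free the wrapper. */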
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

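/* kref release: unlink the iommu table from the LIOBN and free it after RCU. */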
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

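/*
 * Called when an IOMMU group is released: drop every reference the guest's
 * TCE tables hold on the hardware tables of that group.
 */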
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
                cond_resched_rcu();
        }
        rcu_read_unlock();
}

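/*
 * Associate a guest TCE table (identified by the file descriptor returned
 * by the create-TCE-table ioctl) with a hardware iommu_table of the given
 * IOMMU group, so that H_PUT_TCE and friends update the hardware table too.
 */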
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        rcu_read_lock();
        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the hardware table so it cannot be
                         * released while this LIOBN is attached to it.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        rcu_read_unlock();
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM; we only increased
                 * its reference counter, so we are done.
                 */
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}

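/* Return the backing page for a TCE-table page index, allocating it lazily. */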
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                WARN_ON_ONCE(!page);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        account_locked_vm(kvm->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

        kvm_put_kvm(stt->kvm);

        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap = kvm_spapr_tce_mmap,
        .release = kvm_spapr_tce_release,
};

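/*
 * Backend for the VM ioctl that creates an in-kernel TCE table: accounts the
 * locked memory, allocates the descriptor and returns an anonymous inode fd
 * whose mmap/release methods are defined above.
 */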
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        else
                kvm_put_kvm_no_destroy(kvm);

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

        kfree(stt);
fail_acct:
        account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
}

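/* Convert the guest physical address in a TCE to a userspace address. */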
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return 0;
}

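/*
 * Validate a TCE before it is written: check the permission bits, the page
 * alignment and, for tables attached to hardware IOMMUs, that the address
 * belongs to preregistered memory.
 */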
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
                return H_TOO_HARD;

        rcu_read_lock();
        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
                        rcu_read_unlock();
                        return H_TOO_HARD;
                }
        }
        rcu_read_unlock();

        return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;
        unsigned long sttpage;

        idx -= stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

        if (!page) {
                /* We allow any TCE, not just with read|write permissions */
                if (!tce)
                        return;

                page = kvm_spapr_get_tce_page(stt, sttpage);
                if (!page)
                        return;
        }
        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

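/* Clear the hardware TCE(s) covering one guest TCE entry. */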
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
                struct iommu_table *tbl, unsigned long entry)
{
        unsigned long i;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

        for (i = 0; i < subpages; ++i) {
                unsigned long hpa = 0;
                enum dma_data_direction dir = DMA_NONE;

                iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
        }
}

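/*
 * Decrement the "mapped" counter of the preregistered memory region backing
 * a hardware TCE entry and clear the stored userspace address.
 */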
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

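/* Unmap a single hardware TCE entry and release its preregistered memory. */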
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                        &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

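/* Unmap all hardware TCEs backing one guest TCE entry, then flush (kill) them. */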
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

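/*
 * Map a single hardware TCE entry to the host page backing @ua, taking a
 * reference on the preregistered memory region that contains it.
 */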
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

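/* Map all hardware TCEs backing one guest TCE entry, then flush (kill) them. */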
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}

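/* H_PUT_TCE hypercall: write a single TCE at @ioba in the table @liobn. */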
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                goto unlock_exit;

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

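/*
 * H_PUT_TCE_INDIRECT hypercall: write up to 512 TCEs read from the guest
 * page at @tce_list, starting at @ioba.
 */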
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says the maximum size of the list is 512 TCEs,
         * so the whole table fits into a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
                                                entry + i);
                                goto unlock_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

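/*
 * H_STUFF_TCE hypercall: fill @npages consecutive TCE entries starting at
 * @ioba with the same value (typically zero, to clear them).
 */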
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

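/* H_GET_TCE hypercall: return the TCE at @ioba in GPR4. */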
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        if (!page) {
                vcpu->arch.regs.gpr[4] = 0;
                return H_SUCCESS;
        }
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);