// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

static void host_lock_component(void)
{
	hyp_spin_lock(&host_kvm.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_kvm.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of
	 * PAGE_SIZE, so there should be no need to free any of the tail
	 * pages to make the allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);
	mmu->arch = &host_kvm.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->pgt = &host_kvm.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

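/*
 * Switch the calling CPU over to the host stage-2: program VTTBR/VTCR, set
 * HCR_EL2.VM and invalidate any stale stage-1/2 TLB entries. Fails with
 * -EPERM if stage-2 translation is already enabled on this CPU.
 */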
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

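/*
 * Binary search the sorted hyp_memory memblock array. Returns true and fills
 * @range with the covering region if @addr is memory; otherwise returns false
 * and @range is set to the hole around @addr that contains no memory.
 */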
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

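/* Identity-map [start, end) in the host stage-2 with the given protection. */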
static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_kvm.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

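/*
 * Narrow @range to the largest naturally aligned block around @addr that
 * supports a block mapping and still fits within the original range.
 * Returns -EAGAIN if a valid mapping already exists for @addr, or -EPERM if
 * the entry carries an ownership annotation.
 */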
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_kvm.lock);
	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}

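/*
 * The two helpers below must be called with host_kvm.lock held; if the page
 * table allocator runs out of pages they retry after recycling the pages
 * backing MMIO mappings (see host_stage2_try()).
 */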
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
			       addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * 'host_stage2_idmap' below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * information is stored. In all those cases, it is safer to use page
	 * mappings, hence avoiding to lose the state because of side-effects
	 * in kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

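/*
 * Lazily id-map the faulting address into the host stage-2, covering the
 * largest block-mapping granule that fits entirely in memory or entirely in
 * an MMIO hole.
 */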
static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

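/*
 * Handle a stage-2 fault taken by the host: decode the fault, extract the
 * faulting IPA from HPFAR_EL2 and populate the id-map for it. -EAGAIN (the
 * mapping raced with another CPU) is the only tolerated failure.
 */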
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

/* This corresponds to locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
};

struct pkvm_mem_transition {
	u64 nr_pages;

	struct {
		enum pkvm_component_id id;
		/* Address in the initiator's address space */
		u64 addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64 completer_addr;
			} host;
		};
	} initiator;

	struct {
		enum pkvm_component_id id;
	} completer;
};

struct pkvm_mem_share {
	const struct pkvm_mem_transition tx;
	const enum kvm_pgtable_prot completer_prot;
};

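/*
 * Generic page-table walker used to verify that every leaf entry in a range
 * is in the expected pkvm_page_state, as reported by the component-specific
 * get_page_state() callback.
 */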
struct check_walk_data {
	enum pkvm_page_state desired;
	enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};

static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      enum kvm_pgtable_walk_flags flag,
				      void * const arg)
{
	struct check_walk_data *d = arg;
	kvm_pte_t pte = *ptep;

	if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
		return -EINVAL;

	return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb = __check_page_state_visitor,
		.arg = data,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = host_get_page_state,
	};

	hyp_assert_lock_held(&host_kvm.lock);
	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

	return host_stage2_idmap_locked(addr, size, prot);
}

static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = hyp_get_page_state,
	};

	hyp_assert_lock_held(&pkvm_pgd_lock);
	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

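/*
 * For host-initiated transitions the hyp's view of a page is fully implied
 * by the host's state tracking, so the hyp page-table walk is only performed
 * when CONFIG_NVHE_EL2_DEBUG is enabled.
 */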
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot;

	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
	return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	return (ret != size) ? -EFAULT : 0;
}

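/*
 * Validate both ends of a share: the initiator must own the pages and the
 * completer must be able to accept them. No page-table is modified here.
 */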
static int check_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_share(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_share(share);
	if (ret)
		return ret;

	return WARN_ON(__do_share(share));
}

static int check_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int __do_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED	=> OWNED
 * Completer: SHARED_BORROWED	=> NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_unshare(share);
	if (ret)
		return ret;

	return WARN_ON(__do_unshare(share));
}

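/*
 * Share the page at @pfn with the hypervisor: the host keeps ownership and
 * the page becomes accessible at EL2 through the hyp linear map.
 */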
int __pkvm_host_share_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

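/*
 * Tear down a share previously established with __pkvm_host_share_hyp():
 * the page is unmapped at EL2 and returns to the exclusively-owned state in
 * the host stage-2.
 */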
int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx = {
			.nr_pages = 1,
			.initiator = {
				.id = PKVM_ID_HOST,
				.addr = host_addr,
				.host = {
					.completer_addr = hyp_addr,
				},
			},
			.completer = {
				.id = PKVM_ID_HYP,
			},
		},
		.completer_prot = PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}