// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

struct tlb_inv_context {
	u64 tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}
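/*
 * Editor's sketch, not part of the file above: on nVHE systems the host
 * cannot call the __kvm_* routines directly; KVM issues a hypercall via
 * its kvm_call_hyp() helper and the EL2 dispatcher runs the matching
 * handler with the guest context loaded. The wrapper below is a
 * hypothetical, illustrative host-side caller -- only kvm_call_hyp()
 * and __kvm_tlb_flush_vmid_ipa() are real names; the actual call sites
 * live elsewhere in the KVM/arm64 tree.
 */
static void stage2_unmap_then_invalidate(struct kvm_s2_mmu *mmu,
					 phys_addr_t ipa, int level)
{
	/* ... tear down the Stage-2 mapping covering @ipa first ... */

	/*
	 * Then make any cached translation for @ipa unreachable on all
	 * CPUs by trapping to EL2, where the handler above switches to
	 * the guest's VMID and performs the invalidation.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
}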