// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};
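
/*
 * Save the host state we are about to clobber (IRQ flags and, with the
 * SPECULATIVE_AT workaround, TCR_EL1/SCTLR_EL1), then switch TLB
 * maintenance over to the guest's stage-2 context.
 */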
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
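		/*
		 * CPUs with ARM64_WORKAROUND_SPECULATIVE_AT (e.g. errata
		 * 1165522 and 1530923) may speculatively translate via a
		 * partially-loaded stage-1 context. Make any such walk
		 * harmless: mask page-table walks with TCR_EL1.EPD{0,1}
		 * and keep SCTLR_EL1.M set so stage-1 appears enabled.
		 */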
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

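	/*
	 * With VHE, HCR_EL2.{E2H,TGE} == {1,1} and most TLB maintenance
	 * targets the EL2&0 regime. To hit the guest's EL1&0 TLB entries
	 * we must clear TGE before issuing the TLB operation; E2H cannot
	 * be toggled. __load_stage2() loads the stage-2 context first
	 * (and issues an ISB when the SPECULATIVE_AT workaround is
	 * active), so both translation stages are in place before TGE
	 * is flipped below.
	 */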
	__load_stage2(mmu, mmu->arch);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
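	/*
	 * Done with the TLB operation: drop the VMID and restore the
	 * host's view of HCR_EL2 (TGE set) before touching any stage-1
	 * registers.
	 */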
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
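		/* Put TCR_EL1 and SCTLR_EL1 back to their saved values */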
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

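	/* Switch to the guest's VMID */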
	__tlb_switch_to_guest(mmu, &cxt);

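	/*
	 * Lacking the guest VA, invalidate the stage-2 entry for this
	 * IPA and then the whole of stage-1, since combined S1+S2 TLB
	 * entries may cover it. The TLBI takes the IPA shifted down by
	 * 12 bits, hence the shift below.
	 */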
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);
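	/*
	 * Make sure the stage-2 invalidation completes before the
	 * stage-1 one is issued: a table walk on another CPU could
	 * otherwise refill a combined S1+S2 entry based on the old
	 * stage-2 mapping.
	 */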
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

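	/* Switch to the guest's VMID */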
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

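	/* Switch to the guest's VMID, then flush this CPU only */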
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

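	/*
	 * VIPT and PIPT icaches are tagged independently of VMID, so no
	 * maintenance is needed across a VMID rollover.
	 *
	 * VPIPT icaches constrain lookup and maintenance to the active
	 * VMID, so lines with a stale VMID must be invalidated to avoid
	 * an ABA race after multiple rollovers.
	 */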
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}