// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
    unsigned long   flags;
    u64             tcr;
    u64             sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                  struct tlb_inv_context *cxt)
{
    u64 val;

    local_irq_save(cxt->flags);

    if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
        /*
         * For CPUs affected by ARM errata 1165522 or 1530923, we
         * cannot trust stage-1 to be in a correct state at this
         * point. Since we do not want to force a full load of the
         * vcpu state, we prevent the EL1 page-table walker from
         * allocating new TLB entries. This is done by setting the
         * EPD bits in the TCR_EL1 register. We also need to prevent
         * it from allocating IPA->PA walks, so we enable the S1
         * MMU...
         */
        val = cxt->tcr = read_sysreg_el1(SYS_TCR);
        val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
        write_sysreg_el1(val, SYS_TCR);
        val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
        val |= SCTLR_ELx_M;
        write_sysreg_el1(val, SYS_SCTLR);
    }

    /*
     * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
     * most TLB operations target EL2/EL0. In order to affect the
     * guest TLBs (EL1/EL0), we need to change one of these two
     * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
     * let's flip TGE before executing the TLB operation.
     *
     * ARM erratum 1165522 requires some special handling (again),
     * as we need to make sure both stages of translation are in
     * place before clearing TGE. __load_stage2() already has an
     * ISB in order to deal with this.
     */
    __load_stage2(mmu, mmu->arch);
    val = read_sysreg(hcr_el2);
    val &= ~HCR_TGE;
    write_sysreg(val, hcr_el2);
    isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
    /*
     * We're done with the TLB operation, let's restore the host's
     * view of HCR_EL2.
     */
    write_sysreg(0, vttbr_el2);
    write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
    isb();

    if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
        /* Restore the registers to what they were */
        write_sysreg_el1(cxt->tcr, SYS_TCR);
        write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
    }

    local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
                              phys_addr_t ipa, int level)
{
    struct tlb_inv_context cxt;

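    /* Make prior page-table updates visible before the invalidation */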
    dsb(ishst);

    /* Switch to requested VMID */
    __tlb_switch_to_guest(mmu, &cxt);

    /*
     * We could do so much better if we had the VA as well.
     * Instead, we invalidate Stage-2 for this IPA, and the
     * whole of Stage-1. Weep...
     */
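    /* The TLBI payload takes the IPA in 4kB units, whatever the page size */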
    ipa >>= 12;
    __tlbi_level(ipas2e1is, ipa, level);

    /*
     * We have to ensure completion of the invalidation at Stage-2,
     * since a table walk on another CPU could refill a TLB with a
     * complete (S1 + S2) walk based on the old Stage-2 mapping if
     * the Stage-1 invalidation happened first.
     */
    dsb(ish);
    __tlbi(vmalle1is);
    dsb(ish);
    isb();

    __tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
    struct tlb_inv_context cxt;

    dsb(ishst);

    /* Switch to requested VMID */
    __tlb_switch_to_guest(mmu, &cxt);

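    /*
     * Invalidate all stage-1 and stage-2 TLB entries for the
     * current VMID, Inner Shareable.
     */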
    __tlbi(vmalls12e1is);
    dsb(ish);
    isb();

    __tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
    struct tlb_inv_context cxt;

    /* Switch to requested VMID */
    __tlb_switch_to_guest(mmu, &cxt);

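    /*
     * Local invalidation only: flush the stage-1 TLB entries for
     * the current VMID and the whole I-cache on this CPU, with a
     * non-shareable barrier to wait for local completion.
     */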
    __tlbi(vmalle1);
    asm volatile("ic iallu");
    dsb(nsh);
    isb();

    __tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
    dsb(ishst);
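    /* Invalidate all EL1&0 TLB entries, for all VMIDs, Inner Shareable */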
    __tlbi(alle1is);

    /*
     * VIPT and PIPT caches are not affected by VMID, so no maintenance
     * is necessary across a VMID rollover.
     *
     * VPIPT caches constrain lookup and maintenance to the active VMID,
     * so we need to invalidate lines with a stale VMID to avoid an ABA
     * race after multiple rollovers.
     */
    if (icache_is_vpipt())
        asm volatile("ic ialluis");

    dsb(ish);
}