// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
    addr &= va_mask;
    addr |= tag_val << tag_lsb;
    return addr;
}
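
/*
 * For illustration (not part of the original source): with, say,
 * tag_lsb == 40, va_mask covers bits [39:0], so a kernel linear-map VA
 * keeps its low 40 bits and has (tag_val << 40) OR-ed in as the hyp
 * tag, producing the layout pictured above kvm_compute_layout() below.
 */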

/*
 * Store a hyp VA <-> PA offset into an EL2-owned variable.
 */
static void init_hyp_physvirt_offset(void)
{
    u64 kern_va, hyp_va;

    /* Compute the offset from the hyp VA and PA of a random symbol. */
    kern_va = (u64)lm_alias(__hyp_text_start);
    hyp_va = __early_kern_hyp_va(kern_va);
    hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
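
/*
 * Note: since hyp_physvirt_offset == PA - hyp VA is constant across the
 * linear map, EL2 code can convert a hyp VA to a PA with one addition
 * (and back with one subtraction), without walking any page tables.
 */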

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
    phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
    u64 hyp_va_msb;

    /* Where is my RAM region? */
    hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
    hyp_va_msb ^= BIT(vabits_actual - 1);

    tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
            (u64)(high_memory - 1));

    va_mask = GENMASK_ULL(tag_lsb - 1, 0);
    tag_val = hyp_va_msb;

    if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
        /* We have some free bits to insert a random tag. */
        tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
    }
    tag_val >>= tag_lsb;

    init_hyp_physvirt_offset();
}
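
/*
 * Worked example (hypothetical numbers): with vabits_actual == 48, if
 * the highest bit in which the start and end of the linear map differ
 * is bit 39, fls64(start ^ end) returns 40, so tag_lsb == 40. va_mask
 * then keeps bits [39:0], the region bit (the complement of the idmap's
 * bit 47) sits at position 47, and with CONFIG_RANDOMIZE_BASE a random
 * tag may be inserted in the free bits [46:40].
 */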

/*
 * The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but will be accessed only in hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
    int32_t *rel;
    int32_t *begin = (int32_t *)__hyp_reloc_begin;
    int32_t *end = (int32_t *)__hyp_reloc_end;

    for (rel = begin; rel < end; ++rel) {
        uintptr_t *ptr, kimg_va;

        /*
         * Each entry contains a 32-bit relative offset from itself
         * to a kimg VA position.
         */
        ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

        /* Read the kimg VA value at the relocation address. */
        kimg_va = *ptr;

        /* Convert to hyp VA and store back to the relocation address. */
        *ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
    }
}
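
/*
 * Illustrative entry: if a 32-bit value D is stored in .hyp.reloc at
 * address R, the kernel-image position R + D holds a kimg VA, and the
 * loop above rewrites that position with the corresponding hyp VA.
 */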

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
    u32 insn = AARCH64_BREAK_FAULT;

    switch (n) {
    case 0:
        insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
                              AARCH64_INSN_VARIANT_64BIT,
                              rn, rd, va_mask);
        break;

    case 1:
        /* ROR is a variant of EXTR with Rm = Rn */
        insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
                         rn, rn, rd,
                         tag_lsb);
        break;

    case 2:
        insn = aarch64_insn_gen_add_sub_imm(rd, rn,
                            tag_val & GENMASK(11, 0),
                            AARCH64_INSN_VARIANT_64BIT,
                            AARCH64_INSN_ADSB_ADD);
        break;

    case 3:
        insn = aarch64_insn_gen_add_sub_imm(rd, rn,
                            tag_val & GENMASK(23, 12),
                            AARCH64_INSN_VARIANT_64BIT,
                            AARCH64_INSN_ADSB_ADD);
        break;

    case 4:
        /* ROR is a variant of EXTR with Rm = Rn */
        insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
                         rn, rn, rd, 64 - tag_lsb);
        break;
    }

    return insn;
}
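
/*
 * Taken together, the five generated instructions implement
 * kern_hyp_va() in assembly, roughly:
 *
 *     and  reg, reg, #va_mask                  // keep the linear-map bits
 *     ror  reg, reg, #tag_lsb                  // rotate tag field down to bit 0
 *     add  reg, reg, #(tag_val & 0xfff)        // insert tag bits [11:0]
 *     add  reg, reg, #(tag_val >> 12), lsl #12 // insert tag bits [23:12]
 *     ror  reg, reg, #(64 - tag_lsb)           // rotate everything back
 */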

void __init kvm_update_va_mask(struct alt_instr *alt,
                   __le32 *origptr, __le32 *updptr, int nr_inst)
{
    int i;

    BUG_ON(nr_inst != 5);

    for (i = 0; i < nr_inst; i++) {
        u32 rd, rn, insn, oinsn;

        /*
         * VHE doesn't need any address translation, let's NOP
         * everything.
         *
         * Alternatively, if the tag is zero (because the layout
         * dictates it and we don't have any spare bits in the
         * address), NOP everything after masking the kernel VA.
         */
        if (has_vhe() || (!tag_val && i > 0)) {
            updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
            continue;
        }

        oinsn = le32_to_cpu(origptr[i]);
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
        rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

        insn = compute_instruction(i, rd, rn);
        BUG_ON(insn == AARCH64_BREAK_FAULT);

        updptr[i] = cpu_to_le32(insn);
    }
}
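
/*
 * This runs as an alternatives callback at boot. The kern_hyp_va patch
 * sites (see asm/kvm_mmu.h) reserve five placeholder instructions;
 * their only job is to fix the Rd/Rn register allocation decoded above
 * before being overwritten.
 */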

void kvm_patch_vector_branch(struct alt_instr *alt,
                 __le32 *origptr, __le32 *updptr, int nr_inst)
{
    u64 addr;
    u32 insn;

    BUG_ON(nr_inst != 4);

    if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
        return;

    /*
     * Compute HYP VA by using the same computation as kern_hyp_va()
     */
    addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

    /* Use PC[10:7] to branch to the same vector in KVM */
    addr |= ((u64)origptr & GENMASK_ULL(10, 7));

    /*
     * Branch over the preamble in order to avoid the initial store on
     * the stack (which we already perform in the hardening vectors).
     */
    addr += KVM_VECTOR_PREAMBLE;

    /* movz x0, #(addr & 0xffff) */
    insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                     (u16)addr,
                     0,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_ZERO);
    *updptr++ = cpu_to_le32(insn);

    /* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
    insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                     (u16)(addr >> 16),
                     16,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_KEEP);
    *updptr++ = cpu_to_le32(insn);

    /* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
    insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
                     (u16)(addr >> 32),
                     32,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_KEEP);
    *updptr++ = cpu_to_le32(insn);

    /* br x0 */
    insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
                       AARCH64_INSN_BRANCH_NOLINK);
    *updptr++ = cpu_to_le32(insn);
}
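
/*
 * The patched sequence therefore loads bits [47:0] of the vector
 * address into x0 (the code assumes the hyp VA fits in 48 bits) and
 * jumps there:
 *
 *     movz x0, #addr[15:0]
 *     movk x0, #addr[31:16], lsl #16
 *     movk x0, #addr[47:32], lsl #32
 *     br   x0
 */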

static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
    u32 insn, oinsn, rd;

    BUG_ON(nr_inst != 4);

    /* Compute target register */
    oinsn = le32_to_cpu(*origptr);
    rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

    /* movz rd, #(val & 0xffff) */
    insn = aarch64_insn_gen_movewide(rd,
                     (u16)val,
                     0,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_ZERO);
    *updptr++ = cpu_to_le32(insn);

    /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
    insn = aarch64_insn_gen_movewide(rd,
                     (u16)(val >> 16),
                     16,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_KEEP);
    *updptr++ = cpu_to_le32(insn);

    /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
    insn = aarch64_insn_gen_movewide(rd,
                     (u16)(val >> 32),
                     32,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_KEEP);
    *updptr++ = cpu_to_le32(insn);

    /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
    insn = aarch64_insn_gen_movewide(rd,
                     (u16)(val >> 48),
                     48,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_KEEP);
    *updptr++ = cpu_to_le32(insn);
}
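
/*
 * generate_mov_q() expects a patch site of exactly four instructions;
 * the destination register comes from the Rd field of the first
 * original instruction, and the movz/movk sequence then rebuilds the
 * full 64-bit constant 16 bits at a time.
 */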

void kvm_get_kimage_voffset(struct alt_instr *alt,
                __le32 *origptr, __le32 *updptr, int nr_inst)
{
    generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}
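
/*
 * kimage_voffset is the constant offset between kernel-image VAs and
 * their PAs; patching it in as an immediate lets hyp code translate
 * kimg addresses (e.g. to PAs) without relying on kernel mappings.
 */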

void kvm_compute_final_ctr_el0(struct alt_instr *alt,
                   __le32 *origptr, __le32 *updptr, int nr_inst)
{
    generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
               origptr, updptr, nr_inst);
}