/* (Extraneous web-listing navigation header removed from this source listing.) */
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
0004  *
0005  * Author: Yu Liu, yu.liu@freescale.com
0006  *         Scott Wood, scottwood@freescale.com
0007  *         Ashish Kalra, ashish.kalra@freescale.com
0008  *         Varun Sethi, varun.sethi@freescale.com
0009  *         Alexander Graf, agraf@suse.de
0010  *
0011  * Description:
0012  * This file is based on arch/powerpc/kvm/44x_tlb.c,
0013  * by Hollis Blanchard <hollisb@us.ibm.com>.
0014  */
0015 
0016 #include <linux/kernel.h>
0017 #include <linux/types.h>
0018 #include <linux/slab.h>
0019 #include <linux/string.h>
0020 #include <linux/kvm.h>
0021 #include <linux/kvm_host.h>
0022 #include <linux/highmem.h>
0023 #include <linux/log2.h>
0024 #include <linux/uaccess.h>
0025 #include <linux/sched.h>
0026 #include <linux/rwsem.h>
0027 #include <linux/vmalloc.h>
0028 #include <linux/hugetlb.h>
0029 #include <asm/kvm_ppc.h>
0030 
0031 #include "e500.h"
0032 #include "trace_booke.h"
0033 #include "timing.h"
0034 #include "e500_mmu_host.h"
0035 
0036 static inline unsigned int gtlb0_get_next_victim(
0037         struct kvmppc_vcpu_e500 *vcpu_e500)
0038 {
0039     unsigned int victim;
0040 
0041     victim = vcpu_e500->gtlb_nv[0]++;
0042     if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
0043         vcpu_e500->gtlb_nv[0] = 0;
0044 
0045     return victim;
0046 }
0047 
0048 static int tlb0_set_base(gva_t addr, int sets, int ways)
0049 {
0050     int set_base;
0051 
0052     set_base = (addr >> PAGE_SHIFT) & (sets - 1);
0053     set_base *= ways;
0054 
0055     return set_base;
0056 }
0057 
/* TLB0 set base for @addr using this vcpu's configured guest geometry. */
static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
    return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                 vcpu_e500->gtlb_params[0].ways);
}
0063 
/*
 * Decode MAS0[ESEL] into a flat index into the selected guest TLB
 * array.  For the set-associative TLB0 the way number is combined with
 * the set derived from MAS2 (the faulting EPN); for TLB1 the ESEL value
 * indexes the array directly, masked to the entry count.
 */
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    int esel = get_tlb_esel_bit(vcpu);

    if (tlbsel == 0) {
        esel &= vcpu_e500->gtlb_params[0].ways - 1;
        esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
    } else {
        esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
    }

    return esel;
}
0078 
0079 /* Search the guest TLB for a matching entry. */
/*
 * Look up @eaddr in guest TLB @tlbsel.  For TLB0 only the ways of the
 * set selected by @eaddr are scanned; for TLB1 a cached [min, max]
 * effective-address range rejects obvious misses before the linear
 * scan.  An entry TID of 0 matches any @pid, and @as == -1 matches
 * either address space.  Returns set_base + way (a flat array index)
 * on a hit, or -1.
 */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
        gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
    int size = vcpu_e500->gtlb_params[tlbsel].entries;
    unsigned int set_base, offset;
    int i;

    if (tlbsel == 0) {
        /* Only scan the ways of the one set @eaddr can live in. */
        set_base = gtlb0_set_base(vcpu_e500, eaddr);
        size = vcpu_e500->gtlb_params[0].ways;
    } else {
        /* Fast-path reject via the cached TLB1 coverage range. */
        if (eaddr < vcpu_e500->tlb1_min_eaddr ||
                eaddr > vcpu_e500->tlb1_max_eaddr)
            return -1;
        set_base = 0;
    }

    offset = vcpu_e500->gtlb_offset[tlbsel];

    for (i = 0; i < size; i++) {
        struct kvm_book3e_206_tlb_entry *tlbe =
            &vcpu_e500->gtlb_arch[offset + set_base + i];
        unsigned int tid;

        if (eaddr < get_tlb_eaddr(tlbe))
            continue;

        if (eaddr > get_tlb_end(tlbe))
            continue;

        /* TID 0 is a global mapping: matches every PID. */
        tid = get_tlb_tid(tlbe);
        if (tid && (tid != pid))
            continue;

        if (!get_tlb_v(tlbe))
            continue;

        /* as == -1 means "either address space". */
        if (get_tlb_ts(tlbe) != as && as != -1)
            continue;

        return set_base + i;
    }

    return -1;
}
0125 
/*
 * Load the shared MAS registers with the values a real core would
 * present after a TLB miss at @eaddr in address space @as, so the
 * guest's miss handler can fill in the rest and issue tlbwe.
 */
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
        gva_t eaddr, int as)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    unsigned int victim, tsized;
    int tlbsel;

    /* since we only have two TLBs, only lower bit is used. */
    tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
    /* Replacement slot: round-robin way for TLB0, entry 0 for TLB1. */
    victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
    /* Default page size field from MAS4 (bits 7..11). */
    tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

    vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
    vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
        | MAS1_TID(get_tlbmiss_tid(vcpu))
        | MAS1_TSIZE(tsized);
    vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
        | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
    /* Keep only the user-definable attribute bits in MAS7_3. */
    vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
    vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
        | (get_cur_pid(vcpu) << 16)
        | (as ? MAS6_SAS : 0);
}
0150 
0151 static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
0152 {
0153     int size = vcpu_e500->gtlb_params[1].entries;
0154     unsigned int offset;
0155     gva_t eaddr;
0156     int i;
0157 
0158     vcpu_e500->tlb1_min_eaddr = ~0UL;
0159     vcpu_e500->tlb1_max_eaddr = 0;
0160     offset = vcpu_e500->gtlb_offset[1];
0161 
0162     for (i = 0; i < size; i++) {
0163         struct kvm_book3e_206_tlb_entry *tlbe =
0164             &vcpu_e500->gtlb_arch[offset + i];
0165 
0166         if (!get_tlb_v(tlbe))
0167             continue;
0168 
0169         eaddr = get_tlb_eaddr(tlbe);
0170         vcpu_e500->tlb1_min_eaddr =
0171                 min(vcpu_e500->tlb1_min_eaddr, eaddr);
0172 
0173         eaddr = get_tlb_end(tlbe);
0174         vcpu_e500->tlb1_max_eaddr =
0175                 max(vcpu_e500->tlb1_max_eaddr, eaddr);
0176     }
0177 }
0178 
0179 static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
0180                 struct kvm_book3e_206_tlb_entry *gtlbe)
0181 {
0182     unsigned long start, end, size;
0183 
0184     size = get_tlb_bytes(gtlbe);
0185     start = get_tlb_eaddr(gtlbe) & ~(size - 1);
0186     end = start + size - 1;
0187 
0188     return vcpu_e500->tlb1_min_eaddr == start ||
0189             vcpu_e500->tlb1_max_eaddr == end;
0190 }
0191 
0192 /* This function is supposed to be called for a adding a new valid tlb entry */
0193 static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
0194                 struct kvm_book3e_206_tlb_entry *gtlbe)
0195 {
0196     unsigned long start, end, size;
0197     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0198 
0199     if (!get_tlb_v(gtlbe))
0200         return;
0201 
0202     size = get_tlb_bytes(gtlbe);
0203     start = get_tlb_eaddr(gtlbe) & ~(size - 1);
0204     end = start + size - 1;
0205 
0206     vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
0207     vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
0208 }
0209 
/*
 * Invalidate one guest TLB entry by zeroing its MAS1 (which clears the
 * VALID bit).  Returns -1 without touching the entry if it is IPROT
 * protected.
 *
 * NOTE(review): the TLB1 range recalc runs before MAS1 is cleared, so
 * the departing entry is still counted and the cached range only stays
 * conservatively wide here — confirm this is intended.
 */
static inline int kvmppc_e500_gtlbe_invalidate(
                struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
    struct kvm_book3e_206_tlb_entry *gtlbe =
        get_entry(vcpu_e500, tlbsel, esel);

    if (unlikely(get_tlb_iprot(gtlbe)))
        return -1;

    if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
        kvmppc_recalc_tlb1map_range(vcpu_e500);

    gtlbe->mas1 = 0;

    return 0;
}
0227 
0228 int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
0229 {
0230     int esel;
0231 
0232     if (value & MMUCSR0_TLB0FI)
0233         for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
0234             kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
0235     if (value & MMUCSR0_TLB1FI)
0236         for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
0237             kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
0238 
0239     /* Invalidate all host shadow mappings */
0240     kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
0241 
0242     return EMULATE_DONE;
0243 }
0244 
/*
 * Emulate tlbivax: invalidate guest TLB entries by effective address.
 * The EA operand carries control bits: bit 2 is "invalidate all" (IA)
 * and bit 3 selects the TLB array.
 */
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    unsigned int ia;
    int esel, tlbsel;

    /* IA bit: flash-invalidate the whole selected TLB. */
    ia = (ea >> 2) & 0x1;

    /* since we only have two TLBs, only lower bit is used. */
    tlbsel = (ea >> 3) & 0x1;

    if (ia) {
        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
             esel++)
            kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
    } else {
        /* Strip the control bits, keep the page number. */
        ea &= 0xfffff000;
        esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                get_cur_pid(vcpu), -1);
        if (esel >= 0)
            kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
    }

    /* Invalidate all host shadow mappings */
    kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

    return EMULATE_DONE;
}
0274 
0275 static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
0276                int pid, int type)
0277 {
0278     struct kvm_book3e_206_tlb_entry *tlbe;
0279     int tid, esel;
0280 
0281     /* invalidate all entries */
0282     for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
0283         tlbe = get_entry(vcpu_e500, tlbsel, esel);
0284         tid = get_tlb_tid(tlbe);
0285         if (type == 0 || tid == pid) {
0286             inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
0287             kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
0288         }
0289     }
0290 }
0291 
0292 static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
0293                gva_t ea)
0294 {
0295     int tlbsel, esel;
0296 
0297     for (tlbsel = 0; tlbsel < 2; tlbsel++) {
0298         esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
0299         if (esel >= 0) {
0300             inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
0301             kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
0302             break;
0303         }
0304     }
0305 }
0306 
0307 int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
0308 {
0309     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0310     int pid = get_cur_spid(vcpu);
0311 
0312     if (type == 0 || type == 1) {
0313         tlbilx_all(vcpu_e500, 0, pid, type);
0314         tlbilx_all(vcpu_e500, 1, pid, type);
0315     } else if (type == 3) {
0316         tlbilx_one(vcpu_e500, pid, ea);
0317     }
0318 
0319     return EMULATE_DONE;
0320 }
0321 
/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0[TLBSEL,ESEL]
 * back into the shared MAS1/MAS2/MAS7_3 registers, and refresh the NV
 * (next victim) field in MAS0.
 */
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    int tlbsel, esel;
    struct kvm_book3e_206_tlb_entry *gtlbe;

    tlbsel = get_tlb_tlbsel(vcpu);
    esel = get_tlb_esel(vcpu, tlbsel);

    gtlbe = get_entry(vcpu_e500, tlbsel, esel);
    /* Replace only the NV field of MAS0, keep TLBSEL/ESEL. */
    vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
    vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
    vcpu->arch.shared->mas1 = gtlbe->mas1;
    vcpu->arch.shared->mas2 = gtlbe->mas2;
    vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

    return EMULATE_DONE;
}
0340 
/*
 * Emulate tlbsx: search both guest TLBs for @ea under the PID/AS in
 * MAS6.  On a hit, load MAS0-MAS7_3 from the matching entry.  On a
 * miss, set the MAS registers up for a subsequent tlbwe much like a
 * real TLB miss would (replacement slot per MAS4[TLBSELD], MAS1 left
 * without MAS1_VALID).
 */
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    int as = !!get_cur_sas(vcpu);
    unsigned int pid = get_cur_spid(vcpu);
    int esel, tlbsel;
    struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

    for (tlbsel = 0; tlbsel < 2; tlbsel++) {
        esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
        if (esel >= 0) {
            gtlbe = get_entry(vcpu_e500, tlbsel, esel);
            break;
        }
    }

    if (gtlbe) {
        /* Report the way number within the set in MAS0[ESEL]. */
        esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
            | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
    } else {
        int victim;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
            | MAS0_ESEL(victim)
            | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        /* No MAS1_VALID: TID/TS from MAS6, default size from MAS4. */
        vcpu->arch.shared->mas1 =
              (vcpu->arch.shared->mas6 & MAS6_SPID0)
            | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
            | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
        vcpu->arch.shared->mas2 &= MAS2_EPN;
        vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                       MAS2_ATTRIB_MASK;
        /* Keep only the user-definable attribute bits in MAS7_3. */
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                         MAS3_U2 | MAS3_U3;
    }

    kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
    return EMULATE_DONE;
}
0389 
/*
 * Emulate tlbwe: write the shared MAS registers into the guest TLB
 * entry selected by MAS0, drop any host shadow mappings of the entry
 * being overwritten, maintain the cached TLB1 address range, and
 * opportunistically premap the new translation when it is host-safe.
 */
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
    struct kvm_book3e_206_tlb_entry *gtlbe;
    int tlbsel, esel;
    int recal = 0;
    int idx;

    tlbsel = get_tlb_tlbsel(vcpu);
    esel = get_tlb_esel(vcpu, tlbsel);

    gtlbe = get_entry(vcpu_e500, tlbsel, esel);

    if (get_tlb_v(gtlbe)) {
        /* Overwriting a valid entry: shoot down its shadow copies
         * and note whether the TLB1 range must be rebuilt. */
        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
        if ((tlbsel == 1) &&
            kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
            recal = 1;
    }

    gtlbe->mas1 = vcpu->arch.shared->mas1;
    gtlbe->mas2 = vcpu->arch.shared->mas2;
    /* In 32-bit (non-MSR_CM) mode the effective address is 32 bits. */
    if (!(vcpu->arch.shared->msr & MSR_CM))
        gtlbe->mas2 &= 0xffffffffUL;
    gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

    trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                  gtlbe->mas2, gtlbe->mas7_3);

    if (tlbsel == 1) {
        /*
         * If a valid tlb1 entry is overwritten then recalculate the
         * min/max TLB1 map address range otherwise no need to look
         * in tlb1 array.
         */
        if (recal)
            kvmppc_recalc_tlb1map_range(vcpu_e500);
        else
            kvmppc_set_tlb1map_range(vcpu, gtlbe);
    }

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
    if (tlbe_is_host_safe(vcpu, gtlbe)) {
        u64 eaddr = get_tlb_eaddr(gtlbe);
        u64 raddr = get_tlb_raddr(gtlbe);

        if (tlbsel == 0) {
            /* TLB0 entries are always shadowed at 4K size. */
            gtlbe->mas1 &= ~MAS1_TSIZE(~0);
            gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        }

        /* Premap the faulting page */
        kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
    }

    srcu_read_unlock(&vcpu->kvm->srcu, idx);

    kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
    return EMULATE_DONE;
}
0452 
0453 static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
0454                   gva_t eaddr, unsigned int pid, int as)
0455 {
0456     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0457     int esel, tlbsel;
0458 
0459     for (tlbsel = 0; tlbsel < 2; tlbsel++) {
0460         esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
0461         if (esel >= 0)
0462             return index_of(tlbsel, esel);
0463     }
0464 
0465     return -1;
0466 }
0467 
0468 /* 'linear_address' is actually an encoding of AS|PID|EADDR . */
0469 int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
0470                                struct kvm_translation *tr)
0471 {
0472     int index;
0473     gva_t eaddr;
0474     u8 pid;
0475     u8 as;
0476 
0477     eaddr = tr->linear_address;
0478     pid = (tr->linear_address >> 32) & 0xff;
0479     as = (tr->linear_address >> 40) & 0x1;
0480 
0481     index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
0482     if (index < 0) {
0483         tr->valid = 0;
0484         return 0;
0485     }
0486 
0487     tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
0488     /* XXX what does "writeable" and "usermode" even mean? */
0489     tr->valid = 1;
0490 
0491     return 0;
0492 }
0493 
0494 
0495 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
0496 {
0497     unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
0498 
0499     return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
0500 }
0501 
0502 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
0503 {
0504     unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
0505 
0506     return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
0507 }
0508 
0509 void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
0510 {
0511     unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
0512 
0513     kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
0514 }
0515 
0516 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
0517 {
0518     unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
0519 
0520     kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
0521 }
0522 
0523 gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
0524             gva_t eaddr)
0525 {
0526     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0527     struct kvm_book3e_206_tlb_entry *gtlbe;
0528     u64 pgmask;
0529 
0530     gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
0531     pgmask = get_tlb_bytes(gtlbe) - 1;
0532 
0533     return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
0534 }
0535 
0536 /*****************************************/
0537 
/*
 * Release all guest TLB state.  Host shadow mappings are flushed first
 * so nothing still references the arrays being freed.  If the TLB array
 * lives in user-shared pages (SW_TLB), the vmap()ed alias is torn down
 * and the pinned pages are marked dirty and released; otherwise the
 * array was kmalloc'ed here.
 */
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
    int i;

    kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
    kfree(vcpu_e500->g2h_tlb1_map);
    kfree(vcpu_e500->gtlb_priv[0]);
    kfree(vcpu_e500->gtlb_priv[1]);

    if (vcpu_e500->shared_tlb_pages) {
        /* gtlb_arch points at the user offset within the mapping;
         * round down to recover the vmap() base address. */
        vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                      PAGE_SIZE)));

        for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
            set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
            put_page(vcpu_e500->shared_tlb_pages[i]);
        }

        vcpu_e500->num_shared_tlb_pages = 0;

        kfree(vcpu_e500->shared_tlb_pages);
        vcpu_e500->shared_tlb_pages = NULL;
    } else {
        kfree(vcpu_e500->gtlb_arch);
    }

    vcpu_e500->gtlb_arch = NULL;
}
0566 
/* Export the MMU-related sregs: shared MAS registers and TLB config. */
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
    sregs->u.e.mas0 = vcpu->arch.shared->mas0;
    sregs->u.e.mas1 = vcpu->arch.shared->mas1;
    sregs->u.e.mas2 = vcpu->arch.shared->mas2;
    sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
    sregs->u.e.mas4 = vcpu->arch.shared->mas4;
    sregs->u.e.mas6 = vcpu->arch.shared->mas6;

    sregs->u.e.mmucfg = vcpu->arch.mmucfg;
    sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
    sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
    /* Only two TLBs are modeled; the other slots read as zero. */
    sregs->u.e.tlbcfg[2] = 0;
    sregs->u.e.tlbcfg[3] = 0;
}
0582 
/*
 * Import the MMU-related sregs.  Only the shared MAS registers are
 * writable, and only when userspace advertises the ARCH206 MMU
 * feature; everything else is silently ignored.
 */
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
    if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
        vcpu->arch.shared->mas0 = sregs->u.e.mas0;
        vcpu->arch.shared->mas1 = sregs->u.e.mas1;
        vcpu->arch.shared->mas2 = sregs->u.e.mas2;
        vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
        vcpu->arch.shared->mas4 = sregs->u.e.mas4;
        vcpu->arch.shared->mas6 = sregs->u.e.mas6;
    }

    return 0;
}
0596 
0597 int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
0598                 union kvmppc_one_reg *val)
0599 {
0600     int r = 0;
0601     long int i;
0602 
0603     switch (id) {
0604     case KVM_REG_PPC_MAS0:
0605         *val = get_reg_val(id, vcpu->arch.shared->mas0);
0606         break;
0607     case KVM_REG_PPC_MAS1:
0608         *val = get_reg_val(id, vcpu->arch.shared->mas1);
0609         break;
0610     case KVM_REG_PPC_MAS2:
0611         *val = get_reg_val(id, vcpu->arch.shared->mas2);
0612         break;
0613     case KVM_REG_PPC_MAS7_3:
0614         *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
0615         break;
0616     case KVM_REG_PPC_MAS4:
0617         *val = get_reg_val(id, vcpu->arch.shared->mas4);
0618         break;
0619     case KVM_REG_PPC_MAS6:
0620         *val = get_reg_val(id, vcpu->arch.shared->mas6);
0621         break;
0622     case KVM_REG_PPC_MMUCFG:
0623         *val = get_reg_val(id, vcpu->arch.mmucfg);
0624         break;
0625     case KVM_REG_PPC_EPTCFG:
0626         *val = get_reg_val(id, vcpu->arch.eptcfg);
0627         break;
0628     case KVM_REG_PPC_TLB0CFG:
0629     case KVM_REG_PPC_TLB1CFG:
0630     case KVM_REG_PPC_TLB2CFG:
0631     case KVM_REG_PPC_TLB3CFG:
0632         i = id - KVM_REG_PPC_TLB0CFG;
0633         *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
0634         break;
0635     case KVM_REG_PPC_TLB0PS:
0636     case KVM_REG_PPC_TLB1PS:
0637     case KVM_REG_PPC_TLB2PS:
0638     case KVM_REG_PPC_TLB3PS:
0639         i = id - KVM_REG_PPC_TLB0PS;
0640         *val = get_reg_val(id, vcpu->arch.tlbps[i]);
0641         break;
0642     default:
0643         r = -EINVAL;
0644         break;
0645     }
0646 
0647     return r;
0648 }
0649 
0650 int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
0651                    union kvmppc_one_reg *val)
0652 {
0653     int r = 0;
0654     long int i;
0655 
0656     switch (id) {
0657     case KVM_REG_PPC_MAS0:
0658         vcpu->arch.shared->mas0 = set_reg_val(id, *val);
0659         break;
0660     case KVM_REG_PPC_MAS1:
0661         vcpu->arch.shared->mas1 = set_reg_val(id, *val);
0662         break;
0663     case KVM_REG_PPC_MAS2:
0664         vcpu->arch.shared->mas2 = set_reg_val(id, *val);
0665         break;
0666     case KVM_REG_PPC_MAS7_3:
0667         vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
0668         break;
0669     case KVM_REG_PPC_MAS4:
0670         vcpu->arch.shared->mas4 = set_reg_val(id, *val);
0671         break;
0672     case KVM_REG_PPC_MAS6:
0673         vcpu->arch.shared->mas6 = set_reg_val(id, *val);
0674         break;
0675     /* Only allow MMU registers to be set to the config supported by KVM */
0676     case KVM_REG_PPC_MMUCFG: {
0677         u32 reg = set_reg_val(id, *val);
0678         if (reg != vcpu->arch.mmucfg)
0679             r = -EINVAL;
0680         break;
0681     }
0682     case KVM_REG_PPC_EPTCFG: {
0683         u32 reg = set_reg_val(id, *val);
0684         if (reg != vcpu->arch.eptcfg)
0685             r = -EINVAL;
0686         break;
0687     }
0688     case KVM_REG_PPC_TLB0CFG:
0689     case KVM_REG_PPC_TLB1CFG:
0690     case KVM_REG_PPC_TLB2CFG:
0691     case KVM_REG_PPC_TLB3CFG: {
0692         /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
0693         u32 reg = set_reg_val(id, *val);
0694         i = id - KVM_REG_PPC_TLB0CFG;
0695         if (reg != vcpu->arch.tlbcfg[i])
0696             r = -EINVAL;
0697         break;
0698     }
0699     case KVM_REG_PPC_TLB0PS:
0700     case KVM_REG_PPC_TLB1PS:
0701     case KVM_REG_PPC_TLB2PS:
0702     case KVM_REG_PPC_TLB3PS: {
0703         u32 reg = set_reg_val(id, *val);
0704         i = id - KVM_REG_PPC_TLB0PS;
0705         if (reg != vcpu->arch.tlbps[i])
0706             r = -EINVAL;
0707         break;
0708     }
0709     default:
0710         r = -EINVAL;
0711         break;
0712     }
0713 
0714     return r;
0715 }
0716 
/*
 * Propagate SW_TLB-configured geometry (entry counts, associativity)
 * into the guest-visible TLBnCFG registers.
 */
static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_params *params)
{
    vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
    /* NOTE(review): sizes > 2048 leave N_ENTRY zero — presumably the
     * field cannot represent them; confirm against the TLBnCFG layout. */
    if (params->tlb_sizes[0] <= 2048)
        vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
    vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

    vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
    vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
    vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
    return 0;
}
0730 
0731 int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
0732                   struct kvm_config_tlb *cfg)
0733 {
0734     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0735     struct kvm_book3e_206_tlb_params params;
0736     char *virt;
0737     struct page **pages;
0738     struct tlbe_priv *privs[2] = {};
0739     u64 *g2h_bitmap;
0740     size_t array_len;
0741     u32 sets;
0742     int num_pages, ret, i;
0743 
0744     if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
0745         return -EINVAL;
0746 
0747     if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
0748                sizeof(params)))
0749         return -EFAULT;
0750 
0751     if (params.tlb_sizes[1] > 64)
0752         return -EINVAL;
0753     if (params.tlb_ways[1] != params.tlb_sizes[1])
0754         return -EINVAL;
0755     if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
0756         return -EINVAL;
0757     if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
0758         return -EINVAL;
0759 
0760     if (!is_power_of_2(params.tlb_ways[0]))
0761         return -EINVAL;
0762 
0763     sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
0764     if (!is_power_of_2(sets))
0765         return -EINVAL;
0766 
0767     array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
0768     array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
0769 
0770     if (cfg->array_len < array_len)
0771         return -EINVAL;
0772 
0773     num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
0774             cfg->array / PAGE_SIZE;
0775     pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
0776     if (!pages)
0777         return -ENOMEM;
0778 
0779     ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
0780     if (ret < 0)
0781         goto free_pages;
0782 
0783     if (ret != num_pages) {
0784         num_pages = ret;
0785         ret = -EFAULT;
0786         goto put_pages;
0787     }
0788 
0789     virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
0790     if (!virt) {
0791         ret = -ENOMEM;
0792         goto put_pages;
0793     }
0794 
0795     privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
0796     if (!privs[0]) {
0797         ret = -ENOMEM;
0798         goto put_pages;
0799     }
0800 
0801     privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
0802     if (!privs[1]) {
0803         ret = -ENOMEM;
0804         goto free_privs_first;
0805     }
0806 
0807     g2h_bitmap = kcalloc(params.tlb_sizes[1],
0808                  sizeof(*g2h_bitmap),
0809                  GFP_KERNEL);
0810     if (!g2h_bitmap) {
0811         ret = -ENOMEM;
0812         goto free_privs_second;
0813     }
0814 
0815     free_gtlb(vcpu_e500);
0816 
0817     vcpu_e500->gtlb_priv[0] = privs[0];
0818     vcpu_e500->gtlb_priv[1] = privs[1];
0819     vcpu_e500->g2h_tlb1_map = g2h_bitmap;
0820 
0821     vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
0822         (virt + (cfg->array & (PAGE_SIZE - 1)));
0823 
0824     vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
0825     vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
0826 
0827     vcpu_e500->gtlb_offset[0] = 0;
0828     vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
0829 
0830     /* Update vcpu's MMU geometry based on SW_TLB input */
0831     vcpu_mmu_geometry_update(vcpu, &params);
0832 
0833     vcpu_e500->shared_tlb_pages = pages;
0834     vcpu_e500->num_shared_tlb_pages = num_pages;
0835 
0836     vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
0837     vcpu_e500->gtlb_params[0].sets = sets;
0838 
0839     vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
0840     vcpu_e500->gtlb_params[1].sets = 1;
0841 
0842     kvmppc_recalc_tlb1map_range(vcpu_e500);
0843     return 0;
0844  free_privs_second:
0845     kfree(privs[1]);
0846  free_privs_first:
0847     kfree(privs[0]);
0848  put_pages:
0849     for (i = 0; i < num_pages; i++)
0850         put_page(pages[i]);
0851  free_pages:
0852     kfree(pages);
0853     return ret;
0854 }
0855 
/*
 * KVM_DIRTY_TLB: userspace modified the shared TLB array, so rebuild
 * the cached TLB1 range and drop all host shadow mappings.
 */
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                 struct kvm_dirty_tlb *dirty)
{
    kvmppc_recalc_tlb1map_range(to_e500(vcpu));
    kvmppc_core_flush_tlb(vcpu);
    return 0;
}
0864 
/* Vcpu's MMU default configuration */
/*
 * Seed the guest-visible MMU config registers from the host's, with
 * the entry/way fields replaced by the configured guest geometry, and
 * with features the emulation does not support (LPID, LRAT, E.PT,
 * indirect entries) masked out.
 */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
               struct kvmppc_e500_tlb_params *params)
{
    /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values*/
    vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

    /* Initialize TLBnCFG fields with host values and SW_TLB geometry*/
    vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
    vcpu->arch.tlbcfg[0] |= params[0].entries;
    vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

    vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
    vcpu->arch.tlbcfg[1] |= params[1].entries;
    vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

    if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
        vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
        vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

        vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

        /* Guest mmu emulation currently doesn't handle E.PT */
        vcpu->arch.eptcfg = 0;
        vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
        vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
    }

    return 0;
}
0897 
/*
 * Allocate and initialize the default (kernel-owned) guest TLB arrays
 * and per-entry bookkeeping, then seed the MMU config registers and
 * the TLB1 range cache.  Returns 0 on success, negative on failure.
 *
 * NOTE(review): the error paths free the guest TLB state but do not
 * call e500_mmu_host_uninit() after a successful e500_mmu_host_init(),
 * and the -ENOMEM return skips free_gtlb() entirely — confirm callers
 * clean up, or that the zeroed state makes this safe.
 */
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
    struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;

    if (e500_mmu_host_init(vcpu_e500))
        goto free_vcpu;

    vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
    vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

    vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
    vcpu_e500->gtlb_params[0].sets =
        KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

    /* TLB1 is modeled fully associative: one set, ways == entries. */
    vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
    vcpu_e500->gtlb_params[1].sets = 1;

    /* Both TLBs live back-to-back in one flat array. */
    vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
                         KVM_E500_TLB1_SIZE,
                         sizeof(*vcpu_e500->gtlb_arch),
                         GFP_KERNEL);
    if (!vcpu_e500->gtlb_arch)
        return -ENOMEM;

    vcpu_e500->gtlb_offset[0] = 0;
    vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

    vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
                      sizeof(struct tlbe_ref),
                      GFP_KERNEL);
    if (!vcpu_e500->gtlb_priv[0])
        goto free_vcpu;

    vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
                      sizeof(struct tlbe_ref),
                      GFP_KERNEL);
    if (!vcpu_e500->gtlb_priv[1])
        goto free_vcpu;

    vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
                      sizeof(*vcpu_e500->g2h_tlb1_map),
                      GFP_KERNEL);
    if (!vcpu_e500->g2h_tlb1_map)
        goto free_vcpu;

    vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

    kvmppc_recalc_tlb1map_range(vcpu_e500);
    return 0;
 free_vcpu:
    free_gtlb(vcpu_e500);
    return -1;
}
0951 
/* Tear down guest TLB state and the host-side mapping machinery. */
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
    free_gtlb(vcpu_e500);
    e500_mmu_host_uninit(vcpu_e500);
}