// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

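/*
 * Drop a shadow PTE from the host hash table and make sure the stale
 * translation is flushed from the TLB as well.
 */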
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove the entry from the host HTAB */
	pteg = (u32*)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

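/*
 * Guest VSIDs are mapped to host VSIDs through a hash-indexed array
 * (sid_map).  This hash folds the 64-bit guest VSID into SID_MAP_BITS-sized
 * chunks and XORs them together, so lookups never have to loop over the
 * whole table.
 */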
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

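/*
 * Look up the host VSID for a guest VSID.  Each guest VSID may live in one
 * of two slots (the forward or the backward hash index); return NULL if
 * neither slot matches.
 */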
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

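/*
 * Compute the address of the primary or secondary PTE group in the host
 * hash table for a given (vsid, eaddr) pair: XOR the VSID with the page
 * index, shift to PTEG granularity, invert for the secondary group and
 * mask with the HTAB size.
 */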
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32*)pteg;
}

extern char etext[];

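/*
 * Install a shadow mapping for a guest translation: resolve the guest
 * physical address to a host page, pick (or create) the host VSID for the
 * segment, find a free slot in the host PTE group and write the HPTE,
 * then record it in the shadow PTE cache so it can be invalidated later.
 */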
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	kvm_pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;
	bool writable;

	/* Get the host page backing this guest physical address */
	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
				 orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* Find the host VSID that backs this guest segment */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* Not evicting yet -- skip over slots that are still valid */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	/*
	 * Write the HPTE with the word containing PTE_V last, with syncs in
	 * between, so the hardware never walks a half-written entry.
	 */
	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our shadow PTE code about the new page */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
	return r;
}

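/* Flush all shadow PTEs that back this guest virtual page. */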
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

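/*
 * Allocate a new guest->host VSID mapping from the per-vcpu VSID pool,
 * flushing everything and starting over if the pool is exhausted.
 */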
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Two guest VSIDs can hash to the same slot, so alternate between
	 * the forward and backward slot to keep collisions from always
	 * evicting each other.
	 */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

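/*
 * Load the shadow segment register for a guest effective address with the
 * host VSID that backs the guest segment, creating a mapping if needed.
 */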
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* No guest VSID for this segment -- invalidate the entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

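/* Invalidate all shadow segment registers. */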
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%zu)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

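/*
 * Tear down the shadow MMU state: free the shadow PTE cache and release
 * the host MMU contexts backing the guest VSID pool.
 */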
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

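/*
 * Set up the shadow MMU: reserve host MMU contexts to build a pool of host
 * VSIDs for the guest, locate the host HTAB via SDR1 and initialize the
 * shadow PTE cache.
 */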
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Carve 16 host VSIDs out of each context for the guest */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the host HTAB is and how big it is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}