// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

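/*
 * Tracking of guest PTEs ("shadow PTEs") for Book3S PR KVM.  Every
 * guest PTE the shadow MMU maps into the host is cached here so that
 * it can be found and invalidated again by guest effective address,
 * by guest virtual page number or by guest physical address range.
 */
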
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

#define PTE_SIZE    12

static struct kmem_cache *hpte_cache;

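/*
 * Bucket selectors for the per-vcpu lookup tables.  Each cached PTE is
 * hashed several ways so it can be found again by guest effective
 * address, by the effective-address bits below the 256MB segment
 * boundary, and by guest virtual page number at 4k, 64k (Book3S-64
 * only) and segment granularity.  PTE_SIZE (12) is the 4k page shift.
 */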
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
    return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
    return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
               HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
    return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
    return hash_64((vpage & 0xffffff000ULL) >> 12,
               HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
    return hash_64((vpage & 0xffffffff0ULL) >> 4,
               HPTEG_HASH_BITS_VPTE_64K);
}
#endif

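/*
 * Hook a newly mapped shadow PTE into all lookup lists.  Writers
 * serialize on mmu_lock; the flush paths below walk the lists under
 * RCU.
 */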
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
    u64 index;
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

    trace_kvm_book3s_mmu_map(pte);

    spin_lock(&vcpu3s->mmu_lock);

    /* Add to ePTE list */
    index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
    hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

    /* Add to ePTE_long list */
    index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
    hlist_add_head_rcu(&pte->list_pte_long,
               &vcpu3s->hpte_hash_pte_long[index]);

    /* Add to vPTE list */
    index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
    hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

    /* Add to vPTE_long list */
    index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
    hlist_add_head_rcu(&pte->list_vpte_long,
               &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
    /* Add to vPTE_64k list */
    index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
    hlist_add_head_rcu(&pte->list_vpte_64k,
               &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

    vcpu3s->hpte_cache_count++;

    spin_unlock(&vcpu3s->mmu_lock);
}

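/* RCU callback: free the entry once no lockless walker can still see it. */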
static void free_pte_rcu(struct rcu_head *head)
{
    struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
    kmem_cache_free(hpte_cache, pte);
}

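/*
 * Drop one shadow PTE: tear down the host mapping, unhash the entry
 * from every list under mmu_lock and leave the actual free to RCU.
 * The hlist_unhashed() check catches a concurrent invalidation of the
 * same entry, as the lists are walked without mmu_lock held.
 */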
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

    trace_kvm_book3s_mmu_invalidate(pte);

    /* Different for 32 and 64 bit */
    kvmppc_mmu_invalidate_pte(vcpu, pte);

    spin_lock(&vcpu3s->mmu_lock);

    /* pte already invalidated in between? */
    if (hlist_unhashed(&pte->list_pte)) {
        spin_unlock(&vcpu3s->mmu_lock);
        return;
    }

    hlist_del_init_rcu(&pte->list_pte);
    hlist_del_init_rcu(&pte->list_pte_long);
    hlist_del_init_rcu(&pte->list_vpte);
    hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
    hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
    vcpu3s->hpte_cache_count--;

    spin_unlock(&vcpu3s->mmu_lock);

    call_rcu(&pte->rcu_head, free_pte_rcu);
}

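/*
 * Flush the entire cache.  Walking only the vpte_long table is enough,
 * because every entry is hashed into each of the tables.
 */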
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hpte_cache *pte;
    int i;

    rcu_read_lock();

    for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
        struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

        hlist_for_each_entry_rcu(pte, list, list_vpte_long)
            invalidate_pte(vcpu, pte);
    }

    rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hlist_head *list;
    struct hpte_cache *pte;

    /* Find the list of entries in the map */
    list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

    rcu_read_lock();

    /* Check the list for matching entries and invalidate */
    hlist_for_each_entry_rcu(pte, list, list_pte)
        if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
            invalidate_pte(vcpu, pte);

    rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hlist_head *list;
    struct hpte_cache *pte;

    /* Find the list of entries in the map */
    list = &vcpu3s->hpte_hash_pte_long[
            kvmppc_mmu_hash_pte_long(guest_ea)];

    rcu_read_lock();

    /* Check the list for matching entries and invalidate */
    hlist_for_each_entry_rcu(pte, list, list_pte_long)
        if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
            invalidate_pte(vcpu, pte);

    rcu_read_unlock();
}

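/*
 * Flush by guest effective address.  ea_mask selects the granularity:
 * ~0xfff flushes a single 4k page, 0x0ffff000 matches only effective-
 * address bits 12-27 (the page index within a 256MB segment), and 0
 * throws the whole cache away.
 */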
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
    trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
    guest_ea &= ea_mask;

    switch (ea_mask) {
    case ~0xfffUL:
        kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
        break;
    case 0x0ffff000:
        kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
        break;
    case 0:
        /* Doing a complete flush -> start from scratch */
        kvmppc_mmu_pte_flush_all(vcpu);
        break;
    default:
        WARN_ON(1);
        break;
    }
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hlist_head *list;
    struct hpte_cache *pte;
    u64 vp_mask = 0xfffffffffULL;

    list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

    rcu_read_lock();

    /* Check the list for matching entries and invalidate */
    hlist_for_each_entry_rcu(pte, list, list_vpte)
        if ((pte->pte.vpage & vp_mask) == guest_vp)
            invalidate_pte(vcpu, pte);

    rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hlist_head *list;
    struct hpte_cache *pte;
    u64 vp_mask = 0xffffffff0ULL;

    list = &vcpu3s->hpte_hash_vpte_64k[
        kvmppc_mmu_hash_vpte_64k(guest_vp)];

    rcu_read_lock();

    /* Check the list for matching entries and invalidate */
    hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
        if ((pte->pte.vpage & vp_mask) == guest_vp)
            invalidate_pte(vcpu, pte);

    rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hlist_head *list;
    struct hpte_cache *pte;
    u64 vp_mask = 0xffffff000ULL;

    list = &vcpu3s->hpte_hash_vpte_long[
        kvmppc_mmu_hash_vpte_long(guest_vp)];

    rcu_read_lock();

    /* Check the list for matching entries and invalidate */
    hlist_for_each_entry_rcu(pte, list, list_vpte_long)
        if ((pte->pte.vpage & vp_mask) == guest_vp)
            invalidate_pte(vcpu, pte);

    rcu_read_unlock();
}

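/*
 * Flush by guest virtual page number; vp_mask selects which of the
 * vpte granularities above to match on.
 */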
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
    trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
    guest_vp &= vp_mask;

    switch (vp_mask) {
    case 0xfffffffffULL:
        kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
        break;
#ifdef CONFIG_PPC_BOOK3S_64
    case 0xffffffff0ULL:
        kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
        break;
#endif
    case 0xffffff000ULL:
        kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
        break;
    default:
        WARN_ON(1);
        return;
    }
}

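/*
 * Flush all entries whose guest physical address falls in
 * [pa_start, pa_end).  No table is hashed by real address, so the
 * whole vpte_long table is scanned.
 */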
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hpte_cache *pte;
    int i;

    trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

    rcu_read_lock();

    for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
        struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

        hlist_for_each_entry_rcu(pte, list, list_vpte_long)
            if ((pte->pte.raddr >= pa_start) &&
                (pte->pte.raddr < pa_end))
                invalidate_pte(vcpu, pte);
    }

    rcu_read_unlock();
}

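/*
 * Get a cache entry for a new shadow PTE.  If the cache is full, flush
 * everything and start from scratch rather than evict single entries.
 */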
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
    struct hpte_cache *pte;

    if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
        kvmppc_mmu_pte_flush_all(vcpu);

    pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

    return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
    kmem_cache_free(hpte_cache, pte);
}

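/* vcpu teardown: an EA flush with mask 0 frees every cached entry. */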
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
    kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
    int i;

    for (i = 0; i < len; i++)
        INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

    /* init hpte lookup hashes */
    kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
                  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
    kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
                  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
    kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
    kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
    kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

    spin_lock_init(&vcpu3s->mmu_lock);

    return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
    /* init hpte slab cache */
    hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
                       sizeof(struct hpte_cache), 0, NULL);

    return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
    kmem_cache_destroy(hpte_cache);
}