// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"
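
/*
 * This library builds z/Architecture DAT tables with a region-first table
 * as the top level.  Region and segment tables have 2048 8-byte entries
 * each, i.e. they span PAGES_PER_REGION (4) pages of 4 KiB, while a page
 * table has 256 8-byte entries.  Freshly allocated tables are filled with
 * 0xff, which leaves every entry marked invalid.
 */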
#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
    vm_paddr_t paddr;

    TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
            vm->page_size);

    if (vm->pgd_created)
        return;

    paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
                   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
    /* Fill the new region-first table with 0xff so every entry is invalid */
    memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

    vm->pgd = paddr;
    vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
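/*
 * The entry combines the table origin with the table-type field (3 for a
 * region-first-table entry down to 0 for a segment-table entry) and, for
 * region/segment tables, the table-length field of PAGES_PER_REGION - 1,
 * i.e. the designated next-level table spans all four pages.
 */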
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
    uint64_t taddr;

    taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
                   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
    /* Only clear the pages that were actually allocated */
    memset(addr_gpa2hva(vm, taddr), 0xff,
           (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

    return (taddr & REGION_ENTRY_ORIGIN)
        | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
        | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
    int ri, idx;
    uint64_t *entry;

    TEST_ASSERT((gva % vm->page_size) == 0,
        "Virtual address not on page boundary,\n"
        "  vaddr: 0x%lx vm->page_size: 0x%x",
        gva, vm->page_size);
    TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
        (gva >> vm->page_shift)),
        "Invalid virtual address, vaddr: 0x%lx",
        gva);
    TEST_ASSERT((gpa % vm->page_size) == 0,
        "Physical address not on page boundary,\n"
        "  paddr: 0x%lx vm->page_size: 0x%x",
        gpa, vm->page_size);
    TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
        "Physical address beyond maximum supported,\n"
        "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
        gpa, vm->max_gfn, vm->page_size);
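
    /*
     * A 64-bit virtual address is split into four 11-bit table indexes
     * (region-first, region-second, region-third and segment), an 8-bit
     * page index and a 12-bit byte offset; each step of the walk below
     * therefore consumes 11 bits, and the page index is masked with 0xff.
     */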
    /* Walk through region and segment tables */
    entry = addr_gpa2hva(vm, vm->pgd);
    for (ri = 1; ri <= 4; ri++) {
        idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
        if (entry[idx] & REGION_ENTRY_INVALID)
            entry[idx] = virt_alloc_region(vm, ri);
        entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
    }

    /* Fill in page table entry */
    idx = (gva >> 12) & 0x0ffu;     /* page index */
    if (!(entry[idx] & PAGE_INVALID))
        fprintf(stderr,
            "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
    entry[idx] = gpa;
}
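
/*
 * Tests normally reach the mapping code above through the generic
 * virt_pg_map() wrapper from kvm_util rather than by calling the arch hook
 * directly, roughly (sketch, with page-aligned gva/gpa):
 *
 *     virt_pg_map(vm, gva, gpa);
 */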

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
    int ri, idx;
    uint64_t *entry;

    TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
            vm->page_size);

    entry = addr_gpa2hva(vm, vm->pgd);
    for (ri = 1; ri <= 4; ri++) {
        idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
        TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
                "No region mapping for vm virtual address 0x%lx",
                gva);
        entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
    }

    idx = (gva >> 12) & 0x0ffu;     /* page index */

    TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
            "No page mapping for vm virtual address 0x%lx", gva);

    return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
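
/*
 * Dump helpers: a page table holds 256 8-byte entries (hence 0x100 * 8
 * below) and a region/segment table holds 2048 entries (hence 0x400 * 8);
 * entries marked invalid are skipped.
 */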
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
               uint64_t ptea_start)
{
    uint64_t *pte, ptea;

    for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
        pte = addr_gpa2hva(vm, ptea);
        if (*pte & PAGE_INVALID)
            continue;
        fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
            indent, "", ptea, *pte);
    }
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                 uint64_t reg_tab_addr)
{
    uint64_t addr, *entry;

    for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
        entry = addr_gpa2hva(vm, addr);
        if (*entry & REGION_ENTRY_INVALID)
            continue;
        fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
            indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
            addr, *entry);
        if (*entry & REGION_ENTRY_TYPE) {
            virt_dump_region(stream, vm, indent + 2,
                     *entry & REGION_ENTRY_ORIGIN);
        } else {
            virt_dump_ptes(stream, vm, indent + 2,
                       *entry & REGION_ENTRY_ORIGIN);
        }
    }
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
    if (!vm->pgd_created)
        return;

    virt_dump_region(stream, vm, indent, vm->pgd);
}
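
/*
 * Create a VCPU and point it at guest_code: a stack is set up in r15, CR1
 * gets an ASCE designating the region-first table built above (the low 0xf
 * bits select the region-first designation type and the full table length),
 * and the PSW enables DAT in 64-bit addressing mode.
 */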
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                  void *guest_code)
{
    size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
    uint64_t stack_vaddr;
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    struct kvm_vcpu *vcpu;
    struct kvm_run *run;

    TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
            vm->page_size);

    stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                     DEFAULT_GUEST_STACK_VADDR_MIN);

    vcpu = __vm_vcpu_add(vm, vcpu_id);

    /* Setup guest registers */
    vcpu_regs_get(vcpu, &regs);
    /* Leave room for the 160-byte register save area required by the ABI */
    regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
    vcpu_regs_set(vcpu, &regs);

    vcpu_sregs_get(vcpu, &sregs);
    sregs.crs[0] |= 0x00040000;     /* Enable floating point regs */
    sregs.crs[1] = vm->pgd | 0xf;       /* Primary region table */
    vcpu_sregs_set(vcpu, &sregs);

    run = vcpu->run;
    run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
    run->psw_addr = (uintptr_t)guest_code;

    return vcpu;
}
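
/*
 * The s390x ELF ABI passes the first five integer arguments in r2 through
 * r6, hence the limit of five arguments and the copy into gprs[i + 2].
 */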
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
    va_list ap;
    struct kvm_regs regs;
    int i;

    TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
            "  num: %u\n",
            num);

    va_start(ap, num);
    vcpu_regs_get(vcpu, &regs);

    for (i = 0; i < num; i++)
        regs.gprs[i + 2] = va_arg(ap, uint64_t);

    vcpu_regs_set(vcpu, &regs);
    va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
    fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
        indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
    /* Nothing to check: this library does not track unhandled guest exceptions */
}