/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"

#include <sys/ioctl.h>

#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
    struct kvm_userspace_memory_region region;
    struct sparsebit *unused_phy_pages;
    int fd;
    off_t offset;
    void *host_mem;
    void *host_alias;
    void *mmap_start;
    void *mmap_alias;
    size_t mmap_size;
    struct rb_node gpa_node;
    struct rb_node hva_node;
    struct hlist_node slot_node;
};

struct kvm_vcpu {
    struct list_head list;
    uint32_t id;
    int fd;
    struct kvm_vm *vm;
    struct kvm_run *run;
#ifdef __x86_64__
    struct kvm_cpuid2 *cpuid;
#endif
    struct kvm_dirty_gfn *dirty_gfns;
    uint32_t fetch_index;
    uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
    struct rb_root gpa_tree;
    struct rb_root hva_tree;
    DECLARE_HASHTABLE(slot_hash, 9);
};

struct kvm_vm {
    int mode;
    unsigned long type;
    int kvm_fd;
    int fd;
    unsigned int pgtable_levels;
    unsigned int page_size;
    unsigned int page_shift;
    unsigned int pa_bits;
    unsigned int va_bits;
    uint64_t max_gfn;
    struct list_head vcpus;
    struct userspace_mem_regions regions;
    struct sparsebit *vpages_valid;
    struct sparsebit *vpages_mapped;
    bool has_irqchip;
    bool pgd_created;
    vm_paddr_t pgd;
    vm_vaddr_t gdt;
    vm_vaddr_t tss;
    vm_vaddr_t idt;
    vm_vaddr_t handlers;
    uint32_t dirty_ring_size;

    /* Cache of information for binary stats interface */
    int stats_fd;
    struct kvm_stats_header stats_header;
    struct kvm_stats_desc *stats_desc;
};

/*
 * Iterate over all vCPUs of @vm.  Note, struct kvm_vm tracks its vCPUs on a
 * list (@vcpus, linked through kvm_vcpu.list), so iteration is a plain list
 * walk.
 */
#define kvm_for_each_vcpu(vm, vcpu) \
    list_for_each_entry((vcpu), &(vm)->vcpus, list)

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR      0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR  0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN   0xab6000
#define DEFAULT_STACK_PGS       5

enum vm_guest_mode {
    VM_MODE_P52V48_4K,
    VM_MODE_P52V48_64K,
    VM_MODE_P48V48_4K,
    VM_MODE_P48V48_16K,
    VM_MODE_P48V48_64K,
    VM_MODE_P40V48_4K,
    VM_MODE_P40V48_16K,
    VM_MODE_P40V48_64K,
    VM_MODE_PXXV48_4K,  /* For 48-bit VA with any supported number of PA bits */
    VM_MODE_P47V64_4K,
    VM_MODE_P44V64_4K,
    VM_MODE_P36V48_4K,
    VM_MODE_P36V48_16K,
    VM_MODE_P36V48_64K,
    VM_MODE_P36V47_16K,
    NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT         vm_mode_default
#define MIN_PAGE_SHIFT          12U
#define ptes_per_page(page_size)    ((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT         VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT          12U
#define ptes_per_page(page_size)    ((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT         VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT          12U
#define ptes_per_page(page_size)    ((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT         VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT          12U
#define ptes_per_page(page_size)    ((page_size) / 8)

#endif

#define MIN_PAGE_SIZE       (1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE   ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
    unsigned int pa_bits;
    unsigned int va_bits;
    unsigned int page_size;
    unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
    return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
    "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)  __KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)                      \
({                                      \
    static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), "");   \
    ioctl(fd, cmd, arg);                            \
})

#define __kvm_ioctl(kvm_fd, cmd, arg)               \
    kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)          \
({                              \
    int ret = __kvm_ioctl(kvm_fd, cmd, arg);        \
                                \
    TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));    \
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
    _kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)                \
({                              \
    static_assert_is_vm(vm);                \
    kvm_do_ioctl((vm)->fd, cmd, arg);           \
})

#define _vm_ioctl(vm, cmd, name, arg)               \
({                              \
    int ret = __vm_ioctl(vm, cmd, arg);         \
                                \
    TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));    \
})

#define vm_ioctl(vm, cmd, arg)                  \
    _vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)                \
({                              \
    static_assert_is_vcpu(vcpu);                \
    kvm_do_ioctl((vcpu)->fd, cmd, arg);         \
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)           \
({                              \
    int ret = __vcpu_ioctl(vcpu, cmd, arg);         \
                                \
    TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));    \
})

#define vcpu_ioctl(vcpu, cmd, arg)              \
    _vcpu_ioctl(vcpu, cmd, #cmd, arg)
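
/*
 * Naming convention: the double-underscore wrappers (__kvm_ioctl(),
 * __vm_ioctl(), __vcpu_ioctl()) return the raw ioctl() result so callers can
 * probe for expected failures, while the plain variants assert success.
 * Illustrative sketch only; KVM_KVMCLOCK_CTRL is just an example command and
 * the EINVAL check is a hypothetical expectation:
 *
 *      int ret = __vcpu_ioctl(vcpu, KVM_KVMCLOCK_CTRL, NULL);
 *
 *      if (ret)
 *          TEST_ASSERT(errno == EINVAL, "Unexpected errno: %i", errno);
 *
 *      vcpu_ioctl(vcpu, KVM_KVMCLOCK_CTRL, NULL);  // asserts ret == 0
 */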

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
    int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

    TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
    return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
    struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

    return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
    struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

    vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
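
/*
 * Example (illustrative; KVM_CAP_HALT_POLL and the argument value 0 are just
 * one possible capability/argument pair, and availability should be checked
 * first on a real host):
 *
 *      if (kvm_has_cap(KVM_CAP_HALT_POLL))
 *          vm_enable_cap(vm, KVM_CAP_HALT_POLL, 0);
 */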

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
               size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
    struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

    vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
                      uint64_t first_page, uint32_t num_pages)
{
    struct kvm_clear_dirty_log args = {
        .dirty_bitmap = log,
        .slot = slot,
        .first_page = first_page,
        .num_pages = num_pages
    };

    vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}
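
/*
 * Sketch of a dirty-logging round trip (assumes a memslot created with
 * KVM_MEM_LOG_DIRTY_PAGES; "slot" and "host_num_pages" are hypothetical):
 *
 *      unsigned long *bmap = bitmap_zalloc(host_num_pages);
 *
 *      kvm_vm_get_dirty_log(vm, slot, bmap);
 *      // ... walk the bitmap and verify the dirtied pages ...
 *      kvm_vm_clear_dirty_log(vm, slot, bmap, 0, host_num_pages);
 *      free(bmap);
 */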

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
    return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
    int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

    TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
    return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
    ssize_t ret;

    ret = read(stats_fd, header, sizeof(*header));
    TEST_ASSERT(ret == sizeof(*header), "Read stats header");
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
                          struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
    /*
     * The base size of the descriptor is defined by KVM's ABI, but the
     * size of the name field is variable, as far as KVM's ABI is
     * concerned. For a given instance of KVM, the name field is the same
     * size for all stats and is provided in the overall stats header.
     */
    return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
                              int index,
                              struct kvm_stats_header *header)
{
    /*
     * Note, size_desc includes the size of the name field, which is
     * variable. i.e. this is NOT equivalent to &stats_desc[i].
     */
    return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
            struct kvm_stats_desc *desc, uint64_t *data,
            size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
           size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
    uint64_t data;

    __vm_get_stat(vm, stat_name, &data, 1);
    return data;
}
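
/*
 * Example of pulling a single binary stat by name ("remote_tlb_flush" is an
 * illustrative stat name; the available stats are host dependent):
 *
 *      uint64_t flushes = vm_get_stat(vm, "remote_tlb_flush");
 *
 * The lower-level flow behind it is roughly: vm_get_stats_fd() to get the
 * stats file descriptor, read_stats_header() and read_stats_descriptors()
 * to locate the matching descriptor, then read_stat_data() for the values.
 */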

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                   uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
    enum vm_mem_backing_src_type src_type,
    uint64_t guest_paddr, uint32_t slot, uint64_t npages,
    uint32_t flags);
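
/*
 * Sketch of adding a dirty-loggable memslot (the literal gpa/slot/page count
 * below are hypothetical; VM_MEM_SRC_ANONYMOUS comes from test_util.h):
 *
 *      vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *                                  0x10000000, 1, 512,
 *                                  KVM_MEM_LOG_DIRTY_PAGES);
 */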

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
          unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
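
/*
 * Typical pattern for sharing data with the guest: allocate guest-virtual
 * memory, then write it through the host mapping.  Sketch only; the struct
 * and its use by guest code are hypothetical:
 *
 *      vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *      struct test_args *args = addr_gva2hva(vm, gva);
 *
 *      args->iterations = 10;      // visible to the guest at "gva"
 */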

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
    return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}
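
/*
 * Minimal run-loop sketch (exit handling is test specific; KVM_EXIT_IO is
 * shown purely as an example exit reason):
 *
 *      for (;;) {
 *          vcpu_run(vcpu);
 *          if (vcpu->run->exit_reason != KVM_EXIT_IO)
 *              break;
 *          // ... handle the I/O exit, e.g. a ucall from the guest ...
 *      }
 */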

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
                   uint64_t arg0)
{
    struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

    vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
                    struct kvm_guest_debug *debug)
{
    vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
                     struct kvm_mp_state *mp_state)
{
    vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
                     struct kvm_mp_state *mp_state)
{
    vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
    vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
    vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
    vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
    vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
    return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
    vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
    vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
    struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

    return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
    struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

    return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
    struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

    vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
    struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

    vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
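
/*
 * KVM_{GET,SET}_ONE_REG example: register IDs are architecture defined (the
 * aarch64 timer-counter ID below is illustrative):
 *
 *      uint64_t cnt;
 *
 *      vcpu_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT, &cnt);
 *      vcpu_set_reg(vcpu, KVM_REG_ARM_TIMER_CNT, cnt + 1);
 */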

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
                   struct kvm_vcpu_events *events)
{
    vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
                   struct kvm_vcpu_events *events)
{
    vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
                     struct kvm_nested_state *state)
{
    vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
                      struct kvm_nested_state *state)
{
    return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
                     struct kvm_nested_state *state)
{
    vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
    int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

    TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
    return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    int ret = __kvm_has_device_attr(dev_fd, group, attr);

    TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
                       uint64_t attr, void *val)
{
    int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

    TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
                       uint64_t attr, void *val)
{
    int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

    TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
                     uint64_t attr)
{
    return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
                    uint64_t attr)
{
    kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
                     uint64_t attr, void *val)
{
    return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
                    uint64_t attr, void *val)
{
    kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
                     uint64_t attr, void *val)
{
    return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
                    uint64_t attr, void *val)
{
    kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
    int fd = __kvm_create_device(vm, type);

    TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
    return fd;
}
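
/*
 * Device example (an aarch64 vGIC is shown purely for illustration; the
 * device type and attribute group/ID depend on the architecture and on host
 * support):
 *
 *      int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 *      uint32_t nr_irqs = 64;
 *
 *      kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *                          0, &nr_irqs);
 */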

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - The vCPU whose entry-point arguments are to be set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
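
/*
 * E.g. to pass two values to a guest function "guest_code(a, b)" (the name
 * and values here are hypothetical):
 *
 *      vcpu_args_set(vcpu, 2, (uint64_t)gva, 10);
 */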

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES      4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
        uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                 uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                  vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
               uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
    return ____vm_create(VM_MODE_DEFAULT, 0);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
    return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
                      uint64_t extra_mem_pages,
                      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
                          void *guest_code,
                          struct kvm_vcpu *vcpus[])
{
    return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
                      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                     uint64_t extra_mem_pages,
                     void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                             void *guest_code)
{
    return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
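
/*
 * Typical test setup/teardown built on the helpers above ("guest_main" is a
 * hypothetical guest entry point):
 *
 *      struct kvm_vcpu *vcpu;
 *      struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *
 *      vcpu_run(vcpu);
 *      // ... assertions on vcpu->run ...
 *      kvm_vm_free(vm);
 */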

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
    unsigned int n;
    n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
    /* s390 requires 1M aligned guest sizes */
    n = (n + 255) & ~255;
#endif
    return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                 uint64_t end);

#define sync_global_to_guest(vm, g) ({              \
    typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
    memcpy(_p, &(g), sizeof(g));                \
})

#define sync_global_from_guest(vm, g) ({            \
    typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
    memcpy(&(g), _p, sizeof(g));                \
})
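
/*
 * These macros work because the test binary is loaded into the guest, so a
 * global variable has the same virtual address on both the host and guest
 * sides.  Sketch (the global "guest_test_mode" is hypothetical):
 *
 *      static int guest_test_mode;
 *
 *      guest_test_mode = 3;
 *      sync_global_to_guest(vm, guest_test_mode);    // push value to guest
 *      // ... run the guest, which may modify it ...
 *      sync_global_from_guest(vm, guest_test_mode);  // pull it back
 */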

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
            uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
                 uint8_t indent)
{
    vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                       void *guest_code)
{
    return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
                        uint32_t vcpu_id)
{
    return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
    virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
    virt_arch_pg_map(vm, vaddr, paddr);
}
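
/*
 * E.g. to back a guest-virtual page with a freshly allocated physical page
 * (the guest virtual address is hypothetical; both addresses must be page
 * aligned):
 *
 *      vm_paddr_t paddr = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 *
 *      virt_pg_map(vm, 0x10000, paddr);
 */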

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
    return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm     - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
    virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
    return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

#endif /* SELFTEST_KVM_UTIL_BASE_H */