/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * vcpu.h
 *
 * VCPU initialisation, query, and hotplug.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_VCPU_H__
#define __XEN_PUBLIC_VCPU_H__

/*
 * Prototype for this hypercall is:
 *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
 * @cmd        == VCPUOP_??? (VCPU operation).
 * @vcpuid     == VCPU to operate on.
 * @extra_args == Operation-specific extra arguments (NULL if none).
 */
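
/*
 * Illustrative sketch (not part of this header): a Linux guest typically
 * reaches this hypercall through a HYPERVISOR_vcpu_op() wrapper that takes
 * the same three arguments as the prototype above; the wrapper name is
 * assumed here only for illustration.
 *
 *     int rc = HYPERVISOR_vcpu_op(cmd, vcpuid, extra_args);
 *     if (rc < 0)
 *         handle_error(rc);
 *     (handle_error() is a hypothetical error path.)
 */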

/*
 * Initialise a VCPU. Each VCPU can be initialised only once. A
 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
 *
 * @extra_arg == pointer to vcpu_guest_context structure containing initial
 *               state for the VCPU.
 */
#define VCPUOP_initialise            0

/*
 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
 * if the VCPU has not been initialised (VCPUOP_initialise).
 */
#define VCPUOP_up                    1
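
/*
 * Illustrative bring-up sequence (a sketch, not part of this header): assumes
 * a Linux-style HYPERVISOR_vcpu_op() wrapper and a vcpu_guest_context
 * (defined elsewhere in the public interface) already filled in with the new
 * VCPU's initial register state.
 *
 *     struct vcpu_guest_context *ctxt = prepare_initial_state(vcpuid);
 *     if (HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, ctxt) == 0)
 *         HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
 *     (prepare_initial_state() is a hypothetical helper.)
 */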

/*
 * Bring down a VCPU (i.e., make it non-runnable).
 * There are a few caveats that callers should observe:
 *  1. This operation may return, and VCPUOP_is_up may return false, before the
 *     VCPU stops running (i.e., the command is asynchronous). It is a good
 *     idea to ensure that the VCPU has entered a non-critical loop before
 *     bringing it down. Alternatively, this operation is guaranteed
 *     synchronous if invoked by the VCPU itself.
 *  2. After a VCPU is initialised, there is currently no way to drop all its
 *     references to domain memory. Even a VCPU that is down still holds
 *     memory references via its pagetable base pointer and GDT. It is good
 *     practice to move a VCPU onto an 'idle' or default page table, LDT and
 *     GDT before bringing it down.
 */
#define VCPUOP_down                  2

/* Returns 1 if the given VCPU is up. */
#define VCPUOP_is_up                 3
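
/*
 * Illustrative sketch (not part of this header): taking another VCPU down and
 * polling VCPUOP_is_up until it is reported down. Assumes a Linux-style
 * HYPERVISOR_vcpu_op() wrapper. Per caveat 1 above, the "down" report may
 * precede the VCPU actually ceasing to run.
 *
 *     HYPERVISOR_vcpu_op(VCPUOP_down, vcpuid, NULL);
 *     while (HYPERVISOR_vcpu_op(VCPUOP_is_up, vcpuid, NULL) > 0)
 *         cpu_relax();
 *     (cpu_relax() stands in for a busy-wait hint.)
 */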

/*
 * Return information about the state and running time of a VCPU.
 * @extra_arg == pointer to vcpu_runstate_info structure.
 */
#define VCPUOP_get_runstate_info     4
struct vcpu_runstate_info {
    /* VCPU's current state (RUNSTATE_*). */
    int      state;
    /* When was current state entered (system time, ns)? */
    uint64_t state_entry_time;
    /*
     * Update indicator set in state_entry_time:
     * When activated via VMASST_TYPE_runstate_update_flag, this bit is set
     * in the guest-memory-mapped copy of vcpu_runstate_info while the
     * hypervisor is updating it.
     */
#define XEN_RUNSTATE_UPDATE (1ULL << 63)
    /*
     * Time spent in each RUNSTATE_* (ns). The sum of these times is
     * guaranteed not to drift from system time.
     */
    uint64_t time[4];
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
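
/*
 * Illustrative sketch (not part of this header): querying the calling VCPU's
 * runstate with VCPUOP_get_runstate_info, assuming a Linux-style
 * HYPERVISOR_vcpu_op() wrapper and that 'cpu' is the caller's VCPU id.
 *
 *     struct vcpu_runstate_info info;
 *
 *     if (HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, cpu, &info) == 0)
 *         report_runstate(info.state, info.state_entry_time);
 *     (report_runstate() is a hypothetical consumer of the result.)
 */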

/* VCPU is currently running on a physical CPU. */
#define RUNSTATE_running  0

/* VCPU is runnable, but not currently scheduled on any physical CPU. */
#define RUNSTATE_runnable 1

/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
#define RUNSTATE_blocked  2

/*
 * VCPU is not runnable, but it is not blocked.
 * This is a 'catch all' state for things like hotplug and pauses by the
 * system administrator (or for critical sections in the hypervisor).
 * RUNSTATE_blocked dominates this state (it is the preferred state).
 */
#define RUNSTATE_offline  3
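
/*
 * Illustrative sketch (not part of this header): a common use of the
 * per-state accounting in vcpu_runstate_info.time[] is deriving "stolen"
 * time, i.e. time the VCPU wanted to run (or was forced offline) but did not
 * run. Given a struct vcpu_runstate_info 'info' obtained as above:
 *
 *     uint64_t steal_ns = info.time[RUNSTATE_runnable] +
 *                         info.time[RUNSTATE_offline];
 */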

/*
 * Register a shared memory area from which the guest may obtain its own
 * runstate information without needing to execute a hypercall.
 * Notes:
 *  1. The registered address may be virtual or physical, depending on the
 *     platform. The virtual address should be registered on x86 systems.
 *  2. Only one shared area may be registered per VCPU. The shared area is
 *     updated by the hypervisor each time the VCPU is scheduled. Thus
 *     runstate.state will always be RUNSTATE_running and
 *     runstate.state_entry_time will indicate the system time at which the
 *     VCPU was last scheduled to run.
 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
 */
#define VCPUOP_register_runstate_memory_area 5
struct vcpu_register_runstate_memory_area {
        union {
                GUEST_HANDLE(vcpu_runstate_info) h;
                struct vcpu_runstate_info *v;
                uint64_t p;
        } addr;
};
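
/*
 * Illustrative sketch (not part of this header): registering a per-VCPU
 * runstate area so the guest can read its runstate without a hypercall.
 * Assumes a Linux-style HYPERVISOR_vcpu_op() wrapper; on x86 the virtual
 * address form (addr.v) is used, per note 1 above.
 *
 *     static struct vcpu_runstate_info runstate;
 *     struct vcpu_register_runstate_memory_area area;
 *
 *     area.addr.v = &runstate;
 *     HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
 */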

/*
 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
 * which can be set via these commands. Periods smaller than one millisecond
 * may not be supported.
 */
#define VCPUOP_set_periodic_timer    6 /* arg == vcpu_set_periodic_timer_t */
#define VCPUOP_stop_periodic_timer   7 /* arg == NULL */
struct vcpu_set_periodic_timer {
        uint64_t period_ns;
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
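
/*
 * Illustrative sketch (not part of this header): arming the calling VCPU's
 * periodic timer with a 10 ms period, then stopping it again. Assumes a
 * Linux-style HYPERVISOR_vcpu_op() wrapper. Note the one-millisecond floor
 * mentioned above.
 *
 *     struct vcpu_set_periodic_timer p = { .period_ns = 10 * 1000 * 1000 };
 *
 *     HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu, &p);
 *     ...
 *     HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL);
 */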

/*
 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
 * timer which can be set via these commands.
 */
#define VCPUOP_set_singleshot_timer  8 /* arg == vcpu_set_singleshot_timer_t */
#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
struct vcpu_set_singleshot_timer {
        uint64_t timeout_abs_ns;   /* Absolute system time (ns) at which to fire. */
        uint32_t flags;            /* VCPU_SSHOTTMR_??? */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);

/* Flags to VCPUOP_set_singleshot_timer. */
/* Require the timeout to be in the future (return -ETIME if it has passed). */
#define _VCPU_SSHOTTMR_future (0)
#define VCPU_SSHOTTMR_future  (1U << _VCPU_SSHOTTMR_future)
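
/*
 * Illustrative sketch (not part of this header): programming a one-shot event
 * 1 ms from "now", where now_ns is the guest's current notion of Xen system
 * time (obtaining it is outside the scope of this header). Assumes a
 * Linux-style HYPERVISOR_vcpu_op() wrapper.
 *
 *     struct vcpu_set_singleshot_timer ss = {
 *         .timeout_abs_ns = now_ns + 1000000,
 *         .flags          = VCPU_SSHOTTMR_future,
 *     };
 *     int rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &ss);
 *     if (rc == -ETIME)
 *         retry_with_later_deadline();
 *     (retry_with_later_deadline() is a hypothetical recovery path.)
 */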

/*
 * Register a memory location in the guest address space for the
 * vcpu_info structure.  This allows the guest to place the vcpu_info
 * structure in a convenient place, such as in a per-cpu data area.
 * The pointer need not be page aligned, but the structure must not
 * cross a page boundary.
 */
#define VCPUOP_register_vcpu_info   10 /* arg == struct vcpu_register_vcpu_info */
struct vcpu_register_vcpu_info {
    uint64_t mfn;    /* mfn of page to place vcpu_info */
    uint32_t offset; /* offset within page */
    uint32_t rsvd;   /* unused */
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
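
/*
 * Illustrative sketch (not part of this header): relocating a VCPU's
 * vcpu_info into guest-chosen memory. Assumes a Linux-style
 * HYPERVISOR_vcpu_op() wrapper and a hypothetical virt_to_mfn() helper that
 * yields the machine frame number backing a virtual address.
 *
 *     struct vcpu_register_vcpu_info reg;
 *
 *     reg.mfn    = virt_to_mfn(vcpu_info_ptr);
 *     reg.offset = (unsigned long)vcpu_info_ptr & (PAGE_SIZE - 1);
 *     reg.rsvd   = 0;
 *     HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &reg);
 */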

/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
#define VCPUOP_send_nmi             11

/*
 * Get the physical ID information for a pinned vcpu's underlying physical
 * processor.  The physical ID information is architecture-specific.
 * On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
 * This command returns -EINVAL if it is not a valid operation for this VCPU.
 */
#define VCPUOP_get_physid           12 /* arg == vcpu_get_physid_t */
struct vcpu_get_physid {
    uint64_t phys_id;
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_get_physid);
#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
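
/*
 * Illustrative sketch (not part of this header): reading the APIC and ACPI
 * IDs of the physical processor backing a pinned VCPU on x86, using the
 * accessor macros above. Assumes a Linux-style HYPERVISOR_vcpu_op() wrapper.
 *
 *     struct vcpu_get_physid physid;
 *
 *     if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &physid) == 0) {
 *         uint32_t apic_id = xen_vcpu_physid_to_x86_apicid(physid.phys_id);
 *         uint32_t acpi_id = xen_vcpu_physid_to_x86_acpiid(physid.phys_id);
 *     }
 */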

/*
 * Register a memory location to get a secondary copy of the vcpu time
 * parameters.  The master copy still exists as part of the vcpu shared
 * memory area, and this secondary copy is updated whenever the master copy
 * is updated (and using the same versioning scheme for synchronisation).
 *
 * The intent is that this copy may be mapped (RO) into userspace so
 * that usermode can compute system time using the time info and the
 * tsc.  Usermode will see an array of vcpu_time_info structures, one
 * for each vcpu, and choose the right one by an existing mechanism
 * which allows it to get the current vcpu number (such as via a
 * segment limit).  It can then apply the normal algorithm to compute
 * system time from the tsc.
 *
 * @extra_arg == pointer to vcpu_register_time_memory_area structure.
 */
#define VCPUOP_register_vcpu_time_memory_area   13
DEFINE_GUEST_HANDLE_STRUCT(vcpu_time_info);
struct vcpu_register_time_memory_area {
    union {
        GUEST_HANDLE(vcpu_time_info) h;
        struct pvclock_vcpu_time_info *v;
        uint64_t p;
    } addr;
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_time_memory_area);
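
/*
 * Illustrative sketch (not part of this header): registering a secondary copy
 * of this VCPU's time parameters, e.g. in a page the kernel later maps
 * read-only into userspace for vDSO-style clock reads. Assumes a Linux-style
 * HYPERVISOR_vcpu_op() wrapper.
 *
 *     static struct pvclock_vcpu_time_info ti;
 *     struct vcpu_register_time_memory_area area = { .addr.v = &ti };
 *
 *     HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, cpu, &area);
 */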

#endif /* __XEN_PUBLIC_VCPU_H__ */