/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>
#include "fpu.h"

struct x86_emulate_ctxt;
enum x86_intercept;
enum x86_intercept_stage;

struct x86_exception {
    u8 vector;
    bool error_code_valid;
    u16 error_code;
    bool nested_page_fault;
    u64 address; /* cr2 or nested page fault gpa */
    u8 async_page_fault;
};
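
/*
 * Illustrative sketch (not part of this header): how a caller might fill
 * in a struct x86_exception to describe a guest #GP(0).  GP_VECTOR (13)
 * is the x86 vector define from the KVM headers; the helper name is
 * hypothetical.
 *
 *    static void example_build_gp(struct x86_exception *fault)
 *    {
 *        fault->vector = GP_VECTOR;
 *        fault->error_code_valid = true;
 *        fault->error_code = 0;
 *        fault->nested_page_fault = false;
 *    }
 */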

/*
 * This struct is used to carry enough information from the instruction
 * decoder to main KVM so that a decision can be made whether the
 * instruction needs to be intercepted or not.
 */
struct x86_instruction_info {
    u8  intercept;          /* which intercept                      */
    u8  rep_prefix;         /* rep prefix?                          */
    u8  modrm_mod;          /* mod part of modrm                    */
    u8  modrm_reg;          /* index of register used               */
    u8  modrm_rm;           /* rm part of modrm                     */
    u64 src_val;            /* value of source operand              */
    u64 dst_val;            /* value of destination operand         */
    u8  src_bytes;          /* size of source operand               */
    u8  dst_bytes;          /* size of destination operand          */
    u8  ad_bytes;           /* size of src/dst address              */
    u64 next_rip;           /* rip following the instruction        */
};
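
/*
 * Illustrative sketch, roughly what emulator_check_intercept() in
 * emulate.c does: before executing an instruction that a nested
 * hypervisor may intercept, the emulator packages its decode state like
 * this and asks KVM for a verdict at the given stage.  Field values
 * shown here are hypothetical.
 *
 *    struct x86_instruction_info info = {
 *        .intercept  = ctxt->intercept,
 *        .rep_prefix = ctxt->rep_prefix,
 *        .modrm_mod  = ctxt->modrm_mod,
 *        .modrm_reg  = ctxt->modrm_reg,
 *        .modrm_rm   = ctxt->modrm_rm,
 *        .src_val    = ctxt->src.val64,
 *        .dst_val    = ctxt->dst.val64,
 *        .src_bytes  = ctxt->src.bytes,
 *        .dst_bytes  = ctxt->dst.bytes,
 *        .ad_bytes   = ctxt->ad_bytes,
 *        .next_rip   = ctxt->_eip,    // rip after the decoded instruction
 *    };
 *
 *    if (ctxt->ops->intercept(ctxt, &info, stage) == X86EMUL_INTERCEPTED)
 *        return X86EMUL_INTERCEPTED;
 */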

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg8b_emulated need support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
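
/*
 * Sketch of the return-code contract from a memop callback's point of
 * view, using a hypothetical backend for ->read_emulated(): on success
 * the callback fills @val and returns X86EMUL_CONTINUE; to inject a
 * fault into the guest it fills @fault and returns
 * X86EMUL_PROPAGATE_FAULT, at which point the emulator bails out
 * immediately.  PF_VECTOR (14) is the page-fault vector define from the
 * KVM headers; example_backend_read() is made up.
 *
 *    static int example_read_emulated(struct x86_emulate_ctxt *ctxt,
 *                                     unsigned long addr, void *val,
 *                                     unsigned int bytes,
 *                                     struct x86_exception *fault)
 *    {
 *        if (!example_backend_read(addr, val, bytes)) {
 *            fault->vector = PF_VECTOR;
 *            fault->error_code_valid = true;
 *            fault->error_code = 0;
 *            fault->address = addr;
 *            return X86EMUL_PROPAGATE_FAULT;
 *        }
 *        return X86EMUL_CONTINUE;
 *    }
 */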

struct x86_emulate_ops {
    void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
    /*
     * read_gpr: read a general purpose register (rax - r15)
     *
     * @reg: gpr number.
     */
    ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
    /*
     * write_gpr: write a general purpose register (rax - r15)
     *
     * @reg: gpr number.
     * @val: value to write.
     */
    void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
    /*
     * read_std: Read bytes of standard (non-emulated/special) memory.
     *           Used for descriptor reading.
     *  @addr:  [IN ] Linear address from which to read.
     *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
     *  @bytes: [IN ] Number of bytes to read from memory.
     *  @system:[IN ] Whether the access is forced to be at CPL0.
     */
    int (*read_std)(struct x86_emulate_ctxt *ctxt,
            unsigned long addr, void *val,
            unsigned int bytes,
            struct x86_exception *fault, bool system);

    /*
     * read_phys: Read bytes of standard (non-emulated/special) memory.
     *            Used for descriptor reading.
     *  @addr:  [IN ] Physical address from which to read.
     *  @val:   [OUT] Value read from memory.
     *  @bytes: [IN ] Number of bytes to read from memory.
     */
    int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
            void *val, unsigned int bytes);

    /*
     * write_std: Write bytes of standard (non-emulated/special) memory.
     *            Used for descriptor writing.
     *  @addr:  [IN ] Linear address to which to write.
     *  @val:   [IN ] Value to write to memory.
     *  @bytes: [IN ] Number of bytes to write to memory.
     *  @system:[IN ] Whether the access is forced to be at CPL0.
     */
    int (*write_std)(struct x86_emulate_ctxt *ctxt,
             unsigned long addr, void *val, unsigned int bytes,
             struct x86_exception *fault, bool system);
    /*
     * fetch: Read bytes of standard (non-emulated/special) memory.
     *        Used for instruction fetch.
     *  @addr:  [IN ] Linear address from which to read.
     *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
     *  @bytes: [IN ] Number of bytes to read from memory.
     */
    int (*fetch)(struct x86_emulate_ctxt *ctxt,
             unsigned long addr, void *val, unsigned int bytes,
             struct x86_exception *fault);

    /*
     * read_emulated: Read bytes from emulated/special memory area.
     *  @addr:  [IN ] Linear address from which to read.
     *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
     *  @bytes: [IN ] Number of bytes to read from memory.
     */
    int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
                 unsigned long addr, void *val, unsigned int bytes,
                 struct x86_exception *fault);

    /*
     * write_emulated: Write bytes to emulated/special memory area.
     *  @addr:  [IN ] Linear address to which to write.
     *  @val:   [IN ] Value to write to memory (low-order bytes used as
     *                required).
     *  @bytes: [IN ] Number of bytes to write to memory.
     */
    int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
                  unsigned long addr, const void *val,
                  unsigned int bytes,
                  struct x86_exception *fault);

    /*
     * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
     *                   emulated/special memory area.
     *  @addr:  [IN ] Linear address to access.
     *  @old:   [IN ] Value expected to be current at @addr.
     *  @new:   [IN ] Value to write to @addr.
     *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
     */
    int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
                unsigned long addr,
                const void *old,
                const void *new,
                unsigned int bytes,
                struct x86_exception *fault);
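
    /*
     * Sketch of how ->cmpxchg_emulated() above is meant to be used when
     * writing back a LOCKed read-modify-write: the backend atomically
     * compares @bytes at @addr against @old and stores @new only on a
     * match, returning X86EMUL_CMPXCHG_FAILED otherwise so the emulator
     * can react to the lost race.  The wrapper name is hypothetical.
     *
     *    static int example_locked_writeback(struct x86_emulate_ctxt *ctxt,
     *                                        unsigned long linear_addr)
     *    {
     *        // dst.orig_val64 is the value read earlier, dst.val64 the
     *        // value the instruction wants to store.
     *        return ctxt->ops->cmpxchg_emulated(ctxt, linear_addr,
     *                                           &ctxt->dst.orig_val64,
     *                                           &ctxt->dst.val64,
     *                                           ctxt->dst.bytes,
     *                                           &ctxt->exception);
     *    }
     */
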
    void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);

    int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
                   int size, unsigned short port, void *val,
                   unsigned int count);

    int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
                int size, unsigned short port, const void *val,
                unsigned int count);

    bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
                struct desc_struct *desc, u32 *base3, int seg);
    void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
                struct desc_struct *desc, u32 base3, int seg);
    unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
                         int seg);
    void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
    void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
    void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
    void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
    ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
    int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
    int (*cpl)(struct x86_emulate_ctxt *ctxt);
    void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
    int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
    u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
    void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
    int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
    int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
    int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
    int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
    int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
    int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
    void (*halt)(struct x86_emulate_ctxt *ctxt);
    void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
    int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
    int (*intercept)(struct x86_emulate_ctxt *ctxt,
             struct x86_instruction_info *info,
             enum x86_intercept_stage stage);

    bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
              u32 *ecx, u32 *edx, bool exact_only);
    bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
    bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
    bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
    bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);

    void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);

    unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
    void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
    int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
    void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
    int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
};
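
/*
 * Sketch of how a backend wires itself up: it fills one ops table and
 * points every emulation context at it (KVM's real table is emulator_ops
 * in arch/x86/kvm/x86.c).  The callback names below are hypothetical,
 * and the hooks not listed are left NULL here; a real backend must
 * implement every hook the emulator can reach.
 *
 *    static const struct x86_emulate_ops example_ops = {
 *        .read_gpr         = example_read_gpr,
 *        .write_gpr        = example_write_gpr,
 *        .read_std         = example_read_std,
 *        .write_std        = example_write_std,
 *        .fetch            = example_fetch,
 *        .read_emulated    = example_read_emulated,
 *        .write_emulated   = example_write_emulated,
 *        .cmpxchg_emulated = example_cmpxchg_emulated,
 *        .get_segment      = example_get_segment,
 *        .set_segment      = example_set_segment,
 *        .cpl              = example_cpl,
 *        .get_cpuid        = example_get_cpuid,
 *    };
 *
 *    ctxt->ops = &example_ops;
 */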

/* Type, address-of, and value of an instruction's operand. */
struct operand {
    enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
    unsigned int bytes;
    unsigned int count;
    union {
        unsigned long orig_val;
        u64 orig_val64;
    };
    union {
        unsigned long *reg;
        struct segmented_address {
            ulong ea;
            unsigned seg;
        } mem;
        unsigned xmm;
        unsigned mm;
    } addr;
    union {
        unsigned long val;
        u64 val64;
        char valptr[sizeof(sse128_t)];
        sse128_t vec_val;
        u64 mm_val;
        void *data;
    };
};
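
/*
 * Example of what the decoder might produce for the memory source of a
 * 32-bit "add eax, [ebx]" (values are illustrative; VCPU_SREG_DS and
 * VCPU_REGS_RBX are the segment/register indices from kvm_host.h, and
 * reg_read() is the emulator's internal register accessor):
 *
 *    op->type         = OP_MEM;
 *    op->bytes        = 4;
 *    op->addr.mem.ea  = reg_read(ctxt, VCPU_REGS_RBX);
 *    op->addr.mem.seg = VCPU_SREG_DS;
 *    op->val          = 0;    // filled in once the memory operand is read
 */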

struct fetch_cache {
    u8 data[15];
    u8 *ptr;
    u8 *end;
};

struct read_cache {
    u8 data[1024];
    unsigned long pos;
    unsigned long end;
};

/* Execution mode, passed to the emulator. */
enum x86emul_mode {
    X86EMUL_MODE_REAL,  /* Real mode.             */
    X86EMUL_MODE_VM86,  /* Virtual 8086 mode.     */
    X86EMUL_MODE_PROT16,    /* 16-bit protected mode. */
    X86EMUL_MODE_PROT32,    /* 32-bit protected mode. */
    X86EMUL_MODE_PROT64,    /* 64-bit (long) mode.    */
};
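
/*
 * Sketch of how a caller typically derives the mode from guest state
 * when setting up a context (cs_l and cs_db are the L and D/B bits of
 * the current CS descriptor; the CR0/EFLAGS/EFER flag macros are the
 * usual ones from the x86 headers):
 *
 *    if (!(cr0 & X86_CR0_PE))
 *        ctxt->mode = X86EMUL_MODE_REAL;
 *    else if (ctxt->eflags & X86_EFLAGS_VM)
 *        ctxt->mode = X86EMUL_MODE_VM86;
 *    else if (cs_l && (efer & EFER_LMA))
 *        ctxt->mode = X86EMUL_MODE_PROT64;
 *    else
 *        ctxt->mode = cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 */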

/* These match some of the HF_* flags defined in kvm_host.h */
#define X86EMUL_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
#define X86EMUL_SMM_MASK             (1 << 6)
#define X86EMUL_SMM_INSIDE_NMI_MASK  (1 << 7)

/*
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
struct fastop;

typedef void (*fastop_t)(struct fastop *);

/*
 * The emulator's _regs array tracks only the GPRs, i.e. excludes RIP.  RIP is
 * tracked/accessed via _eip, and except for RIP relative addressing, which
 * also uses _eip, RIP cannot be a register operand nor can it be an operand in
 * a ModRM or SIB byte.
 */
#ifdef CONFIG_X86_64
#define NR_EMULATOR_GPRS    16
#else
#define NR_EMULATOR_GPRS    8
#endif

struct x86_emulate_ctxt {
    void *vcpu;
    const struct x86_emulate_ops *ops;

    /* Register state before/after emulation. */
    unsigned long eflags;
    unsigned long eip; /* eip before instruction emulation */
    /* Emulated execution mode, represented by an X86EMUL_MODE value. */
    enum x86emul_mode mode;

    /* interruptibility state, as a result of execution of STI or MOV SS */
    int interruptibility;

    bool perm_ok; /* do not check permissions if true */
    bool tf;    /* TF value before instruction (after for syscall/sysret) */

    bool have_exception;
    struct x86_exception exception;

    /* GPA available */
    bool gpa_available;
    gpa_t gpa_val;

    /*
     * decode cache
     */

    /* current opcode length in bytes */
    u8 opcode_len;
    u8 b;
    u8 intercept;
    u8 op_bytes;
    u8 ad_bytes;
    union {
        int (*execute)(struct x86_emulate_ctxt *ctxt);
        fastop_t fop;
    };
    int (*check_perm)(struct x86_emulate_ctxt *ctxt);

    bool rip_relative;
    u8 rex_prefix;
    u8 lock_prefix;
    u8 rep_prefix;
    /* bitmap of registers in _regs[] that can be read */
    u16 regs_valid;
    /* bitmap of registers in _regs[] that have been written */
    u16 regs_dirty;
    /* modrm */
    u8 modrm;
    u8 modrm_mod;
    u8 modrm_reg;
    u8 modrm_rm;
    u8 modrm_seg;
    u8 seg_override;
    u64 d;
    unsigned long _eip;

    /* Here begins the usercopy section. */
    struct operand src;
    struct operand src2;
    struct operand dst;
    struct operand memop;
    unsigned long _regs[NR_EMULATOR_GPRS];
    struct operand *memopp;
    struct fetch_cache fetch;
    struct read_cache io_read;
    struct read_cache mem_read;
    bool is_branch;
};

#define KVM_EMULATOR_BUG_ON(cond, ctxt)     \
({                      \
    int __ret = (cond);         \
                        \
    if (WARN_ON_ONCE(__ret))        \
        ctxt->ops->vm_bugged(ctxt); \
    unlikely(__ret);            \
})
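
/*
 * Typical use: check an invariant that should never fail and, if it ever
 * does, mark the VM as bugged rather than continuing with corrupt
 * emulator state, e.g. when validating a GPR index:
 *
 *    if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt))
 *        return 0;
 */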

/* Repeat String Operation Prefix */
#define REPE_PREFIX 0xf3
#define REPNE_PREFIX    0xf2

/* CPUID vendors */
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273

#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69

#define X86EMUL_CPUID_VENDOR_CentaurHauls_ebx 0x746e6543
#define X86EMUL_CPUID_VENDOR_CentaurHauls_ecx 0x736c7561
#define X86EMUL_CPUID_VENDOR_CentaurHauls_edx 0x48727561

static inline bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
{
    return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
           ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
           edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static inline bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
{
    return (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
        ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
        edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) ||
           (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
        ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
        edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx);
}

static inline bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
{
    return ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
           ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
           edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx;
}
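
/*
 * The vendor constants above are simply the CPUID leaf 0 register values:
 * the 12-byte vendor string comes back in EBX, EDX, ECX (in that order),
 * each packed little-endian, so for "GenuineIntel" the chunk "Genu" is
 * 0x756e6547 in EBX, "ineI" is 0x49656e69 in EDX and "ntel" is 0x6c65746e
 * in ECX.  A sketch of checking the current guest's vendor through the
 * ops table:
 *
 *    u32 eax = 0, ebx, ecx = 0, edx;
 *    bool intel;
 *
 *    ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
 *    intel = is_guest_vendor_intel(ebx, ecx, edx);
 */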

enum x86_intercept_stage {
    X86_ICTP_NONE = 0,   /* Allow zero-init to not match anything */
    X86_ICPT_PRE_EXCEPT,
    X86_ICPT_POST_EXCEPT,
    X86_ICPT_POST_MEMACCESS,
};

enum x86_intercept {
    x86_intercept_none,
    x86_intercept_cr_read,
    x86_intercept_cr_write,
    x86_intercept_clts,
    x86_intercept_lmsw,
    x86_intercept_smsw,
    x86_intercept_dr_read,
    x86_intercept_dr_write,
    x86_intercept_lidt,
    x86_intercept_sidt,
    x86_intercept_lgdt,
    x86_intercept_sgdt,
    x86_intercept_lldt,
    x86_intercept_sldt,
    x86_intercept_ltr,
    x86_intercept_str,
    x86_intercept_rdtsc,
    x86_intercept_rdpmc,
    x86_intercept_pushf,
    x86_intercept_popf,
    x86_intercept_cpuid,
    x86_intercept_rsm,
    x86_intercept_iret,
    x86_intercept_intn,
    x86_intercept_invd,
    x86_intercept_pause,
    x86_intercept_hlt,
    x86_intercept_invlpg,
    x86_intercept_invlpga,
    x86_intercept_vmrun,
    x86_intercept_vmload,
    x86_intercept_vmsave,
    x86_intercept_vmmcall,
    x86_intercept_stgi,
    x86_intercept_clgi,
    x86_intercept_skinit,
    x86_intercept_rdtscp,
    x86_intercept_rdpid,
    x86_intercept_icebp,
    x86_intercept_wbinvd,
    x86_intercept_monitor,
    x86_intercept_mwait,
    x86_intercept_rdmsr,
    x86_intercept_wrmsr,
    x86_intercept_in,
    x86_intercept_ins,
    x86_intercept_out,
    x86_intercept_outs,
    x86_intercept_xsetbv,

    nr_x86_intercepts
};

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
void init_decode_cache(struct x86_emulate_ctxt *ctxt);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
             u16 tss_selector, int idt_index, int reason,
             bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt);
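
/*
 * Sketch of the decode/execute flow a caller drives with the functions
 * above (KVM's real loop, including all the re-entry and failure
 * handling, is x86_emulate_instruction() in arch/x86/kvm/x86.c; the
 * handle_*() and inject_*() helpers named here are hypothetical):
 *
 *    int rc = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
 *
 *    if (rc != EMULATION_OK)
 *        return handle_decode_failure(ctxt);
 *
 *    rc = x86_emulate_insn(ctxt);
 *    while (rc == EMULATION_RESTART)
 *        rc = x86_emulate_insn(ctxt);    // e.g. remaining string iterations
 *
 *    if (rc == EMULATION_FAILED)
 *        return handle_emulation_failure(ctxt);
 *    if (ctxt->have_exception)
 *        inject_emulated_exception(ctxt);
 */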

#endif /* _ASM_X86_KVM_X86_EMULATE_H */