#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types, encoded into the Dst/Src/Src2 fields of the opcode flags.
 */
#define OpNone             0ull
#define OpImplicit         1ull
#define OpReg              2ull
#define OpMem              3ull
#define OpAcc              4ull
#define OpDI               5ull
#define OpMem64            6ull
#define OpImmUByte         7ull
#define OpDX               8ull
#define OpCL               9ull
#define OpImmByte         10ull
#define OpOne             11ull
#define OpImm             12ull
#define OpMem16           13ull
#define OpMem32           14ull
#define OpImmU            15ull
#define OpSI              16ull
#define OpImmFAddr        17ull
#define OpMemFAddr        18ull
#define OpImmU16          19ull
#define OpES              20ull
#define OpCS              21ull
#define OpSS              22ull
#define OpDS              23ull
#define OpFS              24ull
#define OpGS              25ull
#define OpMem8            26ull
#define OpImm64           27ull
#define OpXLat            28ull
#define OpAccLo           29ull
#define OpAccHi           30ull

#define OpBits             5
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode flags: the destination, source and second-source operand types are
 * OpXXX values stored in the bit fields defined by DstShift/SrcShift/Src2Shift
 * below.  The remaining bits describe decode properties (ModRM, prefixes,
 * groups, escapes) and execution constraints (privilege, lock, 64-bit
 * validity, alignment, interception, ...).
 */
0082 #define ByteOp (1<<0)
0083
0084 #define DstShift 1
0085 #define ImplicitOps (OpImplicit << DstShift)
0086 #define DstReg (OpReg << DstShift)
0087 #define DstMem (OpMem << DstShift)
0088 #define DstAcc (OpAcc << DstShift)
0089 #define DstDI (OpDI << DstShift)
0090 #define DstMem64 (OpMem64 << DstShift)
0091 #define DstMem16 (OpMem16 << DstShift)
0092 #define DstImmUByte (OpImmUByte << DstShift)
0093 #define DstDX (OpDX << DstShift)
0094 #define DstAccLo (OpAccLo << DstShift)
0095 #define DstMask (OpMask << DstShift)
0096
0097 #define SrcShift 6
0098 #define SrcNone (OpNone << SrcShift)
0099 #define SrcReg (OpReg << SrcShift)
0100 #define SrcMem (OpMem << SrcShift)
0101 #define SrcMem16 (OpMem16 << SrcShift)
0102 #define SrcMem32 (OpMem32 << SrcShift)
0103 #define SrcImm (OpImm << SrcShift)
0104 #define SrcImmByte (OpImmByte << SrcShift)
0105 #define SrcOne (OpOne << SrcShift)
0106 #define SrcImmUByte (OpImmUByte << SrcShift)
0107 #define SrcImmU (OpImmU << SrcShift)
0108 #define SrcSI (OpSI << SrcShift)
0109 #define SrcXLat (OpXLat << SrcShift)
0110 #define SrcImmFAddr (OpImmFAddr << SrcShift)
0111 #define SrcMemFAddr (OpMemFAddr << SrcShift)
0112 #define SrcAcc (OpAcc << SrcShift)
0113 #define SrcImmU16 (OpImmU16 << SrcShift)
0114 #define SrcImm64 (OpImm64 << SrcShift)
0115 #define SrcDX (OpDX << SrcShift)
0116 #define SrcMem8 (OpMem8 << SrcShift)
0117 #define SrcAccHi (OpAccHi << SrcShift)
0118 #define SrcMask (OpMask << SrcShift)
0119 #define BitOp (1<<11)
0120 #define MemAbs (1<<12)
0121 #define String (1<<13)
0122 #define Stack (1<<14)
0123 #define GroupMask (7<<15)
0124 #define Group (1<<15)
0125 #define GroupDual (2<<15)
0126 #define Prefix (3<<15)
0127 #define RMExt (4<<15)
0128 #define Escape (5<<15)
0129 #define InstrDual (6<<15)
0130 #define ModeDual (7<<15)
0131 #define Sse (1<<18)
0132
0133 #define ModRM (1<<19)
0134
0135 #define Mov (1<<20)
0136
0137 #define Prot (1<<21)
0138 #define EmulateOnUD (1<<22)
0139 #define NoAccess (1<<23)
0140 #define Op3264 (1<<24)
0141 #define Undefined (1<<25)
0142 #define Lock (1<<26)
0143 #define Priv (1<<27)
0144 #define No64 (1<<28)
0145 #define PageTable (1 << 29)
0146 #define NotImpl (1 << 30)
0147
0148 #define Src2Shift (31)
0149 #define Src2None (OpNone << Src2Shift)
0150 #define Src2Mem (OpMem << Src2Shift)
0151 #define Src2CL (OpCL << Src2Shift)
0152 #define Src2ImmByte (OpImmByte << Src2Shift)
0153 #define Src2One (OpOne << Src2Shift)
0154 #define Src2Imm (OpImm << Src2Shift)
0155 #define Src2ES (OpES << Src2Shift)
0156 #define Src2CS (OpCS << Src2Shift)
0157 #define Src2SS (OpSS << Src2Shift)
0158 #define Src2DS (OpDS << Src2Shift)
0159 #define Src2FS (OpFS << Src2Shift)
0160 #define Src2GS (OpGS << Src2Shift)
0161 #define Src2Mask (OpMask << Src2Shift)
0162 #define Mmx ((u64)1 << 40)
0163 #define AlignMask ((u64)7 << 41)
0164 #define Aligned ((u64)1 << 41)
0165 #define Unaligned ((u64)2 << 41)
0166 #define Avx ((u64)3 << 41)
0167 #define Aligned16 ((u64)4 << 41)
0168 #define Fastop ((u64)1 << 44)
0169 #define NoWrite ((u64)1 << 45)
0170 #define SrcWrite ((u64)1 << 46)
0171 #define NoMod ((u64)1 << 47)
0172 #define Intercept ((u64)1 << 48)
0173 #define CheckPerm ((u64)1 << 49)
0174 #define PrivUD ((u64)1 << 51)
0175 #define NearBranch ((u64)1 << 52)
0176 #define No16 ((u64)1 << 53)
0177 #define IncSP ((u64)1 << 54)
0178 #define TwoMemOp ((u64)1 << 55)
0179 #define IsBranch ((u64)1 << 56)
0180
0181 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
0182
0183 #define X2(x...) x, x
0184 #define X3(x...) X2(x), x
0185 #define X4(x...) X2(x), X2(x)
0186 #define X5(x...) X4(x), x
0187 #define X6(x...) X4(x), X2(x)
0188 #define X7(x...) X4(x), X3(x)
0189 #define X8(x...) X4(x), X4(x)
0190 #define X16(x...) X8(x), X8(x)
0191
0192 struct opcode {
0193 u64 flags;
0194 u8 intercept;
0195 u8 pad[7];
0196 union {
0197 int (*execute)(struct x86_emulate_ctxt *ctxt);
0198 const struct opcode *group;
0199 const struct group_dual *gdual;
0200 const struct gprefix *gprefix;
0201 const struct escape *esc;
0202 const struct instr_dual *idual;
0203 const struct mode_dual *mdual;
0204 void (*fastop)(struct fastop *fake);
0205 } u;
0206 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
0207 };
0208
0209 struct group_dual {
0210 struct opcode mod012[8];
0211 struct opcode mod3[8];
0212 };
0213
0214 struct gprefix {
0215 struct opcode pfx_no;
0216 struct opcode pfx_66;
0217 struct opcode pfx_f2;
0218 struct opcode pfx_f3;
0219 };
0220
0221 struct escape {
0222 struct opcode op[8];
0223 struct opcode high[64];
0224 };
0225
0226 struct instr_dual {
0227 struct opcode mod012;
0228 struct opcode mod3;
0229 };
0230
0231 struct mode_dual {
0232 struct opcode mode32;
0233 struct opcode mode64;
0234 };
0235
0236 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
0237
0238 enum x86_transfer_type {
0239 X86_TRANSFER_NONE,
0240 X86_TRANSFER_CALL_JMP,
0241 X86_TRANSFER_RET,
0242 X86_TRANSFER_TASK_SWITCH,
0243 };
0244
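/*
 * GPRs are read from the vCPU lazily: the first read of a register pulls it
 * into ctxt->_regs[] and sets the matching regs_valid bit; writes also set
 * regs_dirty so that writeback_registers() only flushes modified registers.
 */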
0245 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
0246 {
0247 if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt))
0248 nr &= NR_EMULATOR_GPRS - 1;
0249
0250 if (!(ctxt->regs_valid & (1 << nr))) {
0251 ctxt->regs_valid |= 1 << nr;
0252 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
0253 }
0254 return ctxt->_regs[nr];
0255 }
0256
0257 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
0258 {
0259 if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt))
0260 nr &= NR_EMULATOR_GPRS - 1;
0261
0262 BUILD_BUG_ON(sizeof(ctxt->regs_dirty) * BITS_PER_BYTE < NR_EMULATOR_GPRS);
0263 BUILD_BUG_ON(sizeof(ctxt->regs_valid) * BITS_PER_BYTE < NR_EMULATOR_GPRS);
0264
0265 ctxt->regs_valid |= 1 << nr;
0266 ctxt->regs_dirty |= 1 << nr;
0267 return &ctxt->_regs[nr];
0268 }
0269
0270 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
0271 {
0272 reg_read(ctxt, nr);
0273 return reg_write(ctxt, nr);
0274 }
0275
0276 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
0277 {
0278 unsigned long dirty = ctxt->regs_dirty;
0279 unsigned reg;
0280
0281 for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
0282 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
0283 }
0284
0285 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
0286 {
0287 ctxt->regs_dirty = 0;
0288 ctxt->regs_valid = 0;
0289 }

/* The arithmetic status flags (OF/SF/ZF/AF/PF/CF) that the fastop helpers update. */
0295 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
0296 X86_EFLAGS_PF|X86_EFLAGS_CF)
0297
0298 #ifdef CONFIG_X86_64
0299 #define ON64(x) x
0300 #else
0301 #define ON64(x)
0302 #endif
0303

/*
 * fastop functions use a special calling convention, visible in the FOP
 * macros below:
 *
 *   dst:   rax    (in/out)
 *   src:   rdx    (in/out)
 *   src2:  rcx    (in)
 *   flags: rflags (in/out)
 *   ex:    rsi    (zeroed by the extable fixup if the op faults)
 *
 * Each variant is exactly FASTOP_SIZE bytes long, so the handler for a given
 * operand size can be located by address arithmetic instead of a jump table.
 */
0321 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
0322
0323 #define FASTOP_SIZE 16
0324
0325 #define __FOP_FUNC(name) \
0326 ".align " __stringify(FASTOP_SIZE) " \n\t" \
0327 ".type " name ", @function \n\t" \
0328 name ":\n\t" \
0329 ASM_ENDBR \
0330 IBT_NOSEAL(name)
0331
0332 #define FOP_FUNC(name) \
0333 __FOP_FUNC(#name)
0334
0335 #define __FOP_RET(name) \
0336 "11: " ASM_RET \
0337 ".size " name ", .-" name "\n\t"
0338
0339 #define FOP_RET(name) \
0340 __FOP_RET(#name)
0341
0342 #define __FOP_START(op, align) \
0343 extern void em_##op(struct fastop *fake); \
0344 asm(".pushsection .text, \"ax\" \n\t" \
0345 ".global em_" #op " \n\t" \
0346 ".align " __stringify(align) " \n\t" \
0347 "em_" #op ":\n\t"
0348
0349 #define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
0350
0351 #define FOP_END \
0352 ".popsection")
0353
0354 #define __FOPNOP(name) \
0355 __FOP_FUNC(name) \
0356 __FOP_RET(name)
0357
0358 #define FOPNOP() \
0359 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
0360
0361 #define FOP1E(op, dst) \
0362 __FOP_FUNC(#op "_" #dst) \
0363 "10: " #op " %" #dst " \n\t" \
0364 __FOP_RET(#op "_" #dst)
0365
0366 #define FOP1EEX(op, dst) \
0367 FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
0368
0369 #define FASTOP1(op) \
0370 FOP_START(op) \
0371 FOP1E(op##b, al) \
0372 FOP1E(op##w, ax) \
0373 FOP1E(op##l, eax) \
0374 ON64(FOP1E(op##q, rax)) \
0375 FOP_END
0376
0377
0378 #define FASTOP1SRC2(op, name) \
0379 FOP_START(name) \
0380 FOP1E(op, cl) \
0381 FOP1E(op, cx) \
0382 FOP1E(op, ecx) \
0383 ON64(FOP1E(op, rcx)) \
0384 FOP_END
0385
0386
0387 #define FASTOP1SRC2EX(op, name) \
0388 FOP_START(name) \
0389 FOP1EEX(op, cl) \
0390 FOP1EEX(op, cx) \
0391 FOP1EEX(op, ecx) \
0392 ON64(FOP1EEX(op, rcx)) \
0393 FOP_END
0394
0395 #define FOP2E(op, dst, src) \
0396 __FOP_FUNC(#op "_" #dst "_" #src) \
0397 #op " %" #src ", %" #dst " \n\t" \
0398 __FOP_RET(#op "_" #dst "_" #src)
0399
0400 #define FASTOP2(op) \
0401 FOP_START(op) \
0402 FOP2E(op##b, al, dl) \
0403 FOP2E(op##w, ax, dx) \
0404 FOP2E(op##l, eax, edx) \
0405 ON64(FOP2E(op##q, rax, rdx)) \
0406 FOP_END
0407
0408
0409 #define FASTOP2W(op) \
0410 FOP_START(op) \
0411 FOPNOP() \
0412 FOP2E(op##w, ax, dx) \
0413 FOP2E(op##l, eax, edx) \
0414 ON64(FOP2E(op##q, rax, rdx)) \
0415 FOP_END
0416
0417
0418 #define FASTOP2CL(op) \
0419 FOP_START(op) \
0420 FOP2E(op##b, al, cl) \
0421 FOP2E(op##w, ax, cl) \
0422 FOP2E(op##l, eax, cl) \
0423 ON64(FOP2E(op##q, rax, cl)) \
0424 FOP_END
0425
0426
0427 #define FASTOP2R(op, name) \
0428 FOP_START(name) \
0429 FOP2E(op##b, dl, al) \
0430 FOP2E(op##w, dx, ax) \
0431 FOP2E(op##l, edx, eax) \
0432 ON64(FOP2E(op##q, rdx, rax)) \
0433 FOP_END
0434
0435 #define FOP3E(op, dst, src, src2) \
0436 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
0437 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
0438 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
0439
0440
0441 #define FASTOP3WCL(op) \
0442 FOP_START(op) \
0443 FOPNOP() \
0444 FOP3E(op##w, ax, dx, cl) \
0445 FOP3E(op##l, eax, edx, cl) \
0446 ON64(FOP3E(op##q, rax, rdx, cl)) \
0447 FOP_END
0448
0449
0450 #define FOP_SETCC(op) \
0451 FOP_FUNC(op) \
0452 #op " %al \n\t" \
0453 FOP_RET(op)
0454
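/*
 * Table of setcc stubs, one per condition code and each FASTOP_SIZE bytes
 * long, so that test_cc() below can index into it directly.
 */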
0455 FOP_START(setcc)
0456 FOP_SETCC(seto)
0457 FOP_SETCC(setno)
0458 FOP_SETCC(setc)
0459 FOP_SETCC(setnc)
0460 FOP_SETCC(setz)
0461 FOP_SETCC(setnz)
0462 FOP_SETCC(setbe)
0463 FOP_SETCC(setnbe)
0464 FOP_SETCC(sets)
0465 FOP_SETCC(setns)
0466 FOP_SETCC(setp)
0467 FOP_SETCC(setnp)
0468 FOP_SETCC(setl)
0469 FOP_SETCC(setnl)
0470 FOP_SETCC(setle)
0471 FOP_SETCC(setnle)
0472 FOP_END;
0473
0474 FOP_START(salc)
0475 FOP_FUNC(salc)
0476 "pushf; sbb %al, %al; popf \n\t"
0477 FOP_RET(salc)
0478 FOP_END;

/*
 * Execute one host instruction with an exception fixup: if the instruction
 * faults, the extable entry flags the fault and X86EMUL_UNHANDLEABLE is
 * returned instead of letting the host oops.
 */
0484 #define asm_safe(insn, inoutclob...) \
0485 ({ \
0486 int _fault = 0; \
0487 \
0488 asm volatile("1:" insn "\n" \
0489 "2:\n" \
0490 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
0491 : [_fault] "+r"(_fault) inoutclob ); \
0492 \
0493 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
0494 })
0495
0496 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
0497 enum x86_intercept intercept,
0498 enum x86_intercept_stage stage)
0499 {
0500 struct x86_instruction_info info = {
0501 .intercept = intercept,
0502 .rep_prefix = ctxt->rep_prefix,
0503 .modrm_mod = ctxt->modrm_mod,
0504 .modrm_reg = ctxt->modrm_reg,
0505 .modrm_rm = ctxt->modrm_rm,
0506 .src_val = ctxt->src.val64,
0507 .dst_val = ctxt->dst.val64,
0508 .src_bytes = ctxt->src.bytes,
0509 .dst_bytes = ctxt->dst.bytes,
0510 .ad_bytes = ctxt->ad_bytes,
0511 .next_rip = ctxt->eip,
0512 };
0513
0514 return ctxt->ops->intercept(ctxt, &info, stage);
0515 }
0516
0517 static void assign_masked(ulong *dest, ulong src, ulong mask)
0518 {
0519 *dest = (*dest & ~mask) | (src & mask);
0520 }
0521
0522 static void assign_register(unsigned long *reg, u64 val, int bytes)
0523 {
0524
0525 switch (bytes) {
0526 case 1:
0527 *(u8 *)reg = (u8)val;
0528 break;
0529 case 2:
0530 *(u16 *)reg = (u16)val;
0531 break;
0532 case 4:
0533 *reg = (u32)val;
0534 break;
0535 case 8:
0536 *reg = val;
0537 break;
0538 }
0539 }
0540
0541 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
0542 {
0543 return (1UL << (ctxt->ad_bytes << 3)) - 1;
0544 }
0545
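/* Address mask for stack accesses: all bits in 64-bit mode, else 32 or 16 bits per SS.D. */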
0546 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
0547 {
0548 u16 sel;
0549 struct desc_struct ss;
0550
0551 if (ctxt->mode == X86EMUL_MODE_PROT64)
0552 return ~0UL;
0553 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
0554 return ~0U >> ((ss.d ^ 1) * 16);
0555 }
0556
0557 static int stack_size(struct x86_emulate_ctxt *ctxt)
0558 {
0559 return (__fls(stack_mask(ctxt)) + 1) >> 3;
0560 }
0561
0562
0563 static inline unsigned long
0564 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
0565 {
0566 if (ctxt->ad_bytes == sizeof(unsigned long))
0567 return reg;
0568 else
0569 return reg & ad_mask(ctxt);
0570 }
0571
0572 static inline unsigned long
0573 register_address(struct x86_emulate_ctxt *ctxt, int reg)
0574 {
0575 return address_mask(ctxt, reg_read(ctxt, reg));
0576 }
0577
0578 static void masked_increment(ulong *reg, ulong mask, int inc)
0579 {
0580 assign_masked(reg, *reg + inc, mask);
0581 }
0582
0583 static inline void
0584 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
0585 {
0586 ulong *preg = reg_rmw(ctxt, reg);
0587
0588 assign_register(preg, *preg + inc, ctxt->ad_bytes);
0589 }
0590
0591 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
0592 {
0593 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
0594 }
0595
0596 static u32 desc_limit_scaled(struct desc_struct *desc)
0597 {
0598 u32 limit = get_desc_limit(desc);
0599
0600 return desc->g ? (limit << 12) | 0xfff : limit;
0601 }
0602
0603 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
0604 {
0605 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
0606 return 0;
0607
0608 return ctxt->ops->get_cached_segment_base(ctxt, seg);
0609 }
0610
0611 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
0612 u32 error, bool valid)
0613 {
0614 if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
0615 return X86EMUL_UNHANDLEABLE;
0616
0617 ctxt->exception.vector = vec;
0618 ctxt->exception.error_code = error;
0619 ctxt->exception.error_code_valid = valid;
0620 return X86EMUL_PROPAGATE_FAULT;
0621 }
0622
0623 static int emulate_db(struct x86_emulate_ctxt *ctxt)
0624 {
0625 return emulate_exception(ctxt, DB_VECTOR, 0, false);
0626 }
0627
0628 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
0629 {
0630 return emulate_exception(ctxt, GP_VECTOR, err, true);
0631 }
0632
0633 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
0634 {
0635 return emulate_exception(ctxt, SS_VECTOR, err, true);
0636 }
0637
0638 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
0639 {
0640 return emulate_exception(ctxt, UD_VECTOR, 0, false);
0641 }
0642
0643 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
0644 {
0645 return emulate_exception(ctxt, TS_VECTOR, err, true);
0646 }
0647
0648 static int emulate_de(struct x86_emulate_ctxt *ctxt)
0649 {
0650 return emulate_exception(ctxt, DE_VECTOR, 0, false);
0651 }
0652
0653 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
0654 {
0655 return emulate_exception(ctxt, NM_VECTOR, 0, false);
0656 }
0657
0658 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
0659 {
0660 u16 selector;
0661 struct desc_struct desc;
0662
0663 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
0664 return selector;
0665 }
0666
0667 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
0668 unsigned seg)
0669 {
0670 u16 dummy;
0671 u32 base3;
0672 struct desc_struct desc;
0673
0674 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
0675 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
0676 }
0677
0678 static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
0679 {
0680 return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
0681 }
0682
0683 static inline bool emul_is_noncanonical_address(u64 la,
0684 struct x86_emulate_ctxt *ctxt)
0685 {
0686 return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
0687 }

/*
 * Required alignment (in bytes) for a memory operand of @size bytes:
 * accesses of 16 bytes or more must be naturally aligned unless the opcode
 * is marked Unaligned or Avx; Aligned16 always forces 16-byte alignment.
 */
0698 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
0699 {
0700 u64 alignment = ctxt->d & AlignMask;
0701
0702 if (likely(size < 16))
0703 return 1;
0704
0705 switch (alignment) {
0706 case Unaligned:
0707 case Avx:
0708 return 1;
0709 case Aligned16:
0710 return 16;
0711 case Aligned:
0712 default:
0713 return size;
0714 }
0715 }
0716
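/*
 * Convert a segmented address into a linear address, performing the canonical
 * check in 64-bit mode and segment usability/type/limit checks otherwise.
 * *max_size is set to the number of bytes that can be accessed from @addr
 * without faulting, so callers can clamp multi-byte or string accesses.
 * Faults are reported as #SS for stack-segment addresses and #GP otherwise.
 */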
0717 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
0718 struct segmented_address addr,
0719 unsigned *max_size, unsigned size,
0720 bool write, bool fetch,
0721 enum x86emul_mode mode, ulong *linear)
0722 {
0723 struct desc_struct desc;
0724 bool usable;
0725 ulong la;
0726 u32 lim;
0727 u16 sel;
0728 u8 va_bits;
0729
0730 la = seg_base(ctxt, addr.seg) + addr.ea;
0731 *max_size = 0;
0732 switch (mode) {
0733 case X86EMUL_MODE_PROT64:
0734 *linear = la;
0735 va_bits = ctxt_virt_addr_bits(ctxt);
0736 if (!__is_canonical_address(la, va_bits))
0737 goto bad;
0738
0739 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
0740 if (size > *max_size)
0741 goto bad;
0742 break;
0743 default:
0744 *linear = la = (u32)la;
0745 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
0746 addr.seg);
0747 if (!usable)
0748 goto bad;
0749
0750 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
0751 || !(desc.type & 2)) && write)
0752 goto bad;
0753
0754 if (!fetch && (desc.type & 8) && !(desc.type & 2))
0755 goto bad;
0756 lim = desc_limit_scaled(&desc);
0757 if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
0759 if (addr.ea <= lim)
0760 goto bad;
0761 lim = desc.d ? 0xffffffff : 0xffff;
0762 }
0763 if (addr.ea > lim)
0764 goto bad;
0765 if (lim == 0xffffffff)
0766 *max_size = ~0u;
0767 else {
0768 *max_size = (u64)lim + 1 - addr.ea;
0769 if (size > *max_size)
0770 goto bad;
0771 }
0772 break;
0773 }
0774 if (la & (insn_alignment(ctxt, size) - 1))
0775 return emulate_gp(ctxt, 0);
0776 return X86EMUL_CONTINUE;
0777 bad:
0778 if (addr.seg == VCPU_SREG_SS)
0779 return emulate_ss(ctxt, 0);
0780 else
0781 return emulate_gp(ctxt, 0);
0782 }
0783
0784 static int linearize(struct x86_emulate_ctxt *ctxt,
0785 struct segmented_address addr,
0786 unsigned size, bool write,
0787 ulong *linear)
0788 {
0789 unsigned max_size;
0790 return __linearize(ctxt, addr, &max_size, size, write, false,
0791 ctxt->mode, linear);
0792 }
0793
0794 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
0795 enum x86emul_mode mode)
0796 {
0797 ulong linear;
0798 int rc;
0799 unsigned max_size;
0800 struct segmented_address addr = { .seg = VCPU_SREG_CS,
0801 .ea = dst };
0802
0803 if (ctxt->op_bytes != sizeof(unsigned long))
0804 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
0805 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
0806 if (rc == X86EMUL_CONTINUE)
0807 ctxt->_eip = addr.ea;
0808 return rc;
0809 }
0810
0811 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
0812 {
0813 return assign_eip(ctxt, dst, ctxt->mode);
0814 }
0815
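/*
 * Far control transfer: recompute the execution mode from the new CS
 * descriptor (L and D bits plus EFER.LMA) before assigning the new RIP, and
 * commit the mode only if the RIP assignment succeeds.
 */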
0816 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
0817 const struct desc_struct *cs_desc)
0818 {
0819 enum x86emul_mode mode = ctxt->mode;
0820 int rc;
0821
0822 #ifdef CONFIG_X86_64
0823 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
0824 if (cs_desc->l) {
0825 u64 efer = 0;
0826
0827 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
0828 if (efer & EFER_LMA)
0829 mode = X86EMUL_MODE_PROT64;
0830 } else
0831 mode = X86EMUL_MODE_PROT32;
0832 }
0833 #endif
0834 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
0835 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
0836 rc = assign_eip(ctxt, dst, mode);
0837 if (rc == X86EMUL_CONTINUE)
0838 ctxt->mode = mode;
0839 return rc;
0840 }
0841
0842 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
0843 {
0844 return assign_eip_near(ctxt, ctxt->_eip + rel);
0845 }
0846
0847 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
0848 void *data, unsigned size)
0849 {
0850 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
0851 }
0852
0853 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
0854 ulong linear, void *data,
0855 unsigned int size)
0856 {
0857 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
0858 }
0859
0860 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
0861 struct segmented_address addr,
0862 void *data,
0863 unsigned size)
0864 {
0865 int rc;
0866 ulong linear;
0867
0868 rc = linearize(ctxt, addr, size, false, &linear);
0869 if (rc != X86EMUL_CONTINUE)
0870 return rc;
0871 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
0872 }
0873
0874 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
0875 struct segmented_address addr,
0876 void *data,
0877 unsigned int size)
0878 {
0879 int rc;
0880 ulong linear;
0881
0882 rc = linearize(ctxt, addr, size, true, &linear);
0883 if (rc != X86EMUL_CONTINUE)
0884 return rc;
0885 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
0886 }
0887
/*
 * Prefetch further instruction bytes into ctxt->fetch without crossing a
 * page boundary, ensuring at least @op_size more bytes become available.
 */
0892 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
0893 {
0894 int rc;
0895 unsigned size, max_size;
0896 unsigned long linear;
0897 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
0898 struct segmented_address addr = { .seg = VCPU_SREG_CS,
0899 .ea = ctxt->eip + cur_size };

	/*
	 * We do not know in advance how many bytes will be needed, so fetch
	 * as much as possible.  __linearize() is called with size 0 so that
	 * it skips its own bounds check; instead it reports via max_size how
	 * many bytes can be accessed from this address without faulting.
	 */
0911 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
0912 &linear);
0913 if (unlikely(rc != X86EMUL_CONTINUE))
0914 return rc;
0915
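	/*
	 * The fetch cache holds at most 15 bytes (the maximum x86 instruction
	 * length), so cur_size <= 15 and "15UL ^ cur_size" equals 15 - cur_size:
	 * never fetch past the architectural limit.
	 */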
0916 size = min_t(unsigned, 15UL ^ cur_size, max_size);
0917 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * If even op_size bytes cannot be fetched without exceeding the
	 * 15-byte instruction limit or the segment/page bounds computed
	 * above, the instruction cannot be decoded: raise #GP.
	 */
0925 if (unlikely(size < op_size))
0926 return emulate_gp(ctxt, 0);
0927
0928 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
0929 size, &ctxt->exception);
0930 if (unlikely(rc != X86EMUL_CONTINUE))
0931 return rc;
0932 ctxt->fetch.end += size;
0933 return X86EMUL_CONTINUE;
0934 }
0935
0936 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
0937 unsigned size)
0938 {
0939 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
0940
0941 if (unlikely(done_size < size))
0942 return __do_insn_fetch_bytes(ctxt, size - done_size);
0943 else
0944 return X86EMUL_CONTINUE;
0945 }
0946
0947
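/*
 * These macros assume a local "rc" variable and a "done:" label in the
 * caller; they advance _eip and the fetch pointer by the bytes consumed.
 */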
0948 #define insn_fetch(_type, _ctxt) \
0949 ({ _type _x; \
0950 \
0951 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
0952 if (rc != X86EMUL_CONTINUE) \
0953 goto done; \
0954 ctxt->_eip += sizeof(_type); \
0955 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
0956 ctxt->fetch.ptr += sizeof(_type); \
0957 _x; \
0958 })
0959
0960 #define insn_fetch_arr(_arr, _size, _ctxt) \
0961 ({ \
0962 rc = do_insn_fetch_bytes(_ctxt, _size); \
0963 if (rc != X86EMUL_CONTINUE) \
0964 goto done; \
0965 ctxt->_eip += (_size); \
0966 memcpy(_arr, ctxt->fetch.ptr, _size); \
0967 ctxt->fetch.ptr += (_size); \
0968 })
0969

/*
 * With no REX prefix, byte operands 4-7 select the legacy high-byte
 * registers AH/CH/DH/BH rather than SPL/BPL/SIL/DIL.
 */
0975 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
0976 int byteop)
0977 {
0978 void *p;
0979 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
0980
0981 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
0982 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
0983 else
0984 p = reg_rmw(ctxt, modrm_reg);
0985 return p;
0986 }
0987
0988 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
0989 struct segmented_address addr,
0990 u16 *size, unsigned long *address, int op_bytes)
0991 {
0992 int rc;
0993
0994 if (op_bytes == 2)
0995 op_bytes = 3;
0996 *address = 0;
0997 rc = segmented_read_std(ctxt, addr, size, 2);
0998 if (rc != X86EMUL_CONTINUE)
0999 return rc;
1000 addr.ea += 2;
1001 rc = segmented_read_std(ctxt, addr, address, op_bytes);
1002 return rc;
1003 }
1004
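/*
 * Instantiate the fastop stubs: two-operand ALU ops, multiply/divide,
 * double-precision shifts, one-operand ops, shifts/rotates by CL and bit
 * tests.  Each FASTOPn(op) expands to b/w/l(/q) size variants via the FOP
 * macros above.
 */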
1005 FASTOP2(add);
1006 FASTOP2(or);
1007 FASTOP2(adc);
1008 FASTOP2(sbb);
1009 FASTOP2(and);
1010 FASTOP2(sub);
1011 FASTOP2(xor);
1012 FASTOP2(cmp);
1013 FASTOP2(test);
1014
1015 FASTOP1SRC2(mul, mul_ex);
1016 FASTOP1SRC2(imul, imul_ex);
1017 FASTOP1SRC2EX(div, div_ex);
1018 FASTOP1SRC2EX(idiv, idiv_ex);
1019
1020 FASTOP3WCL(shld);
1021 FASTOP3WCL(shrd);
1022
1023 FASTOP2W(imul);
1024
1025 FASTOP1(not);
1026 FASTOP1(neg);
1027 FASTOP1(inc);
1028 FASTOP1(dec);
1029
1030 FASTOP2CL(rol);
1031 FASTOP2CL(ror);
1032 FASTOP2CL(rcl);
1033 FASTOP2CL(rcr);
1034 FASTOP2CL(shl);
1035 FASTOP2CL(shr);
1036 FASTOP2CL(sar);
1037
1038 FASTOP2W(bsf);
1039 FASTOP2W(bsr);
1040 FASTOP2W(bt);
1041 FASTOP2W(bts);
1042 FASTOP2W(btr);
1043 FASTOP2W(btc);
1044
1045 FASTOP2(xadd);
1046
1047 FASTOP2R(cmp, cmp_r);
1048
1049 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1050 {
	/* A zero source only sets ZF; leave the destination register unmodified. */
1052 if (ctxt->src.val == 0)
1053 ctxt->dst.type = OP_NONE;
1054 return fastop(ctxt, em_bsf);
1055 }
1056
1057 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1058 {
	/* A zero source only sets ZF; leave the destination register unmodified. */
1060 if (ctxt->src.val == 0)
1061 ctxt->dst.type = OP_NONE;
1062 return fastop(ctxt, em_bsr);
1063 }
1064
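/*
 * Evaluate a condition code: load the guest's arithmetic flags into RFLAGS
 * (with IF forced on so interrupts stay enabled), then call the matching
 * setcc stub from the em_setcc table, indexed by condition number, and
 * return its %al result.
 */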
1065 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1066 {
1067 u8 rc;
1068 void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
1069
1070 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1071 asm("push %[flags]; popf; " CALL_NOSPEC
1072 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1073 return rc;
1074 }
1075
1076 static void fetch_register_operand(struct operand *op)
1077 {
1078 switch (op->bytes) {
1079 case 1:
1080 op->val = *(u8 *)op->addr.reg;
1081 break;
1082 case 2:
1083 op->val = *(u16 *)op->addr.reg;
1084 break;
1085 case 4:
1086 op->val = *(u32 *)op->addr.reg;
1087 break;
1088 case 8:
1089 op->val = *(u64 *)op->addr.reg;
1090 break;
1091 }
1092 }
1093
1094 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1095 {
1096 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1097 return emulate_nm(ctxt);
1098
1099 kvm_fpu_get();
1100 asm volatile("fninit");
1101 kvm_fpu_put();
1102 return X86EMUL_CONTINUE;
1103 }
1104
1105 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1106 {
1107 u16 fcw;
1108
1109 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1110 return emulate_nm(ctxt);
1111
1112 kvm_fpu_get();
1113 asm volatile("fnstcw %0": "+m"(fcw));
1114 kvm_fpu_put();
1115
1116 ctxt->dst.val = fcw;
1117
1118 return X86EMUL_CONTINUE;
1119 }
1120
1121 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1122 {
1123 u16 fsw;
1124
1125 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1126 return emulate_nm(ctxt);
1127
1128 kvm_fpu_get();
1129 asm volatile("fnstsw %0": "+m"(fsw));
1130 kvm_fpu_put();
1131
1132 ctxt->dst.val = fsw;
1133
1134 return X86EMUL_CONTINUE;
1135 }
1136
1137 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1138 struct operand *op)
1139 {
1140 unsigned reg = ctxt->modrm_reg;
1141
1142 if (!(ctxt->d & ModRM))
1143 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1144
1145 if (ctxt->d & Sse) {
1146 op->type = OP_XMM;
1147 op->bytes = 16;
1148 op->addr.xmm = reg;
1149 kvm_read_sse_reg(reg, &op->vec_val);
1150 return;
1151 }
1152 if (ctxt->d & Mmx) {
1153 reg &= 7;
1154 op->type = OP_MM;
1155 op->bytes = 8;
1156 op->addr.mm = reg;
1157 return;
1158 }
1159
1160 op->type = OP_REG;
1161 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1162 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1163
1164 fetch_register_operand(op);
1165 op->orig_val = op->val;
1166 }
1167
1168 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1169 {
1170 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1171 ctxt->modrm_seg = VCPU_SREG_SS;
1172 }
1173
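/*
 * Decode the ModRM byte (and SIB byte/displacement if present) into either a
 * register/XMM/MMX operand or an effective memory address in ctxt->modrm_*.
 */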
1174 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1175 struct operand *op)
1176 {
1177 u8 sib;
1178 int index_reg, base_reg, scale;
1179 int rc = X86EMUL_CONTINUE;
1180 ulong modrm_ea = 0;
1181
1182 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1183 index_reg = (ctxt->rex_prefix << 2) & 8;
1184 base_reg = (ctxt->rex_prefix << 3) & 8;
1185
1186 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1187 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1188 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1189 ctxt->modrm_seg = VCPU_SREG_DS;
1190
1191 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1192 op->type = OP_REG;
1193 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1194 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1195 ctxt->d & ByteOp);
1196 if (ctxt->d & Sse) {
1197 op->type = OP_XMM;
1198 op->bytes = 16;
1199 op->addr.xmm = ctxt->modrm_rm;
1200 kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1201 return rc;
1202 }
1203 if (ctxt->d & Mmx) {
1204 op->type = OP_MM;
1205 op->bytes = 8;
1206 op->addr.mm = ctxt->modrm_rm & 7;
1207 return rc;
1208 }
1209 fetch_register_operand(op);
1210 return rc;
1211 }
1212
1213 op->type = OP_MEM;
1214
1215 if (ctxt->ad_bytes == 2) {
1216 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1217 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1218 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1219 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1220
		/* 16-bit ModR/M decode. */
1222 switch (ctxt->modrm_mod) {
1223 case 0:
1224 if (ctxt->modrm_rm == 6)
1225 modrm_ea += insn_fetch(u16, ctxt);
1226 break;
1227 case 1:
1228 modrm_ea += insn_fetch(s8, ctxt);
1229 break;
1230 case 2:
1231 modrm_ea += insn_fetch(u16, ctxt);
1232 break;
1233 }
1234 switch (ctxt->modrm_rm) {
1235 case 0:
1236 modrm_ea += bx + si;
1237 break;
1238 case 1:
1239 modrm_ea += bx + di;
1240 break;
1241 case 2:
1242 modrm_ea += bp + si;
1243 break;
1244 case 3:
1245 modrm_ea += bp + di;
1246 break;
1247 case 4:
1248 modrm_ea += si;
1249 break;
1250 case 5:
1251 modrm_ea += di;
1252 break;
1253 case 6:
1254 if (ctxt->modrm_mod != 0)
1255 modrm_ea += bp;
1256 break;
1257 case 7:
1258 modrm_ea += bx;
1259 break;
1260 }
1261 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1262 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1263 ctxt->modrm_seg = VCPU_SREG_SS;
1264 modrm_ea = (u16)modrm_ea;
1265 } else {
		/* 32/64-bit ModR/M decode. */
1267 if ((ctxt->modrm_rm & 7) == 4) {
1268 sib = insn_fetch(u8, ctxt);
1269 index_reg |= (sib >> 3) & 7;
1270 base_reg |= sib & 7;
1271 scale = sib >> 6;
1272
1273 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1274 modrm_ea += insn_fetch(s32, ctxt);
1275 else {
1276 modrm_ea += reg_read(ctxt, base_reg);
1277 adjust_modrm_seg(ctxt, base_reg);
1278
1279 if ((ctxt->d & IncSP) &&
1280 base_reg == VCPU_REGS_RSP)
1281 modrm_ea += ctxt->op_bytes;
1282 }
1283 if (index_reg != 4)
1284 modrm_ea += reg_read(ctxt, index_reg) << scale;
1285 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1286 modrm_ea += insn_fetch(s32, ctxt);
1287 if (ctxt->mode == X86EMUL_MODE_PROT64)
1288 ctxt->rip_relative = 1;
1289 } else {
1290 base_reg = ctxt->modrm_rm;
1291 modrm_ea += reg_read(ctxt, base_reg);
1292 adjust_modrm_seg(ctxt, base_reg);
1293 }
1294 switch (ctxt->modrm_mod) {
1295 case 1:
1296 modrm_ea += insn_fetch(s8, ctxt);
1297 break;
1298 case 2:
1299 modrm_ea += insn_fetch(s32, ctxt);
1300 break;
1301 }
1302 }
1303 op->addr.mem.ea = modrm_ea;
1304 if (ctxt->ad_bytes != 8)
1305 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1306
1307 done:
1308 return rc;
1309 }
1310
1311 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1312 struct operand *op)
1313 {
1314 int rc = X86EMUL_CONTINUE;
1315
1316 op->type = OP_MEM;
1317 switch (ctxt->ad_bytes) {
1318 case 2:
1319 op->addr.mem.ea = insn_fetch(u16, ctxt);
1320 break;
1321 case 4:
1322 op->addr.mem.ea = insn_fetch(u32, ctxt);
1323 break;
1324 case 8:
1325 op->addr.mem.ea = insn_fetch(u64, ctxt);
1326 break;
1327 }
1328 done:
1329 return rc;
1330 }
1331
1332 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1333 {
1334 long sv = 0, mask;
1335
1336 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1337 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1338
1339 if (ctxt->src.bytes == 2)
1340 sv = (s16)ctxt->src.val & (s16)mask;
1341 else if (ctxt->src.bytes == 4)
1342 sv = (s32)ctxt->src.val & (s32)mask;
1343 else
1344 sv = (s64)ctxt->src.val & (s64)mask;
1345
1346 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1347 ctxt->dst.addr.mem.ea + (sv >> 3));
1348 }
1349
1350
1351 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1352 }
1353
1354 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1355 unsigned long addr, void *dest, unsigned size)
1356 {
1357 int rc;
1358 struct read_cache *mc = &ctxt->mem_read;
1359
1360 if (mc->pos < mc->end)
1361 goto read_cached;
1362
1363 if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
1364 return X86EMUL_UNHANDLEABLE;
1365
1366 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1367 &ctxt->exception);
1368 if (rc != X86EMUL_CONTINUE)
1369 return rc;
1370
1371 mc->end += size;
1372
1373 read_cached:
1374 memcpy(dest, mc->data + mc->pos, size);
1375 mc->pos += size;
1376 return X86EMUL_CONTINUE;
1377 }
1378
1379 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1380 struct segmented_address addr,
1381 void *data,
1382 unsigned size)
1383 {
1384 int rc;
1385 ulong linear;
1386
1387 rc = linearize(ctxt, addr, size, false, &linear);
1388 if (rc != X86EMUL_CONTINUE)
1389 return rc;
1390 return read_emulated(ctxt, linear, data, size);
1391 }
1392
1393 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1394 struct segmented_address addr,
1395 const void *data,
1396 unsigned size)
1397 {
1398 int rc;
1399 ulong linear;
1400
1401 rc = linearize(ctxt, addr, size, true, &linear);
1402 if (rc != X86EMUL_CONTINUE)
1403 return rc;
1404 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1405 &ctxt->exception);
1406 }
1407
1408 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1409 struct segmented_address addr,
1410 const void *orig_data, const void *data,
1411 unsigned size)
1412 {
1413 int rc;
1414 ulong linear;
1415
1416 rc = linearize(ctxt, addr, size, true, &linear);
1417 if (rc != X86EMUL_CONTINUE)
1418 return rc;
1419 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1420 size, &ctxt->exception);
1421 }
1422
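/*
 * Read from an I/O port.  For REP INS the read is batched into ctxt->io_read:
 * one host PIO call covers as many iterations as fit in the cache, the page
 * containing RDI and the remaining RCX count, and later iterations are then
 * served from the cache (forward string reads write straight from it via
 * OP_MEM_STR).
 */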
1423 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1424 unsigned int size, unsigned short port,
1425 void *dest)
1426 {
1427 struct read_cache *rc = &ctxt->io_read;
1428
1429 if (rc->pos == rc->end) {
1430 unsigned int in_page, n;
1431 unsigned int count = ctxt->rep_prefix ?
1432 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1433 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1434 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1435 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1436 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1437 if (n == 0)
1438 n = 1;
1439 rc->pos = rc->end = 0;
1440 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1441 return 0;
1442 rc->end = n * size;
1443 }
1444
1445 if (ctxt->rep_prefix && (ctxt->d & String) &&
1446 !(ctxt->eflags & X86_EFLAGS_DF)) {
1447 ctxt->dst.data = rc->data + rc->pos;
1448 ctxt->dst.type = OP_MEM_STR;
1449 ctxt->dst.count = (rc->end - rc->pos) / size;
1450 rc->pos = rc->end;
1451 } else {
1452 memcpy(dest, rc->data + rc->pos, size);
1453 rc->pos += size;
1454 }
1455 return 1;
1456 }
1457
1458 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1459 u16 index, struct desc_struct *desc)
1460 {
1461 struct desc_ptr dt;
1462 ulong addr;
1463
1464 ctxt->ops->get_idt(ctxt, &dt);
1465
1466 if (dt.size < index * 8 + 7)
1467 return emulate_gp(ctxt, index << 3 | 0x2);
1468
1469 addr = dt.address + index * 8;
1470 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1471 }
1472
1473 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1474 u16 selector, struct desc_ptr *dt)
1475 {
1476 const struct x86_emulate_ops *ops = ctxt->ops;
1477 u32 base3 = 0;
1478
1479 if (selector & 1 << 2) {
1480 struct desc_struct desc;
1481 u16 sel;
1482
1483 memset(dt, 0, sizeof(*dt));
1484 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1485 VCPU_SREG_LDTR))
1486 return;
1487
1488 dt->size = desc_limit_scaled(&desc);
1489 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1490 } else
1491 ops->get_gdt(ctxt, dt);
1492 }
1493
1494 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1495 u16 selector, ulong *desc_addr_p)
1496 {
1497 struct desc_ptr dt;
1498 u16 index = selector >> 3;
1499 ulong addr;
1500
1501 get_descriptor_table_ptr(ctxt, selector, &dt);
1502
1503 if (dt.size < index * 8 + 7)
1504 return emulate_gp(ctxt, selector & 0xfffc);
1505
1506 addr = dt.address + index * 8;
1507
1508 #ifdef CONFIG_X86_64
1509 if (addr >> 32 != 0) {
1510 u64 efer = 0;
1511
1512 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1513 if (!(efer & EFER_LMA))
1514 addr &= (u32)-1;
1515 }
1516 #endif
1517
1518 *desc_addr_p = addr;
1519 return X86EMUL_CONTINUE;
1520 }
1521
1522
1523 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1524 u16 selector, struct desc_struct *desc,
1525 ulong *desc_addr_p)
1526 {
1527 int rc;
1528
1529 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1530 if (rc != X86EMUL_CONTINUE)
1531 return rc;
1532
1533 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1534 }
1535
1536
1537 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1538 u16 selector, struct desc_struct *desc)
1539 {
1540 int rc;
1541 ulong addr;
1542
1543 rc = get_descriptor_ptr(ctxt, selector, &addr);
1544 if (rc != X86EMUL_CONTINUE)
1545 return rc;
1546
1547 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1548 }
1549
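/*
 * Load a segment register: fetch and validate the descriptor according to
 * the segment type, the CPL/RPL/DPL rules and the kind of control transfer,
 * set the accessed bit where required, then commit the new selector and
 * cached descriptor.
 */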
1550 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1551 u16 selector, int seg, u8 cpl,
1552 enum x86_transfer_type transfer,
1553 struct desc_struct *desc)
1554 {
1555 struct desc_struct seg_desc, old_desc;
1556 u8 dpl, rpl;
1557 unsigned err_vec = GP_VECTOR;
1558 u32 err_code = 0;
1559 bool null_selector = !(selector & ~0x3);
1560 ulong desc_addr;
1561 int ret;
1562 u16 dummy;
1563 u32 base3 = 0;
1564
1565 memset(&seg_desc, 0, sizeof(seg_desc));
1566
1567 if (ctxt->mode == X86EMUL_MODE_REAL) {
		/*
		 * Real mode: keep the cached limit and attributes (so "unreal
		 * mode" keeps working) and only update the base from the
		 * selector.
		 */
1570 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1571 set_desc_base(&seg_desc, selector << 4);
1572 goto load;
1573 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1574
1575 set_desc_base(&seg_desc, selector << 4);
1576 set_desc_limit(&seg_desc, 0xffff);
1577 seg_desc.type = 3;
1578 seg_desc.p = 1;
1579 seg_desc.s = 1;
1580 seg_desc.dpl = 3;
1581 goto load;
1582 }
1583
1584 rpl = selector & 3;
1585
1586
1587 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1588 goto exception;
1589
1590
1591 if (null_selector) {
1592 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1593 goto exception;
1594
1595 if (seg == VCPU_SREG_SS) {
1596 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1597 goto exception;
1598
1599
1600
1601
1602
1603 seg_desc.type = 3;
1604 seg_desc.p = 1;
1605 seg_desc.s = 1;
1606 seg_desc.dpl = cpl;
1607 seg_desc.d = 1;
1608 seg_desc.g = 1;
1609 }
1610
1611
1612 goto load;
1613 }
1614
1615 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1616 if (ret != X86EMUL_CONTINUE)
1617 return ret;
1618
1619 err_code = selector & 0xfffc;
1620 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1621 GP_VECTOR;
1622
1623
1624 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1625 if (transfer == X86_TRANSFER_CALL_JMP)
1626 return X86EMUL_UNHANDLEABLE;
1627 goto exception;
1628 }
1629
1630 dpl = seg_desc.dpl;
1631
1632 switch (seg) {
1633 case VCPU_SREG_SS:
1634
1635
1636
1637
1638 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1639 goto exception;
1640 break;
1641 case VCPU_SREG_CS:
1642 if (!(seg_desc.type & 8))
1643 goto exception;
1644
1645 if (transfer == X86_TRANSFER_RET) {
1646
1647 if (rpl < cpl)
1648 goto exception;
1649
1650 if (rpl > cpl)
1651 return X86EMUL_UNHANDLEABLE;
1652 }
1653 if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1654 if (seg_desc.type & 4) {
1655
1656 if (dpl > rpl)
1657 goto exception;
1658 } else {
1659
1660 if (dpl != rpl)
1661 goto exception;
1662 }
1663 } else {
1664 if (seg_desc.type & 4) {
1665
1666 if (dpl > cpl)
1667 goto exception;
1668 } else {
1669
1670 if (rpl > cpl || dpl != cpl)
1671 goto exception;
1672 }
1673 }
1674
1675 if (seg_desc.d && seg_desc.l) {
1676 u64 efer = 0;
1677
1678 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1679 if (efer & EFER_LMA)
1680 goto exception;
1681 }
1682
1683
1684 selector = (selector & 0xfffc) | cpl;
1685 break;
1686 case VCPU_SREG_TR:
1687 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1688 goto exception;
1689 break;
1690 case VCPU_SREG_LDTR:
1691 if (seg_desc.s || seg_desc.type != 2)
1692 goto exception;
1693 break;
1694 default:
1695
1696
1697
1698
1699
1700 if ((seg_desc.type & 0xa) == 0x8 ||
1701 (((seg_desc.type & 0xc) != 0xc) &&
1702 (rpl > dpl && cpl > dpl)))
1703 goto exception;
1704 break;
1705 }
1706
1707 if (!seg_desc.p) {
1708 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1709 goto exception;
1710 }
1711
1712 if (seg_desc.s) {
1713
1714 if (!(seg_desc.type & 1)) {
1715 seg_desc.type |= 1;
1716 ret = write_segment_descriptor(ctxt, selector,
1717 &seg_desc);
1718 if (ret != X86EMUL_CONTINUE)
1719 return ret;
1720 }
1721 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1722 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1723 if (ret != X86EMUL_CONTINUE)
1724 return ret;
1725 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1726 ((u64)base3 << 32), ctxt))
1727 return emulate_gp(ctxt, err_code);
1728 }
1729
1730 if (seg == VCPU_SREG_TR) {
1731 old_desc = seg_desc;
1732 seg_desc.type |= 2;
1733 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1734 sizeof(seg_desc), &ctxt->exception);
1735 if (ret != X86EMUL_CONTINUE)
1736 return ret;
1737 }
1738 load:
1739 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1740 if (desc)
1741 *desc = seg_desc;
1742 return X86EMUL_CONTINUE;
1743 exception:
1744 return emulate_exception(ctxt, err_vec, err_code, true);
1745 }
1746
1747 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1748 u16 selector, int seg)
1749 {
1750 u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * Selector value 3 (NULL selector with RPL 3) can never be loaded
	 * into SS in 64-bit mode; reject it with #GP here, since the common
	 * loader below otherwise accepts NULL SS in long mode.
	 */
1762 if (seg == VCPU_SREG_SS && selector == 3 &&
1763 ctxt->mode == X86EMUL_MODE_PROT64)
1764 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1765
1766 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1767 X86_TRANSFER_NONE, NULL);
1768 }
1769
1770 static void write_register_operand(struct operand *op)
1771 {
1772 return assign_register(op->addr.reg, op->val, op->bytes);
1773 }
1774
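/*
 * Commit an instruction's destination operand: registers are written
 * directly, memory goes through segmented_write(), and LOCKed read-modify-
 * write instructions are committed with a cmpxchg against the original value.
 */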
1775 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1776 {
1777 switch (op->type) {
1778 case OP_REG:
1779 write_register_operand(op);
1780 break;
1781 case OP_MEM:
1782 if (ctxt->lock_prefix)
1783 return segmented_cmpxchg(ctxt,
1784 op->addr.mem,
1785 &op->orig_val,
1786 &op->val,
1787 op->bytes);
1788 else
1789 return segmented_write(ctxt,
1790 op->addr.mem,
1791 &op->val,
1792 op->bytes);
1793 break;
1794 case OP_MEM_STR:
1795 return segmented_write(ctxt,
1796 op->addr.mem,
1797 op->data,
1798 op->bytes * op->count);
1799 break;
1800 case OP_XMM:
1801 kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1802 break;
1803 case OP_MM:
1804 kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1805 break;
1806 case OP_NONE:
1807
1808 break;
1809 default:
1810 break;
1811 }
1812 return X86EMUL_CONTINUE;
1813 }
1814
1815 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1816 {
1817 struct segmented_address addr;
1818
1819 rsp_increment(ctxt, -bytes);
1820 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1821 addr.seg = VCPU_SREG_SS;
1822
1823 return segmented_write(ctxt, addr, data, bytes);
1824 }
1825
1826 static int em_push(struct x86_emulate_ctxt *ctxt)
1827 {
	/* Disable writeback: em_push writes to the stack, not to ctxt->dst. */
1829 ctxt->dst.type = OP_NONE;
1830 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1831 }
1832
1833 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1834 void *dest, int len)
1835 {
1836 int rc;
1837 struct segmented_address addr;
1838
1839 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1840 addr.seg = VCPU_SREG_SS;
1841 rc = segmented_read(ctxt, addr, dest, len);
1842 if (rc != X86EMUL_CONTINUE)
1843 return rc;
1844
1845 rsp_increment(ctxt, len);
1846 return rc;
1847 }
1848
1849 static int em_pop(struct x86_emulate_ctxt *ctxt)
1850 {
1851 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1852 }
1853
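/*
 * POPF: in protected modes only the flag bits the current privilege level
 * may change are taken from the popped value (IOPL needs CPL 0, IF needs
 * CPL <= IOPL); VM86 with IOPL < 3 gets #GP; real mode may change both.
 */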
1854 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1855 void *dest, int len)
1856 {
1857 int rc;
1858 unsigned long val, change_mask;
1859 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1860 int cpl = ctxt->ops->cpl(ctxt);
1861
1862 rc = emulate_pop(ctxt, &val, len);
1863 if (rc != X86EMUL_CONTINUE)
1864 return rc;
1865
1866 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1867 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1868 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1869 X86_EFLAGS_AC | X86_EFLAGS_ID;
1870
1871 switch(ctxt->mode) {
1872 case X86EMUL_MODE_PROT64:
1873 case X86EMUL_MODE_PROT32:
1874 case X86EMUL_MODE_PROT16:
1875 if (cpl == 0)
1876 change_mask |= X86_EFLAGS_IOPL;
1877 if (cpl <= iopl)
1878 change_mask |= X86_EFLAGS_IF;
1879 break;
1880 case X86EMUL_MODE_VM86:
1881 if (iopl < 3)
1882 return emulate_gp(ctxt, 0);
1883 change_mask |= X86_EFLAGS_IF;
1884 break;
1885 default:
1886 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1887 break;
1888 }
1889
1890 *(unsigned long *)dest =
1891 (ctxt->eflags & ~change_mask) | (val & change_mask);
1892
1893 return rc;
1894 }
1895
1896 static int em_popf(struct x86_emulate_ctxt *ctxt)
1897 {
1898 ctxt->dst.type = OP_REG;
1899 ctxt->dst.addr.reg = &ctxt->eflags;
1900 ctxt->dst.bytes = ctxt->op_bytes;
1901 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1902 }
1903
1904 static int em_enter(struct x86_emulate_ctxt *ctxt)
1905 {
1906 int rc;
1907 unsigned frame_size = ctxt->src.val;
1908 unsigned nesting_level = ctxt->src2.val & 31;
1909 ulong rbp;
1910
1911 if (nesting_level)
1912 return X86EMUL_UNHANDLEABLE;
1913
1914 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1915 rc = push(ctxt, &rbp, stack_size(ctxt));
1916 if (rc != X86EMUL_CONTINUE)
1917 return rc;
1918 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1919 stack_mask(ctxt));
1920 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1921 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1922 stack_mask(ctxt));
1923 return X86EMUL_CONTINUE;
1924 }
1925
1926 static int em_leave(struct x86_emulate_ctxt *ctxt)
1927 {
1928 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1929 stack_mask(ctxt));
1930 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1931 }
1932
1933 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1934 {
1935 int seg = ctxt->src2.val;
1936
1937 ctxt->src.val = get_segment_selector(ctxt, seg);
1938 if (ctxt->op_bytes == 4) {
1939 rsp_increment(ctxt, -2);
1940 ctxt->op_bytes = 2;
1941 }
1942
1943 return em_push(ctxt);
1944 }
1945
1946 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1947 {
1948 int seg = ctxt->src2.val;
1949 unsigned long selector;
1950 int rc;
1951
1952 rc = emulate_pop(ctxt, &selector, 2);
1953 if (rc != X86EMUL_CONTINUE)
1954 return rc;
1955
1956 if (ctxt->modrm_reg == VCPU_SREG_SS)
1957 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1958 if (ctxt->op_bytes > 2)
1959 rsp_increment(ctxt, ctxt->op_bytes - 2);
1960
1961 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1962 return rc;
1963 }
1964
1965 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1966 {
1967 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1968 int rc = X86EMUL_CONTINUE;
1969 int reg = VCPU_REGS_RAX;
1970
1971 while (reg <= VCPU_REGS_RDI) {
1972 (reg == VCPU_REGS_RSP) ?
1973 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1974
1975 rc = em_push(ctxt);
1976 if (rc != X86EMUL_CONTINUE)
1977 return rc;
1978
1979 ++reg;
1980 }
1981
1982 return rc;
1983 }
1984
1985 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1986 {
1987 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1988 return em_push(ctxt);
1989 }
1990
1991 static int em_popa(struct x86_emulate_ctxt *ctxt)
1992 {
1993 int rc = X86EMUL_CONTINUE;
1994 int reg = VCPU_REGS_RDI;
1995 u32 val;
1996
1997 while (reg >= VCPU_REGS_RAX) {
1998 if (reg == VCPU_REGS_RSP) {
1999 rsp_increment(ctxt, ctxt->op_bytes);
2000 --reg;
2001 }
2002
2003 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2004 if (rc != X86EMUL_CONTINUE)
2005 break;
2006 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2007 --reg;
2008 }
2009 return rc;
2010 }
2011
2012 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2013 {
2014 const struct x86_emulate_ops *ops = ctxt->ops;
2015 int rc;
2016 struct desc_ptr dt;
2017 gva_t cs_addr;
2018 gva_t eip_addr;
2019 u16 cs, eip;
2020
2021
2022 ctxt->src.val = ctxt->eflags;
2023 rc = em_push(ctxt);
2024 if (rc != X86EMUL_CONTINUE)
2025 return rc;
2026
2027 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2028
2029 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2030 rc = em_push(ctxt);
2031 if (rc != X86EMUL_CONTINUE)
2032 return rc;
2033
2034 ctxt->src.val = ctxt->_eip;
2035 rc = em_push(ctxt);
2036 if (rc != X86EMUL_CONTINUE)
2037 return rc;
2038
2039 ops->get_idt(ctxt, &dt);
2040
2041 eip_addr = dt.address + (irq << 2);
2042 cs_addr = dt.address + (irq << 2) + 2;
2043
2044 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2045 if (rc != X86EMUL_CONTINUE)
2046 return rc;
2047
2048 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2049 if (rc != X86EMUL_CONTINUE)
2050 return rc;
2051
2052 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2053 if (rc != X86EMUL_CONTINUE)
2054 return rc;
2055
2056 ctxt->_eip = eip;
2057
2058 return rc;
2059 }
2060
2061 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2062 {
2063 int rc;
2064
2065 invalidate_registers(ctxt);
2066 rc = __emulate_int_real(ctxt, irq);
2067 if (rc == X86EMUL_CONTINUE)
2068 writeback_registers(ctxt);
2069 return rc;
2070 }
2071
2072 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2073 {
2074 switch(ctxt->mode) {
2075 case X86EMUL_MODE_REAL:
2076 return __emulate_int_real(ctxt, irq);
2077 case X86EMUL_MODE_VM86:
2078 case X86EMUL_MODE_PROT16:
2079 case X86EMUL_MODE_PROT32:
2080 case X86EMUL_MODE_PROT64:
2081 default:
		/* Only real-mode interrupt injection is implemented. */
2083 return X86EMUL_UNHANDLEABLE;
2084 }
2085 }
2086
2087 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2088 {
2089 int rc = X86EMUL_CONTINUE;
2090 unsigned long temp_eip = 0;
2091 unsigned long temp_eflags = 0;
2092 unsigned long cs = 0;
2093 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2094 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2095 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2096 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2097 X86_EFLAGS_AC | X86_EFLAGS_ID |
2098 X86_EFLAGS_FIXED;
2099 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2100 X86_EFLAGS_VIP;
2101
2102
2103
2104 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2105
2106 if (rc != X86EMUL_CONTINUE)
2107 return rc;
2108
2109 if (temp_eip & ~0xffff)
2110 return emulate_gp(ctxt, 0);
2111
2112 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2113
2114 if (rc != X86EMUL_CONTINUE)
2115 return rc;
2116
2117 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2118
2119 if (rc != X86EMUL_CONTINUE)
2120 return rc;
2121
2122 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2123
2124 if (rc != X86EMUL_CONTINUE)
2125 return rc;
2126
2127 ctxt->_eip = temp_eip;
2128
2129 if (ctxt->op_bytes == 4)
2130 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2131 else if (ctxt->op_bytes == 2) {
2132 ctxt->eflags &= ~0xffff;
2133 ctxt->eflags |= temp_eflags;
2134 }
2135
2136 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2137 ctxt->eflags |= X86_EFLAGS_FIXED;
2138 ctxt->ops->set_nmi_mask(ctxt, false);
2139
2140 return rc;
2141 }
2142
2143 static int em_iret(struct x86_emulate_ctxt *ctxt)
2144 {
2145 switch(ctxt->mode) {
2146 case X86EMUL_MODE_REAL:
2147 return emulate_iret_real(ctxt);
2148 case X86EMUL_MODE_VM86:
2149 case X86EMUL_MODE_PROT16:
2150 case X86EMUL_MODE_PROT32:
2151 case X86EMUL_MODE_PROT64:
2152 default:
		/* Only real-mode IRET is implemented. */
2154 return X86EMUL_UNHANDLEABLE;
2155 }
2156 }
2157
2158 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2159 {
2160 int rc;
2161 unsigned short sel;
2162 struct desc_struct new_desc;
2163 u8 cpl = ctxt->ops->cpl(ctxt);
2164
2165 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2166
2167 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2168 X86_TRANSFER_CALL_JMP,
2169 &new_desc);
2170 if (rc != X86EMUL_CONTINUE)
2171 return rc;
2172
2173 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2174
2175 if (rc != X86EMUL_CONTINUE)
2176 return X86EMUL_UNHANDLEABLE;
2177
2178 return rc;
2179 }
2180
2181 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2182 {
2183 return assign_eip_near(ctxt, ctxt->src.val);
2184 }
2185
2186 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2187 {
2188 int rc;
2189 long int old_eip;
2190
2191 old_eip = ctxt->_eip;
2192 rc = assign_eip_near(ctxt, ctxt->src.val);
2193 if (rc != X86EMUL_CONTINUE)
2194 return rc;
2195 ctxt->src.val = old_eip;
2196 rc = em_push(ctxt);
2197 return rc;
2198 }
2199
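/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match store
 * ECX:EBX and set ZF, otherwise load the old value into EDX:EAX and clear ZF.
 * The 16-byte form (CMPXCHG16B) is not handled here.
 */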
2200 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2201 {
2202 u64 old = ctxt->dst.orig_val64;
2203
2204 if (ctxt->dst.bytes == 16)
2205 return X86EMUL_UNHANDLEABLE;
2206
2207 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2208 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2209 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2210 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2211 ctxt->eflags &= ~X86_EFLAGS_ZF;
2212 } else {
2213 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2214 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2215
2216 ctxt->eflags |= X86_EFLAGS_ZF;
2217 }
2218 return X86EMUL_CONTINUE;
2219 }
2220
2221 static int em_ret(struct x86_emulate_ctxt *ctxt)
2222 {
2223 int rc;
2224 unsigned long eip;
2225
2226 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2227 if (rc != X86EMUL_CONTINUE)
2228 return rc;
2229
2230 return assign_eip_near(ctxt, eip);
2231 }
2232
2233 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2234 {
2235 int rc;
2236 unsigned long eip, cs;
2237 int cpl = ctxt->ops->cpl(ctxt);
2238 struct desc_struct new_desc;
2239
2240 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2241 if (rc != X86EMUL_CONTINUE)
2242 return rc;
2243 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2244 if (rc != X86EMUL_CONTINUE)
2245 return rc;
2246 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2247 X86_TRANSFER_RET,
2248 &new_desc);
2249 if (rc != X86EMUL_CONTINUE)
2250 return rc;
2251 rc = assign_eip_far(ctxt, eip, &new_desc);
2252
2253 if (rc != X86EMUL_CONTINUE)
2254 return X86EMUL_UNHANDLEABLE;
2255
2256 return rc;
2257 }
2258
2259 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2260 {
2261 int rc;
2262
2263 rc = em_ret_far(ctxt);
2264 if (rc != X86EMUL_CONTINUE)
2265 return rc;
2266 rsp_increment(ctxt, ctxt->src.val);
2267 return X86EMUL_CONTINUE;
2268 }
2269
2270 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2271 {
	/* Save the original destination and compare RAX against it. */
2273 ctxt->dst.orig_val = ctxt->dst.val;
2274 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2275 ctxt->src.orig_val = ctxt->src.val;
2276 ctxt->src.val = ctxt->dst.orig_val;
2277 fastop(ctxt, em_cmp);
2278
2279 if (ctxt->eflags & X86_EFLAGS_ZF) {
2280 /* Success: write back to memory; no update of EAX. */
2281 ctxt->src.type = OP_NONE;
2282 ctxt->dst.val = ctxt->src.orig_val;
2283 } else {
2284 /* Failure: write the value we saw to EAX. */
2285 ctxt->src.type = OP_REG;
2286 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2287 ctxt->src.val = ctxt->dst.orig_val;
2288 /* Create write-cycle to dest by writing the same value. */
2289 ctxt->dst.val = ctxt->dst.orig_val;
2290 }
2291 return X86EMUL_CONTINUE;
2292 }
2293
2294 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2295 {
2296 int seg = ctxt->src2.val;
2297 unsigned short sel;
2298 int rc;
2299
2300 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2301
2302 rc = load_segment_descriptor(ctxt, sel, seg);
2303 if (rc != X86EMUL_CONTINUE)
2304 return rc;
2305
2306 ctxt->dst.val = ctxt->src.val;
2307 return rc;
2308 }
2309
2310 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2311 {
2312 #ifdef CONFIG_X86_64
2313 return ctxt->ops->guest_has_long_mode(ctxt);
2314 #else
2315 return false;
2316 #endif
2317 }
2318
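/*
 * rsm_set_desc_flags() unpacks segment attributes from the 32-bit SMRAM save
 * format; the bit layout mirrors bits 8-23 of a descriptor's second dword
 * (type, S, DPL, P, AVL, L, D/B, G).
 */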
2319 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2320 {
2321 desc->g = (flags >> 23) & 1;
2322 desc->d = (flags >> 22) & 1;
2323 desc->l = (flags >> 21) & 1;
2324 desc->avl = (flags >> 20) & 1;
2325 desc->p = (flags >> 15) & 1;
2326 desc->dpl = (flags >> 13) & 3;
2327 desc->s = (flags >> 12) & 1;
2328 desc->type = (flags >> 8) & 15;
2329 }
2330
2331 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2332 int n)
2333 {
2334 struct desc_struct desc;
2335 int offset;
2336 u16 selector;
2337
2338 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2339
2340 if (n < 3)
2341 offset = 0x7f84 + n * 12;
2342 else
2343 offset = 0x7f2c + (n - 3) * 12;
2344
2345 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2346 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2347 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2348 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2349 return X86EMUL_CONTINUE;
2350 }
2351
2352 #ifdef CONFIG_X86_64
2353 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2354 int n)
2355 {
2356 struct desc_struct desc;
2357 int offset;
2358 u16 selector;
2359 u32 base3;
2360
2361 offset = 0x7e00 + n * 16;
2362
2363 selector = GET_SMSTATE(u16, smstate, offset);
2364 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2365 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2366 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2367 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2368
2369 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2370 return X86EMUL_CONTINUE;
2371 }
2372 #endif
2373
2374 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2375 u64 cr0, u64 cr3, u64 cr4)
2376 {
2377 int bad;
2378 u64 pcid;
2379
2380 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2381 pcid = 0;
2382 if (cr4 & X86_CR4_PCIDE) {
2383 pcid = cr3 & 0xfff;
2384 cr3 &= ~0xfff;
2385 }
2386
2387 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2388 if (bad)
2389 return X86EMUL_UNHANDLEABLE;
2390
2391 /*
2392  * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2393  * Then enable protected mode.  However, PCID cannot be enabled
2394  * if EFER.LMA=0, so set it separately.
2395  */
2396 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2397 if (bad)
2398 return X86EMUL_UNHANDLEABLE;
2399
2400 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2401 if (bad)
2402 return X86EMUL_UNHANDLEABLE;
2403
2404 if (cr4 & X86_CR4_PCIDE) {
2405 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2406 if (bad)
2407 return X86EMUL_UNHANDLEABLE;
2408 if (pcid) {
2409 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2410 if (bad)
2411 return X86EMUL_UNHANDLEABLE;
2412 }
2413
2414 }
2415
2416 return X86EMUL_CONTINUE;
2417 }
2418
2419 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2420 const char *smstate)
2421 {
2422 struct desc_struct desc;
2423 struct desc_ptr dt;
2424 u16 selector;
2425 u32 val, cr0, cr3, cr4;
2426 int i;
2427
2428 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2429 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2430 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2431 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2432
2433 for (i = 0; i < NR_EMULATOR_GPRS; i++)
2434 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2435
2436 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2437
2438 if (ctxt->ops->set_dr(ctxt, 6, val))
2439 return X86EMUL_UNHANDLEABLE;
2440
2441 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2442
2443 if (ctxt->ops->set_dr(ctxt, 7, val))
2444 return X86EMUL_UNHANDLEABLE;
2445
2446 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2447 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2448 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2449 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2450 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2451
2452 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2453 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2454 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2455 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2456 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2457
2458 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2459 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2460 ctxt->ops->set_gdt(ctxt, &dt);
2461
2462 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2463 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2464 ctxt->ops->set_idt(ctxt, &dt);
2465
2466 for (i = 0; i < 6; i++) {
2467 int r = rsm_load_seg_32(ctxt, smstate, i);
2468 if (r != X86EMUL_CONTINUE)
2469 return r;
2470 }
2471
2472 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2473
2474 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2475
2476 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2477 }
2478
2479 #ifdef CONFIG_X86_64
2480 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2481 const char *smstate)
2482 {
2483 struct desc_struct desc;
2484 struct desc_ptr dt;
2485 u64 val, cr0, cr3, cr4;
2486 u32 base3;
2487 u16 selector;
2488 int i, r;
2489
2490 for (i = 0; i < NR_EMULATOR_GPRS; i++)
2491 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2492
2493 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2494 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2495
2496 val = GET_SMSTATE(u64, smstate, 0x7f68);
2497
2498 if (ctxt->ops->set_dr(ctxt, 6, val))
2499 return X86EMUL_UNHANDLEABLE;
2500
2501 val = GET_SMSTATE(u64, smstate, 0x7f60);
2502
2503 if (ctxt->ops->set_dr(ctxt, 7, val))
2504 return X86EMUL_UNHANDLEABLE;
2505
2506 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2507 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2508 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2509 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2510 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2511
2512 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2513 return X86EMUL_UNHANDLEABLE;
2514
2515 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2516 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2517 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2518 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2519 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2520 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2521
2522 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2523 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2524 ctxt->ops->set_idt(ctxt, &dt);
2525
2526 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2527 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2528 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2529 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2530 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2531 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2532
2533 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2534 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2535 ctxt->ops->set_gdt(ctxt, &dt);
2536
2537 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2538 if (r != X86EMUL_CONTINUE)
2539 return r;
2540
2541 for (i = 0; i < 6; i++) {
2542 r = rsm_load_seg_64(ctxt, smstate, i);
2543 if (r != X86EMUL_CONTINUE)
2544 return r;
2545 }
2546
2547 return X86EMUL_CONTINUE;
2548 }
2549 #endif
2550
2551 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2552 {
2553 unsigned long cr0, cr4, efer;
2554 char buf[512];
2555 u64 smbase;
2556 int ret;
2557
2558 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2559 return emulate_ud(ctxt);
2560
2561 smbase = ctxt->ops->get_smbase(ctxt);
2562
2563 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2564 if (ret != X86EMUL_CONTINUE)
2565 return X86EMUL_UNHANDLEABLE;
2566
2567 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2568 ctxt->ops->set_nmi_mask(ctxt, false);
2569
2570 ctxt->ops->exiting_smm(ctxt);
2571
2572 /*
2573  * Get back to real mode, to prepare a safe state in which to load
2574  * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2575  * supports long mode.
2576  */
2577 if (emulator_has_longmode(ctxt)) {
2578 struct desc_struct cs_desc;
2579
2580 /* Zero CR4.PCIDE before CR0.PG. */
2581 cr4 = ctxt->ops->get_cr(ctxt, 4);
2582 if (cr4 & X86_CR4_PCIDE)
2583 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2584
2585 /* A 32-bit code segment is required to clear EFER.LMA. */
2586 memset(&cs_desc, 0, sizeof(cs_desc));
2587 cs_desc.type = 0xb;
2588 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2589 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2590 }
2591
2592 /* For the 64-bit case, this will clear EFER.LMA. */
2593 cr0 = ctxt->ops->get_cr(ctxt, 0);
2594 if (cr0 & X86_CR0_PE)
2595 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2596
2597 if (emulator_has_longmode(ctxt)) {
2598 /* Clear CR4.PAE before clearing EFER.LME. */
2599 cr4 = ctxt->ops->get_cr(ctxt, 4);
2600 if (cr4 & X86_CR4_PAE)
2601 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2602
2603 /* And finally go back to 32-bit mode. */
2604 efer = 0;
2605 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2606 }
2607
2608 /*
2609  * Give leave_smm() a chance to make ISA-specific changes to the vCPU
2610  * state (e.g. enter guest mode) before loading state from the SMM
2611  * state-save area.
2612  */
2613 if (ctxt->ops->leave_smm(ctxt, buf))
2614 goto emulate_shutdown;
2615
2616 #ifdef CONFIG_X86_64
2617 if (emulator_has_longmode(ctxt))
2618 ret = rsm_load_state_64(ctxt, buf);
2619 else
2620 #endif
2621 ret = rsm_load_state_32(ctxt, buf);
2622
2623 if (ret != X86EMUL_CONTINUE)
2624 goto emulate_shutdown;
2625
2626 /*
2627  * Note, the ctxt->ops callbacks are responsible for handling side
2628  * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
2629  * runtime updates, etc.  If that changes, e.g. this flow is moved
2630  * out of the emulator to make it look more like enter_smm(), then
2631  * those side effects need to be explicitly handled for both success
2632  * and shutdown.
2633  */
2634 return X86EMUL_CONTINUE;
2635
2636 emulate_shutdown:
2637 ctxt->ops->triple_fault(ctxt);
2638 return X86EMUL_CONTINUE;
2639 }
2640
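/*
 * Build the flat segments used by the SYSCALL/SYSENTER/SYSEXIT paths: base 0,
 * 4 GiB limit, an execute/read code segment (type 0xb) and a read/write stack
 * segment (type 0x3), both DPL 0; em_sysexit() raises the DPL to 3 itself.
 */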
2641 static void
2642 setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2643 {
2644 cs->l = 0;
2645 set_desc_base(cs, 0);
2646 cs->g = 1;
2647 set_desc_limit(cs, 0xfffff);
2648 cs->type = 0x0b;
2649 cs->s = 1;
2650 cs->dpl = 0;
2651 cs->p = 1;
2652 cs->d = 1;
2653 cs->avl = 0;
2654
2655 set_desc_base(ss, 0);
2656 set_desc_limit(ss, 0xfffff);
2657 ss->g = 1;
2658 ss->s = 1;
2659 ss->type = 0x03;
2660 ss->d = 1;
2661 ss->dpl = 0;
2662 ss->p = 1;
2663 ss->l = 0;
2664 ss->avl = 0;
2665 }
2666
2667 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2668 {
2669 u32 eax, ebx, ecx, edx;
2670
2671 eax = ecx = 0;
2672 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2673 return is_guest_vendor_intel(ebx, ecx, edx);
2674 }
2675
2676 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2677 {
2678 const struct x86_emulate_ops *ops = ctxt->ops;
2679 u32 eax, ebx, ecx, edx;
2680
2681 /*
2682  * syscall should always be enabled in long mode - so only become
2683  * vendor specific (cpuid) if other modes are active...
2684  */
2685 if (ctxt->mode == X86EMUL_MODE_PROT64)
2686 return true;
2687
2688 eax = 0x00000000;
2689 ecx = 0x00000000;
2690 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2691
2692 /*
2693  * Intel CPUs only support "syscall" in 64-bit long mode; a 64-bit guest
2694  * running a 32-bit compat app will #UD.  While this could be papered over
2695  * by emulating the AMD response, AMD CPUs cannot behave like Intel.
2696  */
2697 if (is_guest_vendor_intel(ebx, ecx, edx))
2698 return false;
2699
2700 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2701 is_guest_vendor_hygon(ebx, ecx, edx))
2702 return true;
2703
2704 /*
2705  * default: (not Intel, not AMD, not Hygon), apply Intel's
2706  * stricter rules...
2707  */
2708 return false;
2709 }
2710
2711 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2712 {
2713 const struct x86_emulate_ops *ops = ctxt->ops;
2714 struct desc_struct cs, ss;
2715 u64 msr_data;
2716 u16 cs_sel, ss_sel;
2717 u64 efer = 0;
2718
2719 /* syscall is not available in real mode */
2720 if (ctxt->mode == X86EMUL_MODE_REAL ||
2721 ctxt->mode == X86EMUL_MODE_VM86)
2722 return emulate_ud(ctxt);
2723
2724 if (!(em_syscall_is_enabled(ctxt)))
2725 return emulate_ud(ctxt);
2726
2727 ops->get_msr(ctxt, MSR_EFER, &efer);
2728 if (!(efer & EFER_SCE))
2729 return emulate_ud(ctxt);
2730
2731 setup_syscalls_segments(&cs, &ss);
2732 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2733 msr_data >>= 32;
2734 cs_sel = (u16)(msr_data & 0xfffc);
2735 ss_sel = (u16)(msr_data + 8);
2736
2737 if (efer & EFER_LMA) {
2738 cs.d = 0;
2739 cs.l = 1;
2740 }
2741 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2742 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2743
2744 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2745 if (efer & EFER_LMA) {
2746 #ifdef CONFIG_X86_64
2747 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2748
2749 ops->get_msr(ctxt,
2750 ctxt->mode == X86EMUL_MODE_PROT64 ?
2751 MSR_LSTAR : MSR_CSTAR, &msr_data);
2752 ctxt->_eip = msr_data;
2753
2754 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2755 ctxt->eflags &= ~msr_data;
2756 ctxt->eflags |= X86_EFLAGS_FIXED;
2757 #endif
2758 } else {
2759 /* legacy mode */
2760 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2761 ctxt->_eip = (u32)msr_data;
2762
2763 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2764 }
2765
2766 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2767 return X86EMUL_CONTINUE;
2768 }
2769
2770 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2771 {
2772 const struct x86_emulate_ops *ops = ctxt->ops;
2773 struct desc_struct cs, ss;
2774 u64 msr_data;
2775 u16 cs_sel, ss_sel;
2776 u64 efer = 0;
2777
2778 ops->get_msr(ctxt, MSR_EFER, &efer);
2779
2780 if (ctxt->mode == X86EMUL_MODE_REAL)
2781 return emulate_gp(ctxt, 0);
2782
2783 /*
2784  * Not recognized on AMD in compat mode (but is recognized in legacy
2785  * mode).
2786  */
2787 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2788 && !vendor_intel(ctxt))
2789 return emulate_ud(ctxt);
2790
2791 /* sysenter/sysexit have not been tested in 64-bit mode. */
2792 if (ctxt->mode == X86EMUL_MODE_PROT64)
2793 return X86EMUL_UNHANDLEABLE;
2794
2795 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2796 if ((msr_data & 0xfffc) == 0x0)
2797 return emulate_gp(ctxt, 0);
2798
2799 setup_syscalls_segments(&cs, &ss);
2800 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2801 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2802 ss_sel = cs_sel + 8;
2803 if (efer & EFER_LMA) {
2804 cs.d = 0;
2805 cs.l = 1;
2806 }
2807
2808 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2809 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2810
2811 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2812 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2813
2814 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2815 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2816 (u32)msr_data;
2817 if (efer & EFER_LMA)
2818 ctxt->mode = X86EMUL_MODE_PROT64;
2819
2820 return X86EMUL_CONTINUE;
2821 }
2822
2823 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2824 {
2825 const struct x86_emulate_ops *ops = ctxt->ops;
2826 struct desc_struct cs, ss;
2827 u64 msr_data, rcx, rdx;
2828 int usermode;
2829 u16 cs_sel = 0, ss_sel = 0;
2830
2831 /* inject #GP if in real mode or Virtual 8086 mode */
2832 if (ctxt->mode == X86EMUL_MODE_REAL ||
2833 ctxt->mode == X86EMUL_MODE_VM86)
2834 return emulate_gp(ctxt, 0);
2835
2836 setup_syscalls_segments(&cs, &ss);
2837
2838 if ((ctxt->rex_prefix & 0x8) != 0x0)
2839 usermode = X86EMUL_MODE_PROT64;
2840 else
2841 usermode = X86EMUL_MODE_PROT32;
2842
2843 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2844 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2845
2846 cs.dpl = 3;
2847 ss.dpl = 3;
2848 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2849 switch (usermode) {
2850 case X86EMUL_MODE_PROT32:
2851 cs_sel = (u16)(msr_data + 16);
2852 if ((msr_data & 0xfffc) == 0x0)
2853 return emulate_gp(ctxt, 0);
2854 ss_sel = (u16)(msr_data + 24);
2855 rcx = (u32)rcx;
2856 rdx = (u32)rdx;
2857 break;
2858 case X86EMUL_MODE_PROT64:
2859 cs_sel = (u16)(msr_data + 32);
2860 if (msr_data == 0x0)
2861 return emulate_gp(ctxt, 0);
2862 ss_sel = cs_sel + 8;
2863 cs.d = 0;
2864 cs.l = 1;
2865 if (emul_is_noncanonical_address(rcx, ctxt) ||
2866 emul_is_noncanonical_address(rdx, ctxt))
2867 return emulate_gp(ctxt, 0);
2868 break;
2869 }
2870 cs_sel |= SEGMENT_RPL_MASK;
2871 ss_sel |= SEGMENT_RPL_MASK;
2872
2873 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2874 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2875
2876 ctxt->_eip = rdx;
2877 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2878
2879 return X86EMUL_CONTINUE;
2880 }
2881
2882 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2883 {
2884 int iopl;
2885 if (ctxt->mode == X86EMUL_MODE_REAL)
2886 return false;
2887 if (ctxt->mode == X86EMUL_MODE_VM86)
2888 return true;
2889 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2890 return ctxt->ops->cpl(ctxt) > iopl;
2891 }
2892
2893 #define VMWARE_PORT_VMPORT (0x5658)
2894 #define VMWARE_PORT_VMRPC (0x5659)
2895
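/*
 * Check the TSS I/O permission bitmap: the bitmap offset is read from the
 * 16-bit field at byte 102 of the TSS, each port maps to one bit, and all
 * 'len' bits starting at 'port' must be clear for the access to be allowed.
 * The VMware backdoor ports bypass the bitmap when the backdoor is enabled.
 */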
2896 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2897 u16 port, u16 len)
2898 {
2899 const struct x86_emulate_ops *ops = ctxt->ops;
2900 struct desc_struct tr_seg;
2901 u32 base3;
2902 int r;
2903 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2904 unsigned mask = (1 << len) - 1;
2905 unsigned long base;
2906
2907 /*
2908  * VMware allows access to these ports even if denied
2909  * by the TSS I/O permission bitmap. Mimic that behavior.
2910  */
2911 if (enable_vmware_backdoor &&
2912 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2913 return true;
2914
2915 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2916 if (!tr_seg.p)
2917 return false;
2918 if (desc_limit_scaled(&tr_seg) < 103)
2919 return false;
2920 base = get_desc_base(&tr_seg);
2921 #ifdef CONFIG_X86_64
2922 base |= ((u64)base3) << 32;
2923 #endif
2924 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2925 if (r != X86EMUL_CONTINUE)
2926 return false;
2927 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2928 return false;
2929 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2930 if (r != X86EMUL_CONTINUE)
2931 return false;
2932 if ((perm >> bit_idx) & mask)
2933 return false;
2934 return true;
2935 }
2936
2937 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2938 u16 port, u16 len)
2939 {
2940 if (ctxt->perm_ok)
2941 return true;
2942
2943 if (emulator_bad_iopl(ctxt))
2944 if (!emulator_io_port_access_allowed(ctxt, port, len))
2945 return false;
2946
2947 ctxt->perm_ok = true;
2948
2949 return true;
2950 }
2951
2952 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2953 {
2954 /*
2955  * Intel CPUs mask the counter and pointers in quite strange
2956  * manner when ECX is zero due to REP-string optimizations.
2957  */
2958 #ifdef CONFIG_X86_64
2959 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2960 return;
2961
2962 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2963
2964 switch (ctxt->b) {
2965 case 0xa4:
2966 case 0xa5:
2967 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2968 fallthrough;
2969 case 0xaa:
2970 case 0xab:
2971 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2972 }
2973 #endif
2974 }
2975
2976 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2977 struct tss_segment_16 *tss)
2978 {
2979 tss->ip = ctxt->_eip;
2980 tss->flag = ctxt->eflags;
2981 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2982 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2983 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2984 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2985 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2986 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2987 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2988 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2989
2990 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2991 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2992 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2993 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2994 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2995 }
2996
2997 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2998 struct tss_segment_16 *tss)
2999 {
3000 int ret;
3001 u8 cpl;
3002
3003 ctxt->_eip = tss->ip;
3004 ctxt->eflags = tss->flag | 2;
3005 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3006 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3007 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3008 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3009 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3010 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3011 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3012 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3013
3014 /*
3015  * SDM says that segment selectors are loaded before segment
3016  * descriptors.
3017  */
3018 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3019 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3020 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3021 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3022 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3023
3024 cpl = tss->cs & 3;
3025
3026 /*
3027  * Now load segment descriptors. If a fault happens at this stage
3028  * it is handled in the context of the new task.
3029  */
3030 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3031 X86_TRANSFER_TASK_SWITCH, NULL);
3032 if (ret != X86EMUL_CONTINUE)
3033 return ret;
3034 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3035 X86_TRANSFER_TASK_SWITCH, NULL);
3036 if (ret != X86EMUL_CONTINUE)
3037 return ret;
3038 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3039 X86_TRANSFER_TASK_SWITCH, NULL);
3040 if (ret != X86EMUL_CONTINUE)
3041 return ret;
3042 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3043 X86_TRANSFER_TASK_SWITCH, NULL);
3044 if (ret != X86EMUL_CONTINUE)
3045 return ret;
3046 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3047 X86_TRANSFER_TASK_SWITCH, NULL);
3048 if (ret != X86EMUL_CONTINUE)
3049 return ret;
3050
3051 return X86EMUL_CONTINUE;
3052 }
3053
3054 static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
3055 ulong old_tss_base, struct desc_struct *new_desc)
3056 {
3057 struct tss_segment_16 tss_seg;
3058 int ret;
3059 u32 new_tss_base = get_desc_base(new_desc);
3060
3061 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3062 if (ret != X86EMUL_CONTINUE)
3063 return ret;
3064
3065 save_state_to_tss16(ctxt, &tss_seg);
3066
3067 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3068 if (ret != X86EMUL_CONTINUE)
3069 return ret;
3070
3071 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3072 if (ret != X86EMUL_CONTINUE)
3073 return ret;
3074
3075 if (old_tss_sel != 0xffff) {
3076 tss_seg.prev_task_link = old_tss_sel;
3077
3078 ret = linear_write_system(ctxt, new_tss_base,
3079 &tss_seg.prev_task_link,
3080 sizeof(tss_seg.prev_task_link));
3081 if (ret != X86EMUL_CONTINUE)
3082 return ret;
3083 }
3084
3085 return load_state_from_tss16(ctxt, &tss_seg);
3086 }
3087
3088 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3089 struct tss_segment_32 *tss)
3090 {
3091 /* CR3 and LDT selector are not saved intentionally */
3092 tss->eip = ctxt->_eip;
3093 tss->eflags = ctxt->eflags;
3094 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3095 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3096 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3097 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3098 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3099 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3100 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3101 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3102
3103 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3104 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3105 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3106 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3107 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3108 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3109 }
3110
3111 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3112 struct tss_segment_32 *tss)
3113 {
3114 int ret;
3115 u8 cpl;
3116
3117 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3118 return emulate_gp(ctxt, 0);
3119 ctxt->_eip = tss->eip;
3120 ctxt->eflags = tss->eflags | 2;
3121
3122 /* General purpose registers */
3123 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3124 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3125 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3126 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3127 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3128 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3129 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3130 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3131
3132 /*
3133  * SDM says that segment selectors are loaded before segment
3134  * descriptors.  This is important because CPL checks will
3135  * use CS.RPL.
3136  */
3137 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3138 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3139 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3140 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3141 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3142 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3143 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3144
3145 /*
3146  * If we're switching between Protected Mode and VM86, we need to make
3147  * sure to update the mode before loading the segment descriptors so
3148  * that the selectors are interpreted correctly.
3149  */
3150 if (ctxt->eflags & X86_EFLAGS_VM) {
3151 ctxt->mode = X86EMUL_MODE_VM86;
3152 cpl = 3;
3153 } else {
3154 ctxt->mode = X86EMUL_MODE_PROT32;
3155 cpl = tss->cs & 3;
3156 }
3157
3158 /*
3159  * Now load segment descriptors. If a fault happens at this stage
3160  * it is handled in the context of the new task.
3161  */
3162 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3163 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3164 if (ret != X86EMUL_CONTINUE)
3165 return ret;
3166 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3167 X86_TRANSFER_TASK_SWITCH, NULL);
3168 if (ret != X86EMUL_CONTINUE)
3169 return ret;
3170 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3171 X86_TRANSFER_TASK_SWITCH, NULL);
3172 if (ret != X86EMUL_CONTINUE)
3173 return ret;
3174 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3175 X86_TRANSFER_TASK_SWITCH, NULL);
3176 if (ret != X86EMUL_CONTINUE)
3177 return ret;
3178 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3179 X86_TRANSFER_TASK_SWITCH, NULL);
3180 if (ret != X86EMUL_CONTINUE)
3181 return ret;
3182 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3183 X86_TRANSFER_TASK_SWITCH, NULL);
3184 if (ret != X86EMUL_CONTINUE)
3185 return ret;
3186 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3187 X86_TRANSFER_TASK_SWITCH, NULL);
3188
3189 return ret;
3190 }
3191
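/*
 * Memory side of a 32-bit task switch: save the outgoing task's registers and
 * selectors into the old TSS (only the fields from EIP up to the LDT selector
 * are written back), optionally link the new TSS back to the old one, then
 * load the incoming task's state with load_state_from_tss32().
 */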
3192 static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
3193 ulong old_tss_base, struct desc_struct *new_desc)
3194 {
3195 struct tss_segment_32 tss_seg;
3196 int ret;
3197 u32 new_tss_base = get_desc_base(new_desc);
3198 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3199 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3200
3201 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3202 if (ret != X86EMUL_CONTINUE)
3203 return ret;
3204
3205 save_state_to_tss32(ctxt, &tss_seg);
3206
3207 /* Only GP registers and segment selectors are saved */
3208 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3209 ldt_sel_offset - eip_offset);
3210 if (ret != X86EMUL_CONTINUE)
3211 return ret;
3212
3213 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3214 if (ret != X86EMUL_CONTINUE)
3215 return ret;
3216
3217 if (old_tss_sel != 0xffff) {
3218 tss_seg.prev_task_link = old_tss_sel;
3219
3220 ret = linear_write_system(ctxt, new_tss_base,
3221 &tss_seg.prev_task_link,
3222 sizeof(tss_seg.prev_task_link));
3223 if (ret != X86EMUL_CONTINUE)
3224 return ret;
3225 }
3226
3227 return load_state_from_tss32(ctxt, &tss_seg);
3228 }
3229
3230 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3231 u16 tss_selector, int idt_index, int reason,
3232 bool has_error_code, u32 error_code)
3233 {
3234 const struct x86_emulate_ops *ops = ctxt->ops;
3235 struct desc_struct curr_tss_desc, next_tss_desc;
3236 int ret;
3237 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3238 ulong old_tss_base =
3239 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3240 u32 desc_limit;
3241 ulong desc_addr, dr7;
3242
3243 /* FIXME: old_tss_base == ~0 ? */
3244
3245 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3246 if (ret != X86EMUL_CONTINUE)
3247 return ret;
3248 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3249 if (ret != X86EMUL_CONTINUE)
3250 return ret;
3251
3252 /* FIXME: check that next_tss_desc is a TSS */
3253
3254 /*
3255  * Check privileges.  The three ways a task switch can be caused:
3256  *
3257  * 1. jmp/call/int to task gate: check RPL and CPL against the gate's DPL.
3258  * 2. Exception/IRQ/iret: no check is performed.
3259  * 3. jmp/call to a TSS directly: no check is performed here since the
3260  *    hardware checks it before exiting to the emulator.
3261  */
3262 if (reason == TASK_SWITCH_GATE) {
3263 if (idt_index != -1) {
3264 /* Software interrupts */
3265 struct desc_struct task_gate_desc;
3266 int dpl;
3267
3268 ret = read_interrupt_descriptor(ctxt, idt_index,
3269 &task_gate_desc);
3270 if (ret != X86EMUL_CONTINUE)
3271 return ret;
3272
3273 dpl = task_gate_desc.dpl;
3274 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3275 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3276 }
3277 }
3278
3279 desc_limit = desc_limit_scaled(&next_tss_desc);
3280 if (!next_tss_desc.p ||
3281 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3282 desc_limit < 0x2b)) {
3283 return emulate_ts(ctxt, tss_selector & 0xfffc);
3284 }
3285
3286 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3287 curr_tss_desc.type &= ~(1 << 1);
3288 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3289 }
3290
3291 if (reason == TASK_SWITCH_IRET)
3292 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3293
3294 /* Set the back link to the previous task only if NT is set in EFLAGS;
3295    note that old_tss_sel is not used after this point. */
3296 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3297 old_tss_sel = 0xffff;
3298
3299 if (next_tss_desc.type & 8)
3300 ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
3301 else
3302 ret = task_switch_16(ctxt, old_tss_sel,
3303 old_tss_base, &next_tss_desc);
3304 if (ret != X86EMUL_CONTINUE)
3305 return ret;
3306
3307 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3308 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3309
3310 if (reason != TASK_SWITCH_IRET) {
3311 next_tss_desc.type |= (1 << 1);
3312 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3313 }
3314
3315 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3316 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3317
3318 if (has_error_code) {
3319 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3320 ctxt->lock_prefix = 0;
3321 ctxt->src.val = (unsigned long) error_code;
3322 ret = em_push(ctxt);
3323 }
3324
3325 ops->get_dr(ctxt, 7, &dr7);
3326 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3327
3328 return ret;
3329 }
3330
3331 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3332 u16 tss_selector, int idt_index, int reason,
3333 bool has_error_code, u32 error_code)
3334 {
3335 int rc;
3336
3337 invalidate_registers(ctxt);
3338 ctxt->_eip = ctxt->eip;
3339 ctxt->dst.type = OP_NONE;
3340
3341 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3342 has_error_code, error_code);
3343
3344 if (rc == X86EMUL_CONTINUE) {
3345 ctxt->eip = ctxt->_eip;
3346 writeback_registers(ctxt);
3347 }
3348
3349 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3350 }
3351
3352 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3353 struct operand *op)
3354 {
3355 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3356
3357 register_address_increment(ctxt, reg, df * op->bytes);
3358 op->addr.mem.ea = register_address(ctxt, reg);
3359 }
3360
3361 static int em_das(struct x86_emulate_ctxt *ctxt)
3362 {
3363 u8 al, old_al;
3364 bool af, cf, old_cf;
3365
3366 cf = ctxt->eflags & X86_EFLAGS_CF;
3367 al = ctxt->dst.val;
3368
3369 old_al = al;
3370 old_cf = cf;
3371 cf = false;
3372 af = ctxt->eflags & X86_EFLAGS_AF;
3373 if ((al & 0x0f) > 9 || af) {
3374 al -= 6;
3375 cf = old_cf | (al >= 250);
3376 af = true;
3377 } else {
3378 af = false;
3379 }
3380 if (old_al > 0x99 || old_cf) {
3381 al -= 0x60;
3382 cf = true;
3383 }
3384
3385 ctxt->dst.val = al;
3386 /* Set PF, ZF, SF */
3387 ctxt->src.type = OP_IMM;
3388 ctxt->src.val = 0;
3389 ctxt->src.bytes = 1;
3390 fastop(ctxt, em_or);
3391 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3392 if (cf)
3393 ctxt->eflags |= X86_EFLAGS_CF;
3394 if (af)
3395 ctxt->eflags |= X86_EFLAGS_AF;
3396 return X86EMUL_CONTINUE;
3397 }
3398
3399 static int em_aam(struct x86_emulate_ctxt *ctxt)
3400 {
3401 u8 al, ah;
3402
3403 if (ctxt->src.val == 0)
3404 return emulate_de(ctxt);
3405
3406 al = ctxt->dst.val & 0xff;
3407 ah = al / ctxt->src.val;
3408 al %= ctxt->src.val;
3409
3410 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3411
3412 /* Set PF, ZF, SF */
3413 ctxt->src.type = OP_IMM;
3414 ctxt->src.val = 0;
3415 ctxt->src.bytes = 1;
3416 fastop(ctxt, em_or);
3417
3418 return X86EMUL_CONTINUE;
3419 }
3420
3421 static int em_aad(struct x86_emulate_ctxt *ctxt)
3422 {
3423 u8 al = ctxt->dst.val & 0xff;
3424 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3425
3426 al = (al + (ah * ctxt->src.val)) & 0xff;
3427
3428 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3429
3430 /* Set PF, ZF, SF */
3431 ctxt->src.type = OP_IMM;
3432 ctxt->src.val = 0;
3433 ctxt->src.bytes = 1;
3434 fastop(ctxt, em_or);
3435
3436 return X86EMUL_CONTINUE;
3437 }
3438
3439 static int em_call(struct x86_emulate_ctxt *ctxt)
3440 {
3441 int rc;
3442 long rel = ctxt->src.val;
3443
3444 ctxt->src.val = (unsigned long)ctxt->_eip;
3445 rc = jmp_rel(ctxt, rel);
3446 if (rc != X86EMUL_CONTINUE)
3447 return rc;
3448 return em_push(ctxt);
3449 }
3450
3451 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3452 {
3453 u16 sel, old_cs;
3454 ulong old_eip;
3455 int rc;
3456 struct desc_struct old_desc, new_desc;
3457 const struct x86_emulate_ops *ops = ctxt->ops;
3458 int cpl = ctxt->ops->cpl(ctxt);
3459 enum x86emul_mode prev_mode = ctxt->mode;
3460
3461 old_eip = ctxt->_eip;
3462 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3463
3464 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3465 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3466 X86_TRANSFER_CALL_JMP, &new_desc);
3467 if (rc != X86EMUL_CONTINUE)
3468 return rc;
3469
3470 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3471 if (rc != X86EMUL_CONTINUE)
3472 goto fail;
3473
3474 ctxt->src.val = old_cs;
3475 rc = em_push(ctxt);
3476 if (rc != X86EMUL_CONTINUE)
3477 goto fail;
3478
3479 ctxt->src.val = old_eip;
3480 rc = em_push(ctxt);
3481 /* If we failed, we tainted the memory, but the very least we should
3482    restore cs */
3483 if (rc != X86EMUL_CONTINUE) {
3484 pr_warn_once("faulting far call emulation tainted memory\n");
3485 goto fail;
3486 }
3487 return rc;
3488 fail:
3489 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3490 ctxt->mode = prev_mode;
3491 return rc;
3492
3493 }
3494
3495 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3496 {
3497 int rc;
3498 unsigned long eip;
3499
3500 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3501 if (rc != X86EMUL_CONTINUE)
3502 return rc;
3503 rc = assign_eip_near(ctxt, eip);
3504 if (rc != X86EMUL_CONTINUE)
3505 return rc;
3506 rsp_increment(ctxt, ctxt->src.val);
3507 return X86EMUL_CONTINUE;
3508 }
3509
3510 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3511 {
3512 /* Write back the register source. */
3513 ctxt->src.val = ctxt->dst.val;
3514 write_register_operand(&ctxt->src);
3515
3516 /* Write back the memory destination with implicit LOCK prefix. */
3517 ctxt->dst.val = ctxt->src.orig_val;
3518 ctxt->lock_prefix = 1;
3519 return X86EMUL_CONTINUE;
3520 }
3521
3522 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3523 {
3524 ctxt->dst.val = ctxt->src2.val;
3525 return fastop(ctxt, em_imul);
3526 }
3527
3528 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3529 {
3530 ctxt->dst.type = OP_REG;
3531 ctxt->dst.bytes = ctxt->src.bytes;
3532 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3533 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3534
3535 return X86EMUL_CONTINUE;
3536 }
3537
3538 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3539 {
3540 u64 tsc_aux = 0;
3541
3542 if (!ctxt->ops->guest_has_rdpid(ctxt))
3543 return emulate_ud(ctxt);
3544
3545 ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3546 ctxt->dst.val = tsc_aux;
3547 return X86EMUL_CONTINUE;
3548 }
3549
3550 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3551 {
3552 u64 tsc = 0;
3553
3554 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3555 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3556 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3557 return X86EMUL_CONTINUE;
3558 }
3559
3560 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3561 {
3562 u64 pmc;
3563
3564 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3565 return emulate_gp(ctxt, 0);
3566 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3567 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3568 return X86EMUL_CONTINUE;
3569 }
3570
3571 static int em_mov(struct x86_emulate_ctxt *ctxt)
3572 {
3573 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3574 return X86EMUL_CONTINUE;
3575 }
3576
3577 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3578 {
3579 u16 tmp;
3580
3581 if (!ctxt->ops->guest_has_movbe(ctxt))
3582 return emulate_ud(ctxt);
3583
3584 switch (ctxt->op_bytes) {
3585 case 2:
3586 /*
3587  * From the MOVBE definition: "...When the operand size is 16 bits,
3588  * the upper word of the destination register remains unchanged
3589  * ..."
3590  *
3591  * Both casting ->valptr and ->val to u16 breaks strict aliasing
3592  * rules, so the operation is done almost by hand.
3593  */
3594 tmp = (u16)ctxt->src.val;
3595 ctxt->dst.val &= ~0xffffUL;
3596 ctxt->dst.val |= (unsigned long)swab16(tmp);
3597 break;
3598 case 4:
3599 ctxt->dst.val = swab32((u32)ctxt->src.val);
3600 break;
3601 case 8:
3602 ctxt->dst.val = swab64(ctxt->src.val);
3603 break;
3604 default:
3605 BUG();
3606 }
3607 return X86EMUL_CONTINUE;
3608 }
3609
3610 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3611 {
3612 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3613 return emulate_gp(ctxt, 0);
3614
3615 /* Disable writeback. */
3616 ctxt->dst.type = OP_NONE;
3617 return X86EMUL_CONTINUE;
3618 }
3619
3620 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3621 {
3622 unsigned long val;
3623
3624 if (ctxt->mode == X86EMUL_MODE_PROT64)
3625 val = ctxt->src.val & ~0ULL;
3626 else
3627 val = ctxt->src.val & ~0U;
3628
3629 /* #UD condition is already handled. */
3630 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3631 return emulate_gp(ctxt, 0);
3632
3633 /* Disable writeback. */
3634 ctxt->dst.type = OP_NONE;
3635 return X86EMUL_CONTINUE;
3636 }
3637
3638 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3639 {
3640 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3641 u64 msr_data;
3642 int r;
3643
3644 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3645 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3646 r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3647
3648 if (r == X86EMUL_IO_NEEDED)
3649 return r;
3650
3651 if (r > 0)
3652 return emulate_gp(ctxt, 0);
3653
3654 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3655 }
3656
3657 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3658 {
3659 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3660 u64 msr_data;
3661 int r;
3662
3663 r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3664
3665 if (r == X86EMUL_IO_NEEDED)
3666 return r;
3667
3668 if (r)
3669 return emulate_gp(ctxt, 0);
3670
3671 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3672 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3673 return X86EMUL_CONTINUE;
3674 }
3675
3676 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3677 {
3678 if (segment > VCPU_SREG_GS &&
3679 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3680 ctxt->ops->cpl(ctxt) > 0)
3681 return emulate_gp(ctxt, 0);
3682
3683 ctxt->dst.val = get_segment_selector(ctxt, segment);
3684 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3685 ctxt->dst.bytes = 2;
3686 return X86EMUL_CONTINUE;
3687 }
3688
3689 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3690 {
3691 if (ctxt->modrm_reg > VCPU_SREG_GS)
3692 return emulate_ud(ctxt);
3693
3694 return em_store_sreg(ctxt, ctxt->modrm_reg);
3695 }
3696
3697 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3698 {
3699 u16 sel = ctxt->src.val;
3700
3701 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3702 return emulate_ud(ctxt);
3703
3704 if (ctxt->modrm_reg == VCPU_SREG_SS)
3705 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3706
3707 /* Disable writeback. */
3708 ctxt->dst.type = OP_NONE;
3709 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3710 }
3711
3712 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3713 {
3714 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3715 }
3716
3717 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3718 {
3719 u16 sel = ctxt->src.val;
3720
3721 /* Disable writeback. */
3722 ctxt->dst.type = OP_NONE;
3723 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3724 }
3725
3726 static int em_str(struct x86_emulate_ctxt *ctxt)
3727 {
3728 return em_store_sreg(ctxt, VCPU_SREG_TR);
3729 }
3730
3731 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3732 {
3733 u16 sel = ctxt->src.val;
3734
3735 /* Disable writeback. */
3736 ctxt->dst.type = OP_NONE;
3737 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3738 }
3739
3740 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3741 {
3742 int rc;
3743 ulong linear;
3744
3745 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3746 if (rc == X86EMUL_CONTINUE)
3747 ctxt->ops->invlpg(ctxt, linear);
3748
3749 ctxt->dst.type = OP_NONE;
3750 return X86EMUL_CONTINUE;
3751 }
3752
3753 static int em_clts(struct x86_emulate_ctxt *ctxt)
3754 {
3755 ulong cr0;
3756
3757 cr0 = ctxt->ops->get_cr(ctxt, 0);
3758 cr0 &= ~X86_CR0_TS;
3759 ctxt->ops->set_cr(ctxt, 0, cr0);
3760 return X86EMUL_CONTINUE;
3761 }
3762
3763 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3764 {
3765 int rc = ctxt->ops->fix_hypercall(ctxt);
3766
3767 if (rc != X86EMUL_CONTINUE)
3768 return rc;
3769
3770 /* Let the processor re-execute the fixed hypercall */
3771 ctxt->_eip = ctxt->eip;
3772 /* Disable writeback. */
3773 ctxt->dst.type = OP_NONE;
3774 return X86EMUL_CONTINUE;
3775 }
3776
3777 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3778 void (*get)(struct x86_emulate_ctxt *ctxt,
3779 struct desc_ptr *ptr))
3780 {
3781 struct desc_ptr desc_ptr;
3782
3783 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3784 ctxt->ops->cpl(ctxt) > 0)
3785 return emulate_gp(ctxt, 0);
3786
3787 if (ctxt->mode == X86EMUL_MODE_PROT64)
3788 ctxt->op_bytes = 8;
3789 get(ctxt, &desc_ptr);
3790 if (ctxt->op_bytes == 2) {
3791 ctxt->op_bytes = 4;
3792 desc_ptr.address &= 0x00ffffff;
3793 }
3794 /* Disable writeback. */
3795 ctxt->dst.type = OP_NONE;
3796 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3797 &desc_ptr, 2 + ctxt->op_bytes);
3798 }
3799
3800 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3801 {
3802 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3803 }
3804
3805 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3806 {
3807 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3808 }
3809
3810 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3811 {
3812 struct desc_ptr desc_ptr;
3813 int rc;
3814
3815 if (ctxt->mode == X86EMUL_MODE_PROT64)
3816 ctxt->op_bytes = 8;
3817 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3818 &desc_ptr.size, &desc_ptr.address,
3819 ctxt->op_bytes);
3820 if (rc != X86EMUL_CONTINUE)
3821 return rc;
3822 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3823 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3824 return emulate_gp(ctxt, 0);
3825 if (lgdt)
3826 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3827 else
3828 ctxt->ops->set_idt(ctxt, &desc_ptr);
3829 /* Disable writeback. */
3830 ctxt->dst.type = OP_NONE;
3831 return X86EMUL_CONTINUE;
3832 }
3833
3834 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3835 {
3836 return em_lgdt_lidt(ctxt, true);
3837 }
3838
3839 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3840 {
3841 return em_lgdt_lidt(ctxt, false);
3842 }
3843
3844 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3845 {
3846 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3847 ctxt->ops->cpl(ctxt) > 0)
3848 return emulate_gp(ctxt, 0);
3849
3850 if (ctxt->dst.type == OP_MEM)
3851 ctxt->dst.bytes = 2;
3852 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3853 return X86EMUL_CONTINUE;
3854 }
3855
3856 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3857 {
3858 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3859 | (ctxt->src.val & 0x0f));
3860 ctxt->dst.type = OP_NONE;
3861 return X86EMUL_CONTINUE;
3862 }
3863
3864 static int em_loop(struct x86_emulate_ctxt *ctxt)
3865 {
3866 int rc = X86EMUL_CONTINUE;
3867
3868 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3869 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3870 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3871 rc = jmp_rel(ctxt, ctxt->src.val);
3872
3873 return rc;
3874 }
3875
3876 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3877 {
3878 int rc = X86EMUL_CONTINUE;
3879
3880 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3881 rc = jmp_rel(ctxt, ctxt->src.val);
3882
3883 return rc;
3884 }
3885
3886 static int em_in(struct x86_emulate_ctxt *ctxt)
3887 {
3888 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3889 &ctxt->dst.val))
3890 return X86EMUL_IO_NEEDED;
3891
3892 return X86EMUL_CONTINUE;
3893 }
3894
3895 static int em_out(struct x86_emulate_ctxt *ctxt)
3896 {
3897 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3898 &ctxt->src.val, 1);
3899
3900 ctxt->dst.type = OP_NONE;
3901 return X86EMUL_CONTINUE;
3902 }
3903
3904 static int em_cli(struct x86_emulate_ctxt *ctxt)
3905 {
3906 if (emulator_bad_iopl(ctxt))
3907 return emulate_gp(ctxt, 0);
3908
3909 ctxt->eflags &= ~X86_EFLAGS_IF;
3910 return X86EMUL_CONTINUE;
3911 }
3912
3913 static int em_sti(struct x86_emulate_ctxt *ctxt)
3914 {
3915 if (emulator_bad_iopl(ctxt))
3916 return emulate_gp(ctxt, 0);
3917
3918 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3919 ctxt->eflags |= X86_EFLAGS_IF;
3920 return X86EMUL_CONTINUE;
3921 }
3922
3923 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3924 {
3925 u32 eax, ebx, ecx, edx;
3926 u64 msr = 0;
3927
3928 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3929 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3930 ctxt->ops->cpl(ctxt)) {
3931 return emulate_gp(ctxt, 0);
3932 }
3933
3934 eax = reg_read(ctxt, VCPU_REGS_RAX);
3935 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3936 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3937 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3938 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3939 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3940 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3941 return X86EMUL_CONTINUE;
3942 }
3943
3944 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3945 {
3946 u32 flags;
3947
3948 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3949 X86_EFLAGS_SF;
3950 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3951
3952 ctxt->eflags &= ~0xffUL;
3953 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3954 return X86EMUL_CONTINUE;
3955 }
3956
3957 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3958 {
3959 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3960 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3961 return X86EMUL_CONTINUE;
3962 }
3963
3964 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3965 {
3966 switch (ctxt->op_bytes) {
3967 #ifdef CONFIG_X86_64
3968 case 8:
3969 asm("bswap %0" : "+r"(ctxt->dst.val));
3970 break;
3971 #endif
3972 default:
3973 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3974 break;
3975 }
3976 return X86EMUL_CONTINUE;
3977 }
3978
3979 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3980 {
3981 /* emulate clflush regardless of cpuid */
3982 return X86EMUL_CONTINUE;
3983 }
3984
3985 static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3986 {
3987 /* emulate clflushopt regardless of cpuid */
3988 return X86EMUL_CONTINUE;
3989 }
3990
3991 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3992 {
3993 ctxt->dst.val = (s32) ctxt->src.val;
3994 return X86EMUL_CONTINUE;
3995 }
3996
3997 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3998 {
3999 if (!ctxt->ops->guest_has_fxsr(ctxt))
4000 return emulate_ud(ctxt);
4001
4002 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4003 return emulate_nm(ctxt);
4004
4005 /*
4006  * Don't emulate a case that should never be hit, instead of working
4007  * around a lack of fxsave64/fxrstor64 on old compilers.
4008  */
4009 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4010 return X86EMUL_UNHANDLEABLE;
4011
4012 return X86EMUL_CONTINUE;
4013 }
4014
4015 /*
4016  * Guest-visible FXSAVE image size: all 16 XMM regs in 64-bit mode; XMM0-7
4017  * elsewhere when CR4.OSFXSR is set, else only the legacy x87/MXCSR area.
4018  */
4019 static size_t __fxstate_size(int nregs)
4020 {
4021 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4022 }
4023
4024 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4025 {
4026 bool cr4_osfxsr;
4027 if (ctxt->mode == X86EMUL_MODE_PROT64)
4028 return __fxstate_size(16);
4029
4030 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4031 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4032 }
4033
4034 /*
4035  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode:
4036  *  1) 16 bit mode
4037  *  2) 32 bit mode
4038  *  3) 64-bit mode with REX.W prefix
4039  *  4) 64-bit mode without REX.W prefix
4040  *
4041  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4042  * desired result.  (4) is not emulated.
4043  *
4044  * Note: guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4045  * and FPU DS) should match.
4046  */
4047
4048
4049
4050
4051
4052 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4053 {
4054 struct fxregs_state fx_state;
4055 int rc;
4056
4057 rc = check_fxsr(ctxt);
4058 if (rc != X86EMUL_CONTINUE)
4059 return rc;
4060
4061 kvm_fpu_get();
4062
4063 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4064
4065 kvm_fpu_put();
4066
4067 if (rc != X86EMUL_CONTINUE)
4068 return rc;
4069
4070 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4071 fxstate_size(ctxt));
4072 }
4073
4074 /*
4075  * FXRSTOR may load more XMM state than the guest actually provided:
4076  * used_size can be smaller than the full fxregs_state.  Fill the part
4077  * the guest could not touch from a fresh host FXSAVE so that FXRSTOR
4078  * operates on a fully initialized image rather than stack garbage.
4079  */
4080
4081 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4082 const size_t used_size)
4083 {
4084 struct fxregs_state fx_tmp;
4085 int rc;
4086
4087 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4088 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4089 __fxstate_size(16) - used_size);
4090
4091 return rc;
4092 }
4093
4094 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4095 {
4096 struct fxregs_state fx_state;
4097 int rc;
4098 size_t size;
4099
4100 rc = check_fxsr(ctxt);
4101 if (rc != X86EMUL_CONTINUE)
4102 return rc;
4103
4104 size = fxstate_size(ctxt);
4105 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4106 if (rc != X86EMUL_CONTINUE)
4107 return rc;
4108
4109 kvm_fpu_get();
4110
4111 if (size < __fxstate_size(16)) {
4112 rc = fxregs_fixup(&fx_state, size);
4113 if (rc != X86EMUL_CONTINUE)
4114 goto out;
4115 }
4116
4117 if (fx_state.mxcsr >> 16) {
4118 rc = emulate_gp(ctxt, 0);
4119 goto out;
4120 }
4121
4122 if (rc == X86EMUL_CONTINUE)
4123 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4124
4125 out:
4126 kvm_fpu_put();
4127
4128 return rc;
4129 }
4130
4131 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4132 {
4133 u32 eax, ecx, edx;
4134
4135 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
4136 return emulate_ud(ctxt);
4137
4138 eax = reg_read(ctxt, VCPU_REGS_RAX);
4139 edx = reg_read(ctxt, VCPU_REGS_RDX);
4140 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4141
4142 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4143 return emulate_gp(ctxt, 0);
4144
4145 return X86EMUL_CONTINUE;
4146 }
4147
4148 static bool valid_cr(int nr)
4149 {
4150 switch (nr) {
4151 case 0:
4152 case 2 ... 4:
4153 case 8:
4154 return true;
4155 default:
4156 return false;
4157 }
4158 }
4159
4160 static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4161 {
4162 if (!valid_cr(ctxt->modrm_reg))
4163 return emulate_ud(ctxt);
4164
4165 return X86EMUL_CONTINUE;
4166 }
4167
4168 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4169 {
4170 unsigned long dr7;
4171
4172 ctxt->ops->get_dr(ctxt, 7, &dr7);
4173
4174 /* Check if DR7.Global_Enable is set */
4175 return dr7 & (1 << 13);
4176 }
4177
4178 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4179 {
4180 int dr = ctxt->modrm_reg;
4181 u64 cr4;
4182
4183 if (dr > 7)
4184 return emulate_ud(ctxt);
4185
4186 cr4 = ctxt->ops->get_cr(ctxt, 4);
4187 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4188 return emulate_ud(ctxt);
4189
4190 if (check_dr7_gd(ctxt)) {
4191 ulong dr6;
4192
4193 ctxt->ops->get_dr(ctxt, 6, &dr6);
4194 dr6 &= ~DR_TRAP_BITS;
4195 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
4196 ctxt->ops->set_dr(ctxt, 6, dr6);
4197 return emulate_db(ctxt);
4198 }
4199
4200 return X86EMUL_CONTINUE;
4201 }
4202
4203 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4204 {
4205 u64 new_val = ctxt->src.val64;
4206 int dr = ctxt->modrm_reg;
4207
4208 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4209 return emulate_gp(ctxt, 0);
4210
4211 return check_dr_read(ctxt);
4212 }
4213
4214 static int check_svme(struct x86_emulate_ctxt *ctxt)
4215 {
4216 u64 efer = 0;
4217
4218 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4219
4220 if (!(efer & EFER_SVME))
4221 return emulate_ud(ctxt);
4222
4223 return X86EMUL_CONTINUE;
4224 }
4225
4226 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4227 {
4228 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4229
4230 /* Valid physical address? */
4231 if (rax & 0xffff000000000000ULL)
4232 return emulate_gp(ctxt, 0);
4233
4234 return check_svme(ctxt);
4235 }
4236
4237 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4238 {
4239 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4240
4241 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4242 return emulate_gp(ctxt, 0);
4243
4244 return X86EMUL_CONTINUE;
4245 }
4246
4247 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4248 {
4249 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4250 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4251
4252 /*
4253  * VMware allows access to these pseudo-PMCs even when read via RDPMC
4254  * in Ring3 when CR4.PCE=0.
4255  */
4256 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4257 return X86EMUL_CONTINUE;
4258
4259 /*
4260  * RDPMC is allowed at CPL > 0 only when CR4.PCE is set; otherwise, and
4261  * whenever the requested counter index is invalid for this vCPU, inject
4262  * #GP(0).
4263  */
4264 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4265 ctxt->ops->check_pmc(ctxt, rcx))
4266 return emulate_gp(ctxt, 0);
4267
4268 return X86EMUL_CONTINUE;
4269 }
4270
4271 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4272 {
4273 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4274 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4275 return emulate_gp(ctxt, 0);
4276
4277 return X86EMUL_CONTINUE;
4278 }
4279
4280 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4281 {
4282 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4283 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4284 return emulate_gp(ctxt, 0);
4285
4286 return X86EMUL_CONTINUE;
4287 }
4288
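/*
 * Shorthand for building the opcode tables below: D() declares an opcode by
 * flags alone, I()/F() attach an execute or fastop callback, DI()/II() add an
 * intercept, DIP()/IIP() add an intercept plus a permission check, and
 * G/GD/E/ID/MD/GP/EXT redirect decoding into group, group-dual, escape,
 * instruction-dual, mode-dual, prefix and ModRM-extension sub-tables.
 */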
4289 #define D(_y) { .flags = (_y) }
4290 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4291 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4292 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4293 #define N D(NotImpl)
4294 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4295 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4296 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4297 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4298 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4299 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4300 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4301 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4302 #define II(_f, _e, _i) \
4303 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4304 #define IIP(_f, _e, _i, _p) \
4305 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4306 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4307 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4308
4309 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4310 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4311 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4312 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4313 #define I2bvIP(_f, _e, _i, _p) \
4314 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4315
4316 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4317 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4318 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4319
4320 static const struct opcode group7_rm0[] = {
4321 N,
4322 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4323 N, N, N, N, N, N,
4324 };
4325
4326 static const struct opcode group7_rm1[] = {
4327 DI(SrcNone | Priv, monitor),
4328 DI(SrcNone | Priv, mwait),
4329 N, N, N, N, N, N,
4330 };
4331
4332 static const struct opcode group7_rm2[] = {
4333 N,
4334 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4335 N, N, N, N, N, N,
4336 };
4337
4338 static const struct opcode group7_rm3[] = {
4339 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4340 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4341 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4342 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4343 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4344 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4345 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4346 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4347 };
4348
4349 static const struct opcode group7_rm7[] = {
4350 N,
4351 DIP(SrcNone, rdtscp, check_rdtsc),
4352 N, N, N, N, N, N,
4353 };
4354
4355 static const struct opcode group1[] = {
4356 F(Lock, em_add),
4357 F(Lock | PageTable, em_or),
4358 F(Lock, em_adc),
4359 F(Lock, em_sbb),
4360 F(Lock | PageTable, em_and),
4361 F(Lock, em_sub),
4362 F(Lock, em_xor),
4363 F(NoWrite, em_cmp),
4364 };
4365
4366 static const struct opcode group1A[] = {
4367 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4368 };
4369
4370 static const struct opcode group2[] = {
4371 F(DstMem | ModRM, em_rol),
4372 F(DstMem | ModRM, em_ror),
4373 F(DstMem | ModRM, em_rcl),
4374 F(DstMem | ModRM, em_rcr),
4375 F(DstMem | ModRM, em_shl),
4376 F(DstMem | ModRM, em_shr),
4377 F(DstMem | ModRM, em_shl),
4378 F(DstMem | ModRM, em_sar),
4379 };
4380
4381 static const struct opcode group3[] = {
4382 F(DstMem | SrcImm | NoWrite, em_test),
4383 F(DstMem | SrcImm | NoWrite, em_test),
4384 F(DstMem | SrcNone | Lock, em_not),
4385 F(DstMem | SrcNone | Lock, em_neg),
4386 F(DstXacc | Src2Mem, em_mul_ex),
4387 F(DstXacc | Src2Mem, em_imul_ex),
4388 F(DstXacc | Src2Mem, em_div_ex),
4389 F(DstXacc | Src2Mem, em_idiv_ex),
4390 };
4391
4392 static const struct opcode group4[] = {
4393 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4394 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4395 N, N, N, N, N, N,
4396 };
4397
4398 static const struct opcode group5[] = {
4399 F(DstMem | SrcNone | Lock, em_inc),
4400 F(DstMem | SrcNone | Lock, em_dec),
4401 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4402 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4403 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4404 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4405 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4406 };
4407
4408 static const struct opcode group6[] = {
4409 II(Prot | DstMem, em_sldt, sldt),
4410 II(Prot | DstMem, em_str, str),
4411 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4412 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4413 N, N, N, N,
4414 };
4415
4416 static const struct group_dual group7 = { {
4417 II(Mov | DstMem, em_sgdt, sgdt),
4418 II(Mov | DstMem, em_sidt, sidt),
4419 II(SrcMem | Priv, em_lgdt, lgdt),
4420 II(SrcMem | Priv, em_lidt, lidt),
4421 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4422 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4423 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4424 }, {
4425 EXT(0, group7_rm0),
4426 EXT(0, group7_rm1),
4427 EXT(0, group7_rm2),
4428 EXT(0, group7_rm3),
4429 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4430 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4431 EXT(0, group7_rm7),
4432 } };
4433
4434 static const struct opcode group8[] = {
4435 N, N, N, N,
4436 F(DstMem | SrcImmByte | NoWrite, em_bt),
4437 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4438 F(DstMem | SrcImmByte | Lock, em_btr),
4439 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4440 };
4441
4442 /*
4443  * The "memory" destination is actually always a register, since we come
4444  * from the register case of group9.
4445  */
4446 static const struct gprefix pfx_0f_c7_7 = {
4447 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4448 };
4449
4450
4451 static const struct group_dual group9 = { {
4452 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4453 }, {
4454 N, N, N, N, N, N, N,
4455 GP(0, &pfx_0f_c7_7),
4456 } };
4457
4458 static const struct opcode group11[] = {
4459 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4460 X7(D(Undefined)),
4461 };
4462
4463 static const struct gprefix pfx_0f_ae_7 = {
4464 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4465 };
4466
4467 static const struct group_dual group15 = { {
4468 I(ModRM | Aligned16, em_fxsave),
4469 I(ModRM | Aligned16, em_fxrstor),
4470 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4471 }, {
4472 N, N, N, N, N, N, N, N,
4473 } };
4474
4475 static const struct gprefix pfx_0f_6f_0f_7f = {
4476 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4477 };
4478
4479 static const struct instr_dual instr_dual_0f_2b = {
4480 I(0, em_mov), N
4481 };
4482
4483 static const struct gprefix pfx_0f_2b = {
4484 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4485 };
4486
4487 static const struct gprefix pfx_0f_10_0f_11 = {
4488 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4489 };
4490
4491 static const struct gprefix pfx_0f_28_0f_29 = {
4492 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4493 };
4494
4495 static const struct gprefix pfx_0f_e7 = {
4496 N, I(Sse, em_mov), N, N,
4497 };
4498
4499 static const struct escape escape_d9 = { {
4500 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4501 }, {
4502 /* 0xC0 - 0xC7 */
4503 N, N, N, N, N, N, N, N,
4504 /* 0xC8 - 0xCF */
4505 N, N, N, N, N, N, N, N,
4506 /* 0xD0 - 0xD7 */
4507 N, N, N, N, N, N, N, N,
4508 /* 0xD8 - 0xDF */
4509 N, N, N, N, N, N, N, N,
4510 /* 0xE0 - 0xE7 */
4511 N, N, N, N, N, N, N, N,
4512 /* 0xE8 - 0xEF */
4513 N, N, N, N, N, N, N, N,
4514 /* 0xF0 - 0xF7 */
4515 N, N, N, N, N, N, N, N,
4516 /* 0xF8 - 0xFF */
4517 N, N, N, N, N, N, N, N,
4518 } };
4519
4520 static const struct escape escape_db = { {
4521 N, N, N, N, N, N, N, N,
4522 }, {
4523 /* 0xC0 - 0xC7 */
4524 N, N, N, N, N, N, N, N,
4525 /* 0xC8 - 0xCF */
4526 N, N, N, N, N, N, N, N,
4527 /* 0xD0 - 0xD7 */
4528 N, N, N, N, N, N, N, N,
4529 /* 0xD8 - 0xDF */
4530 N, N, N, N, N, N, N, N,
4531 /* 0xE0 - 0xE7 */
4532 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4533 /* 0xE8 - 0xEF */
4534 N, N, N, N, N, N, N, N,
4535 /* 0xF0 - 0xF7 */
4536 N, N, N, N, N, N, N, N,
4537 /* 0xF8 - 0xFF */
4538 N, N, N, N, N, N, N, N,
4539 } };
4540
4541 static const struct escape escape_dd = { {
4542 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4543 }, {
4544 /* 0xC0 - 0xC7 */
4545 N, N, N, N, N, N, N, N,
4546 /* 0xC8 - 0xCF */
4547 N, N, N, N, N, N, N, N,
4548 /* 0xD0 - 0xD7 */
4549 N, N, N, N, N, N, N, N,
4550 /* 0xD8 - 0xDF */
4551 N, N, N, N, N, N, N, N,
4552 /* 0xE0 - 0xE7 */
4553 N, N, N, N, N, N, N, N,
4554 /* 0xE8 - 0xEF */
4555 N, N, N, N, N, N, N, N,
4556 /* 0xF0 - 0xF7 */
4557 N, N, N, N, N, N, N, N,
4558 /* 0xF8 - 0xFF */
4559 N, N, N, N, N, N, N, N,
4560 } };
4561
4562 static const struct instr_dual instr_dual_0f_c3 = {
4563 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4564 };
4565
4566 static const struct mode_dual mode_dual_63 = {
4567 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4568 };
4569
4570 static const struct instr_dual instr_dual_8d = {
4571 D(DstReg | SrcMem | ModRM | NoAccess), N
4572 };
4573
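/* Primary one-byte opcode table, indexed by the opcode byte. */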
4574 static const struct opcode opcode_table[256] = {
4575 /* 0x00 - 0x07 */
4576 F6ALU(Lock, em_add),
4577 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4578 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4579 /* 0x08 - 0x0F */
4580 F6ALU(Lock | PageTable, em_or),
4581 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4582 N,
4583 /* 0x10 - 0x17 */
4584 F6ALU(Lock, em_adc),
4585 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4586 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4587 /* 0x18 - 0x1F */
4588 F6ALU(Lock, em_sbb),
4589 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4590 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4591 /* 0x20 - 0x27 */
4592 F6ALU(Lock | PageTable, em_and), N, N,
4593 /* 0x28 - 0x2F */
4594 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4595 /* 0x30 - 0x37 */
4596 F6ALU(Lock, em_xor), N, N,
4597 /* 0x38 - 0x3F */
4598 F6ALU(NoWrite, em_cmp), N, N,
4599 /* 0x40 - 0x4F */
4600 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4601 /* 0x50 - 0x57 */
4602 X8(I(SrcReg | Stack, em_push)),
4603 /* 0x58 - 0x5F */
4604 X8(I(DstReg | Stack, em_pop)),
4605 /* 0x60 - 0x67 */
4606 I(ImplicitOps | Stack | No64, em_pusha),
4607 I(ImplicitOps | Stack | No64, em_popa),
4608 N, MD(ModRM, &mode_dual_63),
4609 N, N, N, N,
4610 /* 0x68 - 0x6F */
4611 I(SrcImm | Mov | Stack, em_push),
4612 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4613 I(SrcImmByte | Mov | Stack, em_push),
4614 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4615 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4616 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4617 /* 0x70 - 0x7F */
4618 X16(D(SrcImmByte | NearBranch | IsBranch)),
4619 /* 0x80 - 0x87 */
4620 G(ByteOp | DstMem | SrcImm, group1),
4621 G(DstMem | SrcImm, group1),
4622 G(ByteOp | DstMem | SrcImm | No64, group1),
4623 G(DstMem | SrcImmByte, group1),
4624 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4625 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4626 /* 0x88 - 0x8F */
4627 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4628 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4629 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4630 ID(0, &instr_dual_8d),
4631 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4632 G(0, group1A),
4633 /* 0x90 - 0x97 */
4634 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4635 /* 0x98 - 0x9F */
4636 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4637 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4638 II(ImplicitOps | Stack, em_pushf, pushf),
4639 II(ImplicitOps | Stack, em_popf, popf),
4640 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4641 /* 0xA0 - 0xA7 */
4642 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4643 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4644 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4645 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4646 /* 0xA8 - 0xAF */
4647 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4648 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4649 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4650 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4651 /* 0xB0 - 0xB7 */
4652 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4653 /* 0xB8 - 0xBF */
4654 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4655 /* 0xC0 - 0xC7 */
4656 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4657 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4658 I(ImplicitOps | NearBranch | IsBranch, em_ret),
4659 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4660 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4661 G(ByteOp, group11), G(0, group11),
4662 /* 0xC8 - 0xCF */
4663 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4664 I(Stack | IsBranch, em_leave),
4665 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4666 I(ImplicitOps | IsBranch, em_ret_far),
4667 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4668 D(ImplicitOps | No64 | IsBranch),
4669 II(ImplicitOps | IsBranch, em_iret, iret),
4670 /* 0xD0 - 0xD7 */
4671 G(Src2One | ByteOp, group2), G(Src2One, group2),
4672 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4673 I(DstAcc | SrcImmUByte | No64, em_aam),
4674 I(DstAcc | SrcImmUByte | No64, em_aad),
4675 F(DstAcc | ByteOp | No64, em_salc),
4676 I(DstAcc | SrcXLat | ByteOp, em_mov),
4677 /* 0xD8 - 0xDF */
4678 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4679 /* 0xE0 - 0xE7 */
4680 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4681 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4682 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4683 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4684 /* 0xE8 - 0xEF */
4685 I(SrcImm | NearBranch | IsBranch, em_call),
4686 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4687 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4688 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4689 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4690 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4691 /* 0xF0 - 0xF7 */
4692 N, DI(ImplicitOps, icebp), N, N,
4693 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4694 G(ByteOp, group3), G(0, group3),
4695 /* 0xF8 - 0xFF */
4696 D(ImplicitOps), D(ImplicitOps),
4697 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4698 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4699 };
4700
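/* Two-byte (0x0F xx) opcode table, indexed by the second opcode byte. */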
4701 static const struct opcode twobyte_table[256] = {
4702 /* 0x00 - 0x0F */
4703 G(0, group6), GD(0, &group7), N, N,
4704 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4705 II(ImplicitOps | Priv, em_clts, clts), N,
4706 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4707 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4708 /* 0x10 - 0x1F */
4709 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4710 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4711 N, N, N, N, N, N,
4712 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4713 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4714 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4715 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4716 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4717 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4718 /* 0x20 - 0x2F */
4719 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4720 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4721 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4722 check_cr_access),
4723 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4724 check_dr_write),
4725 N, N, N, N,
4726 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4727 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4728 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4729 N, N, N, N,
4730 /* 0x30 - 0x3F */
4731 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4732 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4733 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4734 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4735 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4736 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4737 N, N,
4738 N, N, N, N, N, N, N, N,
4739 /* 0x40 - 0x4F */
4740 X16(D(DstReg | SrcMem | ModRM)),
4741 /* 0x50 - 0x5F */
4742 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4743 /* 0x60 - 0x6F */
4744 N, N, N, N,
4745 N, N, N, N,
4746 N, N, N, N,
4747 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4748 /* 0x70 - 0x7F */
4749 N, N, N, N,
4750 N, N, N, N,
4751 N, N, N, N,
4752 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4753 /* 0x80 - 0x8F */
4754 X16(D(SrcImm | NearBranch | IsBranch)),
4755 /* 0x90 - 0x9F */
4756 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4757 /* 0xA0 - 0xA7 */
4758 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4759 II(ImplicitOps, em_cpuid, cpuid),
4760 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4761 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4762 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4763 /* 0xA8 - 0xAF */
4764 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4765 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4766 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4767 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4768 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4769 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4770 /* 0xB0 - 0xB7 */
4771 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4772 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4773 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4774 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4775 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4776 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4777 /* 0xB8 - 0xBF */
4778 N, N,
4779 G(BitOp, group8),
4780 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4781 I(DstReg | SrcMem | ModRM, em_bsf_c),
4782 I(DstReg | SrcMem | ModRM, em_bsr_c),
4783 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4784 /* 0xC0 - 0xC7 */
4785 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4786 N, ID(0, &instr_dual_0f_c3),
4787 N, N, N, GD(0, &group9),
4788 /* 0xC8 - 0xCF */
4789 X8(I(DstReg, em_bswap)),
4790 /* 0xD0 - 0xDF */
4791 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4792 /* 0xE0 - 0xEF */
4793 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4794 N, N, N, N, N, N, N, N,
4795 /* 0xF0 - 0xFF */
4796 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4797 };
4798
4799 static const struct instr_dual instr_dual_0f_38_f0 = {
4800 I(DstReg | SrcMem | Mov, em_movbe), N
4801 };
4802
4803 static const struct instr_dual instr_dual_0f_38_f1 = {
4804 I(DstMem | SrcReg | Mov, em_movbe), N
4805 };
4806
4807 static const struct gprefix three_byte_0f_38_f0 = {
4808 ID(0, &instr_dual_0f_38_f0), N, N, N
4809 };
4810
4811 static const struct gprefix three_byte_0f_38_f1 = {
4812 ID(0, &instr_dual_0f_38_f1), N, N, N
4813 };
4814
4815 /*
4816  * Insns below are selected by the prefix, which is indexed by the third
4817  * opcode byte.
4818  */
4819 static const struct opcode opcode_map_0f_38[256] = {
4820 /* 0x00 - 0x7f */
4821 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4822 /* 0x80 - 0xef */
4823 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4824 /* 0xf0 - 0xf1 */
4825 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4826 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4827 /* 0xf2 - 0xff */
4828 N, N, X4(N), X8(N)
4829 };
4830
4831 #undef D
4832 #undef N
4833 #undef G
4834 #undef GD
4835 #undef I
4836 #undef GP
4837 #undef EXT
4838 #undef MD
4839 #undef ID
4840
4841 #undef D2bv
4842 #undef D2bvIP
4843 #undef I2bv
4844 #undef I2bvIP
4845 #undef F6ALU
4846
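/* Size in bytes of an immediate operand; 64-bit ops still use a 4-byte, sign-extended immediate. */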
4847 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4848 {
4849 unsigned size;
4850
4851 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4852 if (size == 8)
4853 size = 4;
4854 return size;
4855 }
4856
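/*
 * Fetch an immediate operand of @size bytes from the instruction stream and
 * sign- or zero-extend it according to @sign_extension.
 */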
4857 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4858 unsigned size, bool sign_extension)
4859 {
4860 int rc = X86EMUL_CONTINUE;
4861
4862 op->type = OP_IMM;
4863 op->bytes = size;
4864 op->addr.mem.ea = ctxt->_eip;
4865 /* NB. Immediates are sign-extended as necessary. */
4866 switch (op->bytes) {
4867 case 1:
4868 op->val = insn_fetch(s8, ctxt);
4869 break;
4870 case 2:
4871 op->val = insn_fetch(s16, ctxt);
4872 break;
4873 case 4:
4874 op->val = insn_fetch(s32, ctxt);
4875 break;
4876 case 8:
4877 op->val = insn_fetch(s64, ctxt);
4878 break;
4879 }
4880 if (!sign_extension) {
4881 switch (op->bytes) {
4882 case 1:
4883 op->val &= 0xff;
4884 break;
4885 case 2:
4886 op->val &= 0xffff;
4887 break;
4888 case 4:
4889 op->val &= 0xffffffff;
4890 break;
4891 }
4892 }
4893 done:
4894 return rc;
4895 }
4896
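/* Decode one operand, selected by the OpXXX value @d, into @op. */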
4897 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4898 unsigned d)
4899 {
4900 int rc = X86EMUL_CONTINUE;
4901
4902 switch (d) {
4903 case OpReg:
4904 decode_register_operand(ctxt, op);
4905 break;
4906 case OpImmUByte:
4907 rc = decode_imm(ctxt, op, 1, false);
4908 break;
4909 case OpMem:
4910 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4911 mem_common:
4912 *op = ctxt->memop;
4913 ctxt->memopp = op;
4914 if (ctxt->d & BitOp)
4915 fetch_bit_operand(ctxt);
4916 op->orig_val = op->val;
4917 break;
4918 case OpMem64:
4919 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4920 goto mem_common;
4921 case OpAcc:
4922 op->type = OP_REG;
4923 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4924 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4925 fetch_register_operand(op);
4926 op->orig_val = op->val;
4927 break;
4928 case OpAccLo:
4929 op->type = OP_REG;
4930 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4931 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4932 fetch_register_operand(op);
4933 op->orig_val = op->val;
4934 break;
4935 case OpAccHi:
4936 if (ctxt->d & ByteOp) {
4937 op->type = OP_NONE;
4938 break;
4939 }
4940 op->type = OP_REG;
4941 op->bytes = ctxt->op_bytes;
4942 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4943 fetch_register_operand(op);
4944 op->orig_val = op->val;
4945 break;
4946 case OpDI:
4947 op->type = OP_MEM;
4948 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4949 op->addr.mem.ea =
4950 register_address(ctxt, VCPU_REGS_RDI);
4951 op->addr.mem.seg = VCPU_SREG_ES;
4952 op->val = 0;
4953 op->count = 1;
4954 break;
4955 case OpDX:
4956 op->type = OP_REG;
4957 op->bytes = 2;
4958 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4959 fetch_register_operand(op);
4960 break;
4961 case OpCL:
4962 op->type = OP_IMM;
4963 op->bytes = 1;
4964 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4965 break;
4966 case OpImmByte:
4967 rc = decode_imm(ctxt, op, 1, true);
4968 break;
4969 case OpOne:
4970 op->type = OP_IMM;
4971 op->bytes = 1;
4972 op->val = 1;
4973 break;
4974 case OpImm:
4975 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4976 break;
4977 case OpImm64:
4978 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4979 break;
4980 case OpMem8:
4981 ctxt->memop.bytes = 1;
4982 if (ctxt->memop.type == OP_REG) {
4983 ctxt->memop.addr.reg = decode_register(ctxt,
4984 ctxt->modrm_rm, true);
4985 fetch_register_operand(&ctxt->memop);
4986 }
4987 goto mem_common;
4988 case OpMem16:
4989 ctxt->memop.bytes = 2;
4990 goto mem_common;
4991 case OpMem32:
4992 ctxt->memop.bytes = 4;
4993 goto mem_common;
4994 case OpImmU16:
4995 rc = decode_imm(ctxt, op, 2, false);
4996 break;
4997 case OpImmU:
4998 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4999 break;
5000 case OpSI:
5001 op->type = OP_MEM;
5002 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5003 op->addr.mem.ea =
5004 register_address(ctxt, VCPU_REGS_RSI);
5005 op->addr.mem.seg = ctxt->seg_override;
5006 op->val = 0;
5007 op->count = 1;
5008 break;
5009 case OpXLat:
5010 op->type = OP_MEM;
5011 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5012 op->addr.mem.ea =
5013 address_mask(ctxt,
5014 reg_read(ctxt, VCPU_REGS_RBX) +
5015 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5016 op->addr.mem.seg = ctxt->seg_override;
5017 op->val = 0;
5018 break;
5019 case OpImmFAddr:
5020 op->type = OP_IMM;
5021 op->addr.mem.ea = ctxt->_eip;
5022 op->bytes = ctxt->op_bytes + 2;
5023 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5024 break;
5025 case OpMemFAddr:
5026 ctxt->memop.bytes = ctxt->op_bytes + 2;
5027 goto mem_common;
5028 case OpES:
5029 op->type = OP_IMM;
5030 op->val = VCPU_SREG_ES;
5031 break;
5032 case OpCS:
5033 op->type = OP_IMM;
5034 op->val = VCPU_SREG_CS;
5035 break;
5036 case OpSS:
5037 op->type = OP_IMM;
5038 op->val = VCPU_SREG_SS;
5039 break;
5040 case OpDS:
5041 op->type = OP_IMM;
5042 op->val = VCPU_SREG_DS;
5043 break;
5044 case OpFS:
5045 op->type = OP_IMM;
5046 op->val = VCPU_SREG_FS;
5047 break;
5048 case OpGS:
5049 op->type = OP_IMM;
5050 op->val = VCPU_SREG_GS;
5051 break;
5052 case OpImplicit:
5053 /* Special instructions do their own operand decoding. */
5054 default:
5055 op->type = OP_NONE; /* Disable writeback. */
5056 break;
5057 }
5058
5059 done:
5060 return rc;
5061 }
5062
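/*
 * Decode the instruction at ctxt->eip: legacy/REX prefixes, opcode byte(s),
 * ModRM/SIB and displacement, then the source and destination operands.
 */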
5063 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
5064 {
5065 int rc = X86EMUL_CONTINUE;
5066 int mode = ctxt->mode;
5067 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5068 bool op_prefix = false;
5069 bool has_seg_override = false;
5070 struct opcode opcode;
5071 u16 dummy;
5072 struct desc_struct desc;
5073
5074 ctxt->memop.type = OP_NONE;
5075 ctxt->memopp = NULL;
5076 ctxt->_eip = ctxt->eip;
5077 ctxt->fetch.ptr = ctxt->fetch.data;
5078 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5079 ctxt->opcode_len = 1;
5080 ctxt->intercept = x86_intercept_none;
5081 if (insn_len > 0)
5082 memcpy(ctxt->fetch.data, insn, insn_len);
5083 else {
5084 rc = __do_insn_fetch_bytes(ctxt, 1);
5085 if (rc != X86EMUL_CONTINUE)
5086 goto done;
5087 }
5088
5089 switch (mode) {
5090 case X86EMUL_MODE_REAL:
5091 case X86EMUL_MODE_VM86:
5092 def_op_bytes = def_ad_bytes = 2;
5093 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5094 if (desc.d)
5095 def_op_bytes = def_ad_bytes = 4;
5096 break;
5097 case X86EMUL_MODE_PROT16:
5098 def_op_bytes = def_ad_bytes = 2;
5099 break;
5100 case X86EMUL_MODE_PROT32:
5101 def_op_bytes = def_ad_bytes = 4;
5102 break;
5103 #ifdef CONFIG_X86_64
5104 case X86EMUL_MODE_PROT64:
5105 def_op_bytes = 4;
5106 def_ad_bytes = 8;
5107 break;
5108 #endif
5109 default:
5110 return EMULATION_FAILED;
5111 }
5112
5113 ctxt->op_bytes = def_op_bytes;
5114 ctxt->ad_bytes = def_ad_bytes;
5115
5116 /* Legacy prefixes. */
5117 for (;;) {
5118 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5119 case 0x66:
5120 op_prefix = true;
5121 /* switch between 2/4 bytes */
5122 ctxt->op_bytes = def_op_bytes ^ 6;
5123 break;
5124 case 0x67:
5125 if (mode == X86EMUL_MODE_PROT64)
5126 /* switch between 4/8 bytes */
5127 ctxt->ad_bytes = def_ad_bytes ^ 12;
5128 else
5129 /* switch between 2/4 bytes */
5130 ctxt->ad_bytes = def_ad_bytes ^ 6;
5131 break;
5132 case 0x26:
5133 has_seg_override = true;
5134 ctxt->seg_override = VCPU_SREG_ES;
5135 break;
5136 case 0x2e:
5137 has_seg_override = true;
5138 ctxt->seg_override = VCPU_SREG_CS;
5139 break;
5140 case 0x36:
5141 has_seg_override = true;
5142 ctxt->seg_override = VCPU_SREG_SS;
5143 break;
5144 case 0x3e:
5145 has_seg_override = true;
5146 ctxt->seg_override = VCPU_SREG_DS;
5147 break;
5148 case 0x64:
5149 has_seg_override = true;
5150 ctxt->seg_override = VCPU_SREG_FS;
5151 break;
5152 case 0x65:
5153 has_seg_override = true;
5154 ctxt->seg_override = VCPU_SREG_GS;
5155 break;
5156 case 0x40 ... 0x4f:
5157 if (mode != X86EMUL_MODE_PROT64)
5158 goto done_prefixes;
5159 ctxt->rex_prefix = ctxt->b;
5160 continue;
5161 case 0xf0:
5162 ctxt->lock_prefix = 1;
5163 break;
5164 case 0xf2:
5165 case 0xf3:
5166 ctxt->rep_prefix = ctxt->b;
5167 break;
5168 default:
5169 goto done_prefixes;
5170 }
5171
5172 /* Any legacy prefix after a REX prefix nullifies its effect. */
5173
5174 ctxt->rex_prefix = 0;
5175 }
5176
5177 done_prefixes:
5178
5179 /* REX prefix. */
5180 if (ctxt->rex_prefix & 8)
5181 ctxt->op_bytes = 8; /* REX.W */
5182
5183 /* Opcode byte(s). */
5184 opcode = opcode_table[ctxt->b];
5185 /* Two-byte opcode? */
5186 if (ctxt->b == 0x0f) {
5187 ctxt->opcode_len = 2;
5188 ctxt->b = insn_fetch(u8, ctxt);
5189 opcode = twobyte_table[ctxt->b];
5190
5191 /* 0F_38 opcode map */
5192 if (ctxt->b == 0x38) {
5193 ctxt->opcode_len = 3;
5194 ctxt->b = insn_fetch(u8, ctxt);
5195 opcode = opcode_map_0f_38[ctxt->b];
5196 }
5197 }
5198 ctxt->d = opcode.flags;
5199
5200 if (ctxt->d & ModRM)
5201 ctxt->modrm = insn_fetch(u8, ctxt);
5202
5203 /* vex-prefix instructions are not implemented */
5204 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5205 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5206 ctxt->d = NotImpl;
5207 }
5208
5209 while (ctxt->d & GroupMask) {
5210 switch (ctxt->d & GroupMask) {
5211 case Group:
5212 goffset = (ctxt->modrm >> 3) & 7;
5213 opcode = opcode.u.group[goffset];
5214 break;
5215 case GroupDual:
5216 goffset = (ctxt->modrm >> 3) & 7;
5217 if ((ctxt->modrm >> 6) == 3)
5218 opcode = opcode.u.gdual->mod3[goffset];
5219 else
5220 opcode = opcode.u.gdual->mod012[goffset];
5221 break;
5222 case RMExt:
5223 goffset = ctxt->modrm & 7;
5224 opcode = opcode.u.group[goffset];
5225 break;
5226 case Prefix:
5227 if (ctxt->rep_prefix && op_prefix)
5228 return EMULATION_FAILED;
5229 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5230 switch (simd_prefix) {
5231 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5232 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5233 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5234 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5235 }
5236 break;
5237 case Escape:
5238 if (ctxt->modrm > 0xbf) {
5239 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5240 u32 index = array_index_nospec(
5241 ctxt->modrm - 0xc0, size);
5242
5243 opcode = opcode.u.esc->high[index];
5244 } else {
5245 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5246 }
5247 break;
5248 case InstrDual:
5249 if ((ctxt->modrm >> 6) == 3)
5250 opcode = opcode.u.idual->mod3;
5251 else
5252 opcode = opcode.u.idual->mod012;
5253 break;
5254 case ModeDual:
5255 if (ctxt->mode == X86EMUL_MODE_PROT64)
5256 opcode = opcode.u.mdual->mode64;
5257 else
5258 opcode = opcode.u.mdual->mode32;
5259 break;
5260 default:
5261 return EMULATION_FAILED;
5262 }
5263
5264 ctxt->d &= ~(u64)GroupMask;
5265 ctxt->d |= opcode.flags;
5266 }
5267
5268 ctxt->is_branch = opcode.flags & IsBranch;
5269
5270 /* Unrecognised? */
5271 if (ctxt->d == 0)
5272 return EMULATION_FAILED;
5273
5274 ctxt->execute = opcode.u.execute;
5275
5276 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
5277 likely(!(ctxt->d & EmulateOnUD)))
5278 return EMULATION_FAILED;
5279
5280 if (unlikely(ctxt->d &
5281 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5282 No16))) {
5283 /*
5284  * These are copied unconditionally here, and checked unconditionally
5285  * in x86_emulate_insn().
5286  */
5287 ctxt->check_perm = opcode.check_perm;
5288 ctxt->intercept = opcode.intercept;
5289
5290 if (ctxt->d & NotImpl)
5291 return EMULATION_FAILED;
5292
5293 if (mode == X86EMUL_MODE_PROT64) {
5294 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5295 ctxt->op_bytes = 8;
5296 else if (ctxt->d & NearBranch)
5297 ctxt->op_bytes = 8;
5298 }
5299
5300 if (ctxt->d & Op3264) {
5301 if (mode == X86EMUL_MODE_PROT64)
5302 ctxt->op_bytes = 8;
5303 else
5304 ctxt->op_bytes = 4;
5305 }
5306
5307 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5308 ctxt->op_bytes = 4;
5309
5310 if (ctxt->d & Sse)
5311 ctxt->op_bytes = 16;
5312 else if (ctxt->d & Mmx)
5313 ctxt->op_bytes = 8;
5314 }
5315
5316 /* ModRM and SIB bytes. */
5317 if (ctxt->d & ModRM) {
5318 rc = decode_modrm(ctxt, &ctxt->memop);
5319 if (!has_seg_override) {
5320 has_seg_override = true;
5321 ctxt->seg_override = ctxt->modrm_seg;
5322 }
5323 } else if (ctxt->d & MemAbs)
5324 rc = decode_abs(ctxt, &ctxt->memop);
5325 if (rc != X86EMUL_CONTINUE)
5326 goto done;
5327
5328 if (!has_seg_override)
5329 ctxt->seg_override = VCPU_SREG_DS;
5330
5331 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5332
5333
5334
5335
5336 /* Decode and fetch the source operand: register, memory or immediate. */
5337 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5338 if (rc != X86EMUL_CONTINUE)
5339 goto done;
5340
5341
5342
5343
5344 /* Decode and fetch the second source operand: register, memory or immediate. */
5345 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5346 if (rc != X86EMUL_CONTINUE)
5347 goto done;
5348
5349 /* Decode and fetch the destination operand: register or memory. */
5350 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5351
5352 if (ctxt->rip_relative && likely(ctxt->memopp))
5353 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5354 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5355
5356 done:
5357 if (rc == X86EMUL_PROPAGATE_FAULT)
5358 ctxt->have_exception = true;
5359 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5360 }
5361
5362 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5363 {
5364 return ctxt->d & PageTable;
5365 }
5366
5367 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5368 {
5369 /*
5370  * The second termination condition only applies to REPE and REPNE.
5371  * If the repeat string operation prefix is REPE/REPZ or REPNE/REPNZ,
5372  * test the corresponding termination condition:
5373  *     - if REPE/REPZ and ZF = 0 then done
5374  *     - if REPNE/REPNZ and ZF = 1 then done
5375  */
5376 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5377 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5378 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5379 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5380 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5381 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5382 return true;
5383
5384 return false;
5385 }
5386
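/* Raise #MF if the guest's x87 unit has a pending exception. */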
5387 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5388 {
5389 int rc;
5390
5391 kvm_fpu_get();
5392 rc = asm_safe("fwait");
5393 kvm_fpu_put();
5394
5395 if (unlikely(rc != X86EMUL_CONTINUE))
5396 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5397
5398 return X86EMUL_CONTINUE;
5399 }
5400
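/* If the operand is an MMX register, read its current value into op->mm_val. */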
5401 static void fetch_possible_mmx_operand(struct operand *op)
5402 {
5403 if (op->type == OP_MM)
5404 kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5405 }
5406
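/*
 * Invoke a fastop stub: the guest's arithmetic flags are loaded, the stub for
 * the current operand size is called indirectly, and the resulting flags are
 * merged back into ctxt->eflags.
 */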
5407 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5408 {
5409 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5410
5411 if (!(ctxt->d & ByteOp))
5412 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5413
5414 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5415 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5416 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5417 : "c"(ctxt->src2.val));
5418
5419 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5420 if (!fop) /* exception is returned in fop variable */
5421 return emulate_de(ctxt);
5422 return X86EMUL_CONTINUE;
5423 }
5424
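/* Reset the per-instruction decode/emulation state kept in @ctxt. */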
5425 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5426 {
5427 /* Clear fields that are set conditionally but read without a guard. */
5428 ctxt->rip_relative = false;
5429 ctxt->rex_prefix = 0;
5430 ctxt->lock_prefix = 0;
5431 ctxt->rep_prefix = 0;
5432 ctxt->regs_valid = 0;
5433 ctxt->regs_dirty = 0;
5434
5435 ctxt->io_read.pos = 0;
5436 ctxt->io_read.end = 0;
5437 ctxt->mem_read.end = 0;
5438 }
5439
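/*
 * Execute a previously decoded instruction: perform privilege, mode and
 * intercept checks, fetch memory operands, dispatch to the handler (or the
 * inline cases below) and write the results back.
 */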
5440 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5441 {
5442 const struct x86_emulate_ops *ops = ctxt->ops;
5443 int rc = X86EMUL_CONTINUE;
5444 int saved_dst_type = ctxt->dst.type;
5445 unsigned emul_flags;
5446
5447 ctxt->mem_read.pos = 0;
5448
5449 /* LOCK prefix is allowed only with some instructions */
5450 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5451 rc = emulate_ud(ctxt);
5452 goto done;
5453 }
5454
5455 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5456 rc = emulate_ud(ctxt);
5457 goto done;
5458 }
5459
5460 emul_flags = ctxt->ops->get_hflags(ctxt);
5461 if (unlikely(ctxt->d &
5462 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5463 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5464 (ctxt->d & Undefined)) {
5465 rc = emulate_ud(ctxt);
5466 goto done;
5467 }
5468
5469 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5470 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5471 rc = emulate_ud(ctxt);
5472 goto done;
5473 }
5474
5475 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5476 rc = emulate_nm(ctxt);
5477 goto done;
5478 }
5479
5480 if (ctxt->d & Mmx) {
5481 rc = flush_pending_x87_faults(ctxt);
5482 if (rc != X86EMUL_CONTINUE)
5483 goto done;
5484
5485
5486
5487 /* Now that we know the FPU is exception-safe, we can fetch operands from it. */
5488 fetch_possible_mmx_operand(&ctxt->src);
5489 fetch_possible_mmx_operand(&ctxt->src2);
5490 if (!(ctxt->d & Mov))
5491 fetch_possible_mmx_operand(&ctxt->dst);
5492 }
5493
5494 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5495 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5496 X86_ICPT_PRE_EXCEPT);
5497 if (rc != X86EMUL_CONTINUE)
5498 goto done;
5499 }
5500
5501 /* Instruction can only be executed in protected mode */
5502 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5503 rc = emulate_ud(ctxt);
5504 goto done;
5505 }
5506
5507 /* Privileged instruction can be executed only in CPL=0 */
5508 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5509 if (ctxt->d & PrivUD)
5510 rc = emulate_ud(ctxt);
5511 else
5512 rc = emulate_gp(ctxt, 0);
5513 goto done;
5514 }
5515
5516 /* Do instruction specific permission checks */
5517 if (ctxt->d & CheckPerm) {
5518 rc = ctxt->check_perm(ctxt);
5519 if (rc != X86EMUL_CONTINUE)
5520 goto done;
5521 }
5522
5523 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5524 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5525 X86_ICPT_POST_EXCEPT);
5526 if (rc != X86EMUL_CONTINUE)
5527 goto done;
5528 }
5529
5530 if (ctxt->rep_prefix && (ctxt->d & String)) {
5531 /* All REP prefixes have the same first termination condition */
5532 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5533 string_registers_quirk(ctxt);
5534 ctxt->eip = ctxt->_eip;
5535 ctxt->eflags &= ~X86_EFLAGS_RF;
5536 goto done;
5537 }
5538 }
5539 }
5540
5541 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5542 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5543 ctxt->src.valptr, ctxt->src.bytes);
5544 if (rc != X86EMUL_CONTINUE)
5545 goto done;
5546 ctxt->src.orig_val64 = ctxt->src.val64;
5547 }
5548
5549 if (ctxt->src2.type == OP_MEM) {
5550 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5551 &ctxt->src2.val, ctxt->src2.bytes);
5552 if (rc != X86EMUL_CONTINUE)
5553 goto done;
5554 }
5555
5556 if ((ctxt->d & DstMask) == ImplicitOps)
5557 goto special_insn;
5558
5559
5560 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5561 /* optimisation - avoid slow emulated read if Mov */
5562 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5563 &ctxt->dst.val, ctxt->dst.bytes);
5564 if (rc != X86EMUL_CONTINUE) {
5565 if (!(ctxt->d & NoWrite) &&
5566 rc == X86EMUL_PROPAGATE_FAULT &&
5567 ctxt->exception.vector == PF_VECTOR)
5568 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5569 goto done;
5570 }
5571 }
5572
5573 ctxt->dst.orig_val64 = ctxt->dst.val64;
5574
5575 special_insn:
5576
5577 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5578 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5579 X86_ICPT_POST_MEMACCESS);
5580 if (rc != X86EMUL_CONTINUE)
5581 goto done;
5582 }
5583
5584 if (ctxt->rep_prefix && (ctxt->d & String))
5585 ctxt->eflags |= X86_EFLAGS_RF;
5586 else
5587 ctxt->eflags &= ~X86_EFLAGS_RF;
5588
5589 if (ctxt->execute) {
5590 if (ctxt->d & Fastop)
5591 rc = fastop(ctxt, ctxt->fop);
5592 else
5593 rc = ctxt->execute(ctxt);
5594 if (rc != X86EMUL_CONTINUE)
5595 goto done;
5596 goto writeback;
5597 }
5598
5599 if (ctxt->opcode_len == 2)
5600 goto twobyte_insn;
5601 else if (ctxt->opcode_len == 3)
5602 goto threebyte_insn;
5603
5604 switch (ctxt->b) {
5605 case 0x70 ... 0x7f: /* jcc (short) */
5606 if (test_cc(ctxt->b, ctxt->eflags))
5607 rc = jmp_rel(ctxt, ctxt->src.val);
5608 break;
5609 case 0x8d: /* lea r16/r32, m */
5610 ctxt->dst.val = ctxt->src.addr.mem.ea;
5611 break;
5612 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5613 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5614 ctxt->dst.type = OP_NONE;
5615 else
5616 rc = em_xchg(ctxt);
5617 break;
5618 case 0x98: /* cbw/cwde/cdqe */
5619 switch (ctxt->op_bytes) {
5620 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5621 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5622 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5623 }
5624 break;
5625 case 0xcc:
5626 rc = emulate_int(ctxt, 3);
5627 break;
5628 case 0xcd:
5629 rc = emulate_int(ctxt, ctxt->src.val);
5630 break;
5631 case 0xce:
5632 if (ctxt->eflags & X86_EFLAGS_OF)
5633 rc = emulate_int(ctxt, 4);
5634 break;
5635 case 0xe9: /* jmp rel */
5636 case 0xeb: /* jmp rel short */
5637 rc = jmp_rel(ctxt, ctxt->src.val);
5638 ctxt->dst.type = OP_NONE;
5639 break;
5640 case 0xf4:
5641 ctxt->ops->halt(ctxt);
5642 break;
5643 case 0xf5: /* cmc */
5644 /* complement carry flag from eflags reg */
5645 ctxt->eflags ^= X86_EFLAGS_CF;
5646 break;
5647 case 0xf8:
5648 ctxt->eflags &= ~X86_EFLAGS_CF;
5649 break;
5650 case 0xf9:
5651 ctxt->eflags |= X86_EFLAGS_CF;
5652 break;
5653 case 0xfc:
5654 ctxt->eflags &= ~X86_EFLAGS_DF;
5655 break;
5656 case 0xfd:
5657 ctxt->eflags |= X86_EFLAGS_DF;
5658 break;
5659 default:
5660 goto cannot_emulate;
5661 }
5662
5663 if (rc != X86EMUL_CONTINUE)
5664 goto done;
5665
5666 writeback:
5667 if (ctxt->d & SrcWrite) {
5668 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5669 rc = writeback(ctxt, &ctxt->src);
5670 if (rc != X86EMUL_CONTINUE)
5671 goto done;
5672 }
5673 if (!(ctxt->d & NoWrite)) {
5674 rc = writeback(ctxt, &ctxt->dst);
5675 if (rc != X86EMUL_CONTINUE)
5676 goto done;
5677 }
5678
5679 /*
5680  * restore dst type in case the decoding will be reused
5681  * (happens for string instructions)
5682  */
5683 ctxt->dst.type = saved_dst_type;
5684
5685 if ((ctxt->d & SrcMask) == SrcSI)
5686 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5687
5688 if ((ctxt->d & DstMask) == DstDI)
5689 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5690
5691 if (ctxt->rep_prefix && (ctxt->d & String)) {
5692 unsigned int count;
5693 struct read_cache *r = &ctxt->io_read;
5694 if ((ctxt->d & SrcMask) == SrcSI)
5695 count = ctxt->src.count;
5696 else
5697 count = ctxt->dst.count;
5698 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5699
5700 if (!string_insn_completed(ctxt)) {
5701 /*
5702  * Re-enter guest when pio read ahead buffer is empty
5703  * or, if it is not used, after every 1024 iterations.
5704  */
5705 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5706 (r->end == 0 || r->end != r->pos)) {
5707 /*
5708  * Reset read cache. Usually happens before
5709  * decode, but since instruction is restarted
5710  * we have to do it here.
5711  */
5712 ctxt->mem_read.end = 0;
5713 writeback_registers(ctxt);
5714 return EMULATION_RESTART;
5715 }
5716 goto done;
5717 }
5718 ctxt->eflags &= ~X86_EFLAGS_RF;
5719 }
5720
5721 ctxt->eip = ctxt->_eip;
5722 if (ctxt->mode != X86EMUL_MODE_PROT64)
5723 ctxt->eip = (u32)ctxt->_eip;
5724
5725 done:
5726 if (rc == X86EMUL_PROPAGATE_FAULT) {
5727 if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5728 return EMULATION_FAILED;
5729 ctxt->have_exception = true;
5730 }
5731 if (rc == X86EMUL_INTERCEPTED)
5732 return EMULATION_INTERCEPTED;
5733
5734 if (rc == X86EMUL_CONTINUE)
5735 writeback_registers(ctxt);
5736
5737 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5738
5739 twobyte_insn:
5740 switch (ctxt->b) {
5741 case 0x09:
5742 (ctxt->ops->wbinvd)(ctxt);
5743 break;
5744 case 0x08: /* invd */
5745 case 0x0d: /* GrpP (prefetch) */
5746 case 0x18: /* Grp16 (prefetch/nop) */
5747 case 0x1f: /* nop */
5748 break;
5749 case 0x20: /* mov cr, reg */
5750 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5751 break;
5752 case 0x21: /* mov from dr to reg */
5753 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5754 break;
5755 case 0x40 ... 0x4f: /* cmov */
5756 if (test_cc(ctxt->b, ctxt->eflags))
5757 ctxt->dst.val = ctxt->src.val;
5758 else if (ctxt->op_bytes != 4)
5759 ctxt->dst.type = OP_NONE;
5760 break;
5761 case 0x80 ... 0x8f: /* jcc rel (near) */
5762 if (test_cc(ctxt->b, ctxt->eflags))
5763 rc = jmp_rel(ctxt, ctxt->src.val);
5764 break;
5765 case 0x90 ... 0x9f: /* setcc r/m8 */
5766 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5767 break;
5768 case 0xb6 ... 0xb7: /* movzx */
5769 ctxt->dst.bytes = ctxt->op_bytes;
5770 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5771 : (u16) ctxt->src.val;
5772 break;
5773 case 0xbe ... 0xbf: /* movsx */
5774 ctxt->dst.bytes = ctxt->op_bytes;
5775 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5776 (s16) ctxt->src.val;
5777 break;
5778 default:
5779 goto cannot_emulate;
5780 }
5781
5782 threebyte_insn:
5783
5784 if (rc != X86EMUL_CONTINUE)
5785 goto done;
5786
5787 goto writeback;
5788
5789 cannot_emulate:
5790 return EMULATION_FAILED;
5791 }
5792
5793 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5794 {
5795 invalidate_registers(ctxt);
5796 }
5797
5798 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5799 {
5800 writeback_registers(ctxt);
5801 }
5802
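/* Using a cached GPA is unsafe for REP string ops and instructions with two memory operands. */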
5803 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5804 {
5805 if (ctxt->rep_prefix && (ctxt->d & String))
5806 return false;
5807
5808 if (ctxt->d & TwoMemOp)
5809 return false;
5810
5811 return true;
5812 }