0001
0002 #ifndef _ASM_X86_VM86_H
0003 #define _ASM_X86_VM86_H
0004
0005 #include <asm/ptrace.h>
0006 #include <uapi/asm/vm86.h>
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
/*
 * In-kernel image of the register state of a vm86 task.  Layout is
 * ABI-sensitive (it mirrors what entry code pushes) — do not reorder.
 */
struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	struct pt_regs pt;
/*
 * these are specific to v86 mode: the segment registers that only
 * exist as 16-bit values in virtual-8086 mode, each padded to 32 bits
 * by a dummy high half (__esh etc.) to keep the stack frame uniform.
 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
0030
/*
 * Per-task vm86 state, kept while a task is running in virtual-8086
 * mode so the 32-bit context can be restored on exit.
 */
struct vm86 {
	/* userspace vm86plus_struct this state was entered from / synced to */
	struct vm86plus_struct __user *user_vm86;
	/* 32-bit register state saved at vm86 entry */
	struct pt_regs regs32;
	/* virtual EFLAGS value and the mask of flags emulated for the task
	 * (NOTE(review): exact flag semantics defined by vm86 entry/exit code —
	 * not visible here) */
	unsigned long veflags;
	unsigned long veflags_mask;
	/* saved thread sp0, restored when leaving vm86 mode */
	unsigned long saved_sp0;

	/* fields mirrored from the userspace vm86 structures */
	unsigned long flags;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
0044
#ifdef CONFIG_VM86

/* Fault/trap entry points called from x86 fault handling while in vm86 mode. */
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
/* Copy vm86 register state back to userspace and leave vm86 mode. */
void save_v86_state(struct kernel_vm86_regs *, int);

struct task_struct;
0052
/*
 * Free the thread's vm86 state, if any, and NULL the pointer so a
 * second invocation is a harmless no-op.  't' is a struct thread_struct *;
 * it is evaluated exactly once via the __t temporary.
 */
#define free_vm86(t) do { \
	struct thread_struct *__t = (t); \
	if (__t->vm86 != NULL) { \
		kfree(__t->vm86); \
		__t->vm86 = NULL; \
	} \
} while (0)
0060
0061
0062
0063
0064
/* vm86 tasks may only register handlers for IRQs 3..15. */
#define FIRST_VM86_IRQ 3
#define LAST_VM86_IRQ 15

/*
 * Return nonzero when @irq lies outside the range a vm86 task is
 * permitted to claim, zero when it is acceptable.
 */
static inline int invalid_vm86_irq(int irq)
{
	return !(FIRST_VM86_IRQ <= irq && irq <= LAST_VM86_IRQ);
}
0072
0073 void release_vm86_irqs(struct task_struct *);
0074
0075 #else
0076
/* CONFIG_VM86 disabled: these operations compile away to nothing. */
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)
0079
/*
 * CONFIG_VM86 disabled: no task can be in vm86 mode, so a vm86 trap
 * can never need handling — always report "not handled" (0).
 */
static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
	return 0;
}
0084
/* CONFIG_VM86 disabled: there is no vm86 state to save. */
static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }
0086
/* CONFIG_VM86 disabled: no vm86 state is ever allocated, nothing to free. */
#define free_vm86(t) do { } while(0)
0088
0089 #endif
0090
0091 #endif