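// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994  Linus Torvalds
 */
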
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
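
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

/*
 * 8- and 16-bit register defines..
 */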
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	BUG_ON(!vm86);

	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
			       sizeof(struct vm86plus_struct) :
			       sizeof(struct vm86_struct)))
		goto Efault;

	unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
	unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
	unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
	unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
	unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
	unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
	unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
	unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
	unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
	unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
	unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
	unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
	unsafe_put_user(regs->es, &user->regs.es, Efault_end);
	unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
	unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
	unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);

	/*
	 * Don't write screen_bitmap in case some user had a value there
	 * and expected it to remain unchanged.
	 */

	user_access_end();

exit_vm86:
	preempt_disable();
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	update_task_stack(tsk);
	refresh_sysenter_cs(&tsk->thread);
	vm86->saved_sp0 = 0;
	preempt_enable();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	loadsegment(gs, vm86->regs32.gs);

	regs->pt.ax = retval;
	return;

Efault_end:
	user_access_end();
Efault:
	pr_alert("could not access userspace vm86 info\n");
	force_exit_sig(SIGSEGV);
	goto exit_vm86;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as (invalid) address to vm86_struct.
		 *  So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}

static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;
	struct vm86_struct v;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap.  Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (copy_from_user(&v, user_vm86,
			   offsetof(struct vm86_struct, int_revectored)))
		return -EFAULT;

	/* VM86_SCREEN_BITMAP support was removed; reject it with a one-time notice. */
	if (v.flags & VM86_SCREEN_BITMAP) {
		char comm[TASK_COMM_LEN];

		pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current));
		return -EINVAL;
	}

	memset(&vm86regs, 0, sizeof(vm86regs));

	vm86regs.pt.bx = v.regs.ebx;
	vm86regs.pt.cx = v.regs.ecx;
	vm86regs.pt.dx = v.regs.edx;
	vm86regs.pt.si = v.regs.esi;
	vm86regs.pt.di = v.regs.edi;
	vm86regs.pt.bp = v.regs.ebp;
	vm86regs.pt.ax = v.regs.eax;
	vm86regs.pt.ip = v.regs.eip;
	vm86regs.pt.cs = v.regs.cs;
	vm86regs.pt.flags = v.regs.eflags;
	vm86regs.pt.sp = v.regs.esp;
	vm86regs.pt.ss = v.regs.ss;
	vm86regs.es = v.regs.es;
	vm86regs.ds = v.regs.ds;
	vm86regs.fs = v.regs.fs;
	vm86regs.gs = v.regs.gs;

	vm86->flags = v.flags;
	vm86->cpu_type = v.cpu_type;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state
 */
	vm86->saved_sp0 = tsk->thread.sp0;
	savesegment(gs, vm86->regs32.gs);

	/* make room for real-mode segments */
	preempt_disable();
	tsk->thread.sp0 += 16;

	if (boot_cpu_has(X86_FEATURE_SEP)) {
		tsk->thread.sysenter_cs = 0;
		refresh_sysenter_cs(&tsk->thread);
	}

	update_task_stack(tsk);
	preempt_enable();

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	return regs->ax;
}
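
/*
 * The guest's interrupt flag is virtualized: these helpers track it as
 * X86_EFLAGS_VIF in the task's veflags instead of touching the real
 * EFLAGS.IF.
 */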
static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
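
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */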
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	return test_bit(nr, bitmap->__map);
}

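/*
 * Real-mode stack helpers: push/pop 8-, 16- and 32-bit values one byte
 * at a time via put_user()/get_user(), jumping to err_label on a fault.
 * The 16-bit ptr argument wraps around like a real-mode SP/IP would.
 */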
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
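
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */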
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

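/*
 * Handle a trap raised while in vm86 mode: for vm86plus, #DB and #BP
 * return to the 32-bit monitor as VM86_TRAP and everything else is
 * reflected through the virtual-8086 interrupt table; plain vm86 only
 * handles #DB (as SIGTRAP) here.  Returns 0 if the trap was consumed,
 * 1 if the caller should deliver it normally.
 */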
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1;
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}

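/*
 * Emulate the privileged instructions (pushf/popf/int/iret/cli/sti,
 * with optional operand-size prefixes) that fault with #GP in vm86
 * mode.
 */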
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and has just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}
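
/* ---------------- vm86 special IRQ passing stuff ----------------- */
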
#define VM86_IRQNAME "vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}