/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus
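/*
 * Note: KVM86/VMPI rely on a local `regs` that really points at the
 * kernel_vm86_regs embedded at the top of a kernel_vm86_struct; see
 * do_sys_vm86() below, which parks that struct right under the kernel
 * stack pointer used for entries from vm86 mode.
 */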
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
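/*
 * These rely on x86 being little-endian: AL(regs) is byte 0 of pt.ax,
 * AH(regs) is byte 1, and IP/SP alias just the low 16 bits of the
 * saved 32-bit ip/sp slots.
 */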
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
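/*
 * set_flags(X, new, mask) keeps the bits of X outside `mask` and takes
 * the bits inside `mask` from `new`. SAFE_MASK (CF, PF, AF, ZF, SF,
 * TF, DF, OF) covers the bits vm86 code may change freely; RETURN_MASK
 * additionally passes through everything below IOPL except IF, which
 * is virtualized separately as VIF.
 */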
/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then rest including orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}
/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);

	return ret;
}
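/*
 * save_v86_state() is the way back out of vm86 mode: it writes the
 * vm86 register image back to the user's vm86_info, restores the
 * saved kernel stack and segment state, and returns the 32-bit
 * pt_regs the caller should resume with.
 */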
struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	loadsegment(gs, current->thread.saved_gs);

	return ret;
}
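/*
 * Write-protect the 32 pages (128K) at 0xA0000-0xBFFFF - the legacy
 * VGA window - so that screen output from the vm86 task shows up as
 * page faults when VM86_SCREEN_BITMAP is in use.
 */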
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch ((int)regs.bx) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as (invalid) address to vm86_struct.
		 *  So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.cx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;

	/* we are clearing gs later just before "jmp resume_userspace",
	 * because it is not saved/restored.
	 */

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;
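
	/*
	 * v86mask selects which extra EFLAGS bits the virtual CPU gets
	 * to see: a 286 had none of them, the 386 added NT and IOPL,
	 * the 486 added AC, and later CPUs added ID.
	 */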
	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}
/*
 * Save old state, set default return value (%ax) to 0
 */
	info->regs32->ax = 0;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	savesegment(gs, tsk->thread.saved_gs);
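
	/*
	 * Point sp0 just past info->regs, so that the hardware frame
	 * pushed on the next entry from vm86 mode lands in info->regs
	 * and KVM86 can recover the enclosing kernel_vm86_struct.
	 */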
	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();
	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);
	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);
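
	/*
	 * Hand-rolled exit: switch the stack to the vm86 frame in *info,
	 * reload %ebp with the thread_info pointer, zero %gs (it is not
	 * saved/restored), and let resume_userspace IRET into
	 * virtual-8086 mode. This never returns.
	 */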
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov  %2, %%gs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		: "r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}
static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
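
/*
 * The guest's IF never reaches the real EFLAGS; it is kept in VEFLAGS
 * as VIF. If VIP is set when the guest re-enables interrupts, someone
 * is waiting for exactly that, so we bail out to 32-bit mode with
 * VM86_STI.
 */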
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 */
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}
static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}
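
/*
 * btl copies bit `nr` of the bitmap into the carry flag, and
 * sbbl %0,%0 turns the carry into 0 or -1, so the result is non-zero
 * exactly when interrupt `nr` is revectored.
 */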
#define val_byte(val, n) (((__u8 *)&val)[n])
#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
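
/*
 * These walk the emulated stack the way the CPU would, e.g.
 * pushw(ssp, sp, val, err) stores the two bytes of val at ss<<4+sp-2
 * and leaves sp decremented by 2. On a fault they jump to the
 * caller-supplied error label instead of returning to userspace.
 */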
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
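/*
 * Emulate "int i" for the vm86 task: fetch the real-mode vector from
 * the IVT entry at i*4, push FLAGS/CS/IP on the emulated stack just
 * like the CPU would, and redirect CS:IP to the handler. Vectors
 * pointing into the BIOS, and revectored interrupts, are bounced to
 * userspace instead.
 */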
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1))
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)
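
/*
 * CHECK_IF_IN_TRAP re-arms a pending single-step trap for the vm86
 * debugger; VM86_FAULT_RETURN resumes the vm86 task unless the PIC
 * emulation wants control while virtual interrupts are on, or a
 * single-step trap was pending, in which case we go back to 32-bit
 * mode first.
 */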
	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);
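
	/*
	 * All prefix bytes are consumed and ip points past them; now
	 * emulate the privileged instruction that trapped.
	 */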
	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}
	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}
	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}
	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;
simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"
static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;
#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))
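
/*
 * For VM86_REQUEST_IRQ the irqnumber argument packs the signal in its
 * high byte and the IRQ line in its low byte (see below). Bit 0 of
 * ALLOWED_SIGS permits signal 0, i.e. no signal at all; the task then
 * polls with VM86_GET_AND_RESET_IRQ.
 */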
static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}
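
/*
 * The handshake with userspace: the handler above latches the IRQ in
 * irqbits and masks the line; VM86_GET_AND_RESET_IRQ clears the latch
 * and unmasks the line again, whether the task polled or took the
 * signal.
 */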
static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}
void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;

	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}