/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
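/*
 * For illustration, a typical real-mode stack switch that depends on
 * that one-instruction window (a sketch, assuming flat 16-bit code;
 * this sequence is not quoted from anywhere in this file):
 *
 *	mov	ss, ax		; hardware masks interrupts here...
 *	mov	sp, bx		; ...so the pair executes atomically
 *
 * Under v86 emulation the second instruction may fault, splitting the
 * pair and losing the guarantee described above.
 */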
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
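/*
 * For reference (decoded from the constants above, not spelled out in
 * the original): SAFE_MASK is CF|PF|AF|ZF|SF|TF|DF|OF, i.e. the flag
 * bits a vm86 task is allowed to control directly; RETURN_MASK adds
 * the reserved bits 1, 3 and 5. IF, IOPL, NT and the higher flags are
 * handled through VEFLAGS and veflags_mask instead.
 */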
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}

	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &tsk->thread);
	vm86->saved_sp0 = 0;
	put_cpu();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);

	if (pmd_trans_huge(*pmd)) {
		struct vm_area_struct *vma = find_vma(mm, 0xA0000);
		split_huge_pmd(vma, pmd, 0xA0000);
	}
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}
SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
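/*
 * A minimal userspace probe for the installation check above
 * (hypothetical sketch, not part of this file; SYS_vm86 comes from
 * <sys/syscall.h>, the request constant from <asm/vm86.h>):
 *
 *	static int have_vm86plus(void)
 *	{
 *		// 0 means the vm86plus interface is installed; old
 *		// kernels instead fail the access_ok() probe above.
 *		return syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) == 0;
 *	}
 */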
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap.  Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
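		/*
		 * For illustration (a hypothetical sketch, not quoted from
		 * any emulator): a DOSEMU-style program first claims the
		 * low 1MB with something like
		 *
		 *	mmap((void *)0, 0x100000,
		 *	     PROT_READ | PROT_WRITE | PROT_EXEC,
		 *	     MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		 *
		 * which fails for unprivileged users when
		 * vm.mmap_min_addr > 0.
		 */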
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d).  Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}
	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, user_vm86, plus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		return -EFAULT;

	memset(&vm86regs, 0, sizeof(vm86regs));

	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
		get_user_ex(seg, &user_vm86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
		get_user_ex(seg, &user_vm86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &user_vm86->regs.es);
		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

		get_user_ex(vm86->flags, &user_vm86->flags);
		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	/*
	 * Save old state
	 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	tss = &per_cpu(cpu_tss, get_cpu());
	/* make room for real-mode segments */
	tsk->thread.sp0 += 16;

	if (static_cpu_has(X86_FEATURE_SEP))
		tsk->thread.sysenter_cs = 0;

	load_sp0(tss, &tsk->thread);
	put_cpu();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	force_iret();
	return regs->ax;
}
static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
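/*
 * The failing sequence spelled out (a sketch of the case described
 * above, as executed by guest real-mode code):
 *
 *	cli		; clear_IF():  VIF := 0
 *	pushf		; pushes flags with IF taken from VIF, i.e. 0
 *	sti		; set_IF():    VIF := 1
 *	popf		; must clear VIF again - hence the clear_IF()
 *			; branch in set_vflags_short/long() below
 */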
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	/* btl sets CF to bit 'nr' of the bitmap; sbbl expands CF to 0/-1 */
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
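/*
 * Note that these helpers move one byte at a time: 'ptr' is a 16-bit
 * quantity, so the offset arithmetic wraps around at the 64K segment
 * boundary the way real-mode pushes and pops do (presumably the reason
 * for not using wider put_user()/get_user() accesses here).
 */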
/*
 * There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	/* vector through the real-mode IVT entry at address i*4 */
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	/* build the real-mode interrupt frame: FLAGS, CS, IP */
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if (VEFLAGS & X86_EFLAGS_VIP) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}
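/*
 * The resulting userspace flow, for reference (hypothetical sketch;
 * 'sig' must be one of ALLOWED_SIGS and 'irq' a valid vm86 IRQ):
 *
 *	vm86(VM86_REQUEST_IRQ, (sig << 8) | irq);  // claim irq, pick signal
 *	// ... on each signal (or when polling):
 *	if (vm86(VM86_GET_AND_RESET_IRQ, irq))     // ack; re-enables the irq
 *		forward_irq_to_dos_code();
 *	vm86(VM86_FREE_IRQ, irq);                  // release when done
 */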
static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}