/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
#define VEFLAGS	(current->thread.vm86->veflags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
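
/*
 * Editor's note (illustrative, not part of the original source):
 * set_flags() copies only the bits selected by 'mask' from 'new' into X,
 * leaving the other bits of X untouched.  For example, with X = 0x200
 * (IF set), new = 0x001 (CF set) and mask = SAFE_MASK, the result is
 * 0x201: CF is taken from 'new', while IF keeps its old value because
 * bit 9 lies outside SAFE_MASK.  SAFE_MASK (0xDD5) selects CF, PF, AF,
 * ZF, SF, TF, DF and OF - the flags the guest may change directly -
 * and RETURN_MASK (0xDFF) additionally includes the reserved bits 1, 3
 * and 5.  IF itself is never taken from the guest; it is virtualized
 * through the VIF bit instead.
 */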

void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->user_vm86) {
		pr_alert("no user_vm86: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
	user = vm86->user_vm86;

	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86 info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &tsk->thread);
	vm86->saved_sp0 = 0;
	put_cpu();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}
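
/*
 * Editor's note (not part of the original source): mark_screen_rdonly()
 * below write-protects the 32 PTEs mapping the 128K VGA window at
 * 0xA0000-0xBFFFF, so that a guest write to video memory faults and the
 * VM86_SCREEN_BITMAP machinery can track which screen pages changed.
 */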

static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
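
/*
 * Editor's note (illustrative, not part of the original source): a DOS
 * emulator typically probes for the vm86plus interface and then enters
 * v86 mode roughly like this:
 *
 *	if (vm86(VM86_PLUS_INSTALL_CHECK, 0) == 0)
 *		ret = vm86(VM86_ENTER, (unsigned long)&vmp);
 *
 * where 'vmp' is a caller-provided struct vm86plus_struct holding the
 * initial real-mode register state.  The call "returns" only when
 * save_v86_state() hands an event such as VM86_INTx or VM86_SIGNAL
 * back to the monitor in ret.
 */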

static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	err = security_mmap_addr(0);
	if (err) {
		/*
		 * vm86 cannot virtualize the address space, so vm86 users
		 * need to manage the low 1MB themselves using mmap.  Given
		 * that BIOS places important data in the first page, vm86
		 * is essentially useless if mmap_min_addr != 0.  DOSEMU,
		 * for example, won't even bother trying to use vm86 if it
		 * can't map a page at virtual address 0.
		 *
		 * To reduce the available kernel attack surface, simply
		 * disallow vm86(old) for users who cannot mmap at va 0.
		 *
		 * The implementation of security_mmap_addr will allow
		 * suitably privileged users to map va 0 even if
		 * vm.mmap_min_addr is set above 0, and we want this
		 * behavior for vm86 as well, as it ensures that legacy
		 * tools like vbetool will not fail just because of
		 * vm.mmap_min_addr.
		 */
		pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
			     current->comm, task_pid_nr(current),
			     from_kuid_munged(&init_user_ns, current_uid()));
		return -EPERM;
	}

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, user_vm86, plus ?
		       sizeof(struct vm86_struct) :
		       sizeof(struct vm86plus_struct)))
		return -EFAULT;

	memset(&vm86regs, 0, sizeof(vm86regs));
	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
		get_user_ex(seg, &user_vm86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
		get_user_ex(seg, &user_vm86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &user_vm86->regs.es);
		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);

		get_user_ex(vm86->flags, &user_vm86->flags);
		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored,
			   &user_vm86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored,
			   &user_vm86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->user_vm86 = user_vm86;

	/*
	 * The flags register is also special: we cannot trust that the
	 * user has set it up safely, so this makes sure interrupt etc
	 * flags are inherited from protected mode.
	 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->veflags_mask = 0;
		break;
	case CPU_386:
		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	/*
	 * Save old state
	 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	tss = &per_cpu(cpu_tss, get_cpu());
	/* make room for real-mode segments */
	tsk->thread.sp0 += 16;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	force_iret();
	return regs->ax;
}
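
/*
 * Editor's note (not part of the original source): the helpers below
 * emulate the IF flag for the v86 guest.  The guest never changes the
 * real EFLAGS.IF; its "interrupt flag" lives in the VIF bit of VEFLAGS,
 * and get_vflags() folds it back into the IF position whenever the
 * guest reads its own flags (pushf, iret, interrupt entry).
 */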

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
}
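
/*
 * Editor's note (not part of the original source): the asm below uses
 * the classic "btl; sbbl" idiom.  btl copies bit 'nr' of the bitmap
 * into the carry flag, and "sbbl %0,%0" subtracts the register from
 * itself with borrow, yielding all-ones (-1, i.e. true) if the bit was
 * set and 0 if it was clear.
 */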

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
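
/*
 * Editor's note (not part of the original source): the guest stack is
 * accessed one byte at a time on purpose.  'ptr' is an unsigned short
 * in the callers, so "ptr--"/"ptr++" wrap at 64K exactly like a
 * real-mode SP inside its stack segment, and every byte goes through
 * put_user()/get_user(), so each access is individually range-checked.
 */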

/*
 * There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
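
/*
 * Editor's note (not part of the original source): do_int() dispatches
 * a software interrupt inside the guest.  Real mode keeps its interrupt
 * vector table at linear address 0: vector i lives at i * 4 (hence
 * "i << 2" below), stored as a 16-bit offset followed by a 16-bit
 * segment.  The handler is entered by pushing FLAGS, CS and IP and then
 * loading CS:IP from the vector, just as the hardware would.
 */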
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let the calling routine handle this */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}
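
/*
 * Editor's note (not part of the original source): handle_vm86_fault()
 * is reached on a general protection fault raised from v86 mode.  It
 * decodes the faulting instruction by hand - prefixes first, then the
 * opcode - and emulates the handful of privileged operations a DOS
 * program may legitimately use: pushf, popf, int xx, iret, cli and sti.
 */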
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
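
/* Editor's note: "TFpendig" (sic) matches the field name exposed in the
 * userspace vm86 ABI header, so the spelling cannot be corrected here. */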

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);
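
	/*
	 * Editor's note (not part of the original source): only the 0x66
	 * operand-size prefix changes behavior here - it makes the pushf,
	 * popf and iret emulation below move 32-bit values.  The other
	 * prefixes are consumed so the opcode byte can be found, but have
	 * no effect on the instructions this function emulates.
	 */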

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		goto check_vip;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto check_vip;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;
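
/*
 * Editor's note (not part of the original source): check_vip is reached
 * after any emulated operation that may have enabled the guest's
 * virtual IF.  If the monitor's PIC emulation has already flagged a
 * pending interrupt (VIP set), we return to it with VM86_STI so it can
 * inject that interrupt before the guest runs any further.
 */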
check_vip:
	if (VEFLAGS & X86_EFLAGS_VIP) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))
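
/*
 * Editor's note (not part of the original source): bit 0 of ALLOWED_SIGS
 * is set so that a signal number of 0 - meaning "don't deliver a signal,
 * the task will poll with VM86_GET_AND_RESET_IRQ" - also passes the
 * permission check in VM86_REQUEST_IRQ below.
 */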

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}
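
/*
 * Editor's note (illustrative, not part of the original source): for
 * VM86_REQUEST_IRQ the 'irqnumber' argument packs the signal in the
 * high byte and the IRQ line in the low byte, e.g.
 *
 *	vm86(VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 3);
 *
 * asks for SIGUSR1 whenever IRQ 3 fires; a signal byte of 0 selects
 * pure polling via VM86_GET_AND_RESET_IRQ.
 */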

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}