Diffstat (limited to 'arch/i386/kernel/vm86.c')
-rw-r--r--  arch/i386/kernel/vm86.c  21
1 file changed, 12 insertions, 9 deletions
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff..ec0f68ce688 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -222,7 +222,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 			goto out;
 		case VM86_PLUS_INSTALL_CHECK:
 			/* NOTE: on old vm86 stuff this will return the error
-			   from verify_area(), because the subfunction is
+			   from access_ok(), because the subfunction is
 			   interpreted as (invalid) address to vm86_struct.
 			   So the installation check works.
 			 */
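
The comment fix above tracks the removal of the old verify_area() helper in favour of access_ok(). As a hedged illustration of what the comment describes (not the kernel's actual code; the function name and 'arg' are hypothetical, and the 2.6-era access_ok(type, addr, size) form is assumed): on a kernel that only knows the old vm86 interface, the VM86_PLUS_INSTALL_CHECK subfunction number is treated as the user-space address of a struct vm86_struct, the user-access check fails, and the resulting error tells the caller that vm86plus is not available.

#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/vm86.h>

/* Hypothetical sketch of the old-style entry path the comment refers to. */
static int old_style_vm86_entry(unsigned long arg)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)arg;

	/* For VM86_PLUS_INSTALL_CHECK, 'arg' is a small subfunction number,
	 * not a real pointer, so this check fails and -EFAULT is returned;
	 * the caller interprets that error as "vm86plus not installed". */
	if (!access_ok(VERIFY_READ, v86, sizeof(*v86)))
		return -EFAULT;

	/* ...an old kernel would go on to copy the structure and enter vm86 mode... */
	return 0;
}
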
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
  */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
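
The movl to mov change matters because a segment register moved to or from memory is a 16-bit transfer; spelling it with the 32-bit 'l' suffix is wrong for that operand combination and newer assemblers reject or warn about it, while the plain mov mnemonic lets the assembler pick the correct form. A minimal user-space sketch of the same idiom (illustrative only, not kernel code; the variable name is made up):

#include <stdio.h>

int main(void)
{
	unsigned short fs_sel;	/* segment selectors are 16 bits wide */

	/* Same operand form as the patched kernel code: no size suffix. */
	asm volatile("mov %%fs,%0" : "=m" (fs_sel));

	printf("%%fs selector: %#hx\n", fs_sel);
	return 0;
}
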
@@ -717,12 +717,12 @@ static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
 	irqbits |= irq_bit;
 	if (vm86_irqs[intno].sig)
 		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
-	spin_unlock_irqrestore(&irqbits_lock, flags);
 	/*
 	 * IRQ will be re-enabled when user asks for the irq (whether
 	 * polling or as a result of the signal)
 	 */
-	disable_irq(intno);
+	disable_irq_nosync(intno);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return IRQ_HANDLED;
 
 out:
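
The swap from disable_irq() to disable_irq_nosync() is the functional fix in this hunk: disable_irq() waits for any running handler of that IRQ to complete, so calling it from inside the handler itself (and with irqbits_lock held) can deadlock, whereas disable_irq_nosync() only marks the line disabled and returns immediately. Moving spin_unlock_irqrestore() after the disable also keeps the recorded bit in irqbits and the disabled line state from being observed out of step. A sketch of the same handler-side pattern under 2.6-era assumptions (hypothetical names pending_lock, pending_bits, sketch_handler):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pending_lock);
static unsigned long pending_bits;

static irqreturn_t sketch_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	pending_bits |= 1UL << irq;	/* remember the event for later polling */
	disable_irq_nosync(irq);	/* safe here: does not wait for this handler */
	spin_unlock_irqrestore(&pending_lock, flags);
	return IRQ_HANDLED;
}
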
@@ -754,17 +754,20 @@ static inline int get_and_reset_irq(int irqnumber)
 {
 	int bit;
 	unsigned long flags;
+	int ret = 0;
 
 	if (invalid_vm86_irq(irqnumber)) return 0;
 	if (vm86_irqs[irqnumber].tsk != current) return 0;
 	spin_lock_irqsave(&irqbits_lock, flags);
 	bit = irqbits & (1 << irqnumber);
 	irqbits &= ~bit;
+	if (bit) {
+		enable_irq(irqnumber);
+		ret = 1;
+	}
+
 	spin_unlock_irqrestore(&irqbits_lock, flags);
-	if (!bit)
-		return 0;
-	enable_irq(irqnumber);
-	return 1;
+	return ret;
 }
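
This last hunk moves the enable_irq() call, and the decision it depends on, inside the irqbits_lock critical section, so the bit test, the bit clear, and the re-enable happen atomically with respect to the interrupt handler above; the early returns collapse into a single exit through ret. Pieced together from the hunk, the function now reads roughly as follows (trailing context is not shown above, so this is a reconstruction rather than a quote of the file):

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);	/* re-enable only if an interrupt was actually pending */
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}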