/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction.  We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them.  We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
/*
 * This gets called from entry.S with interrupts disabled, but
 * from process context. Enable interrupts here, before trying
 * to access user space.
 */
/* NOTE(review): fragment of save_v86_state() — the enclosing function
 * header is not visible in this chunk. */
local_irq_enable();
/* A NULL vm86 state here would mean we entered v86 mode without
 * going through do_sys_vm86() — treat as fatal. */
BUG_ON(!vm86);
/* Merge the VIF bit and the task's masked eflags back into the
 * saved user flags before copying state out. */
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
user = vm86->user_vm86;
SYSCALL_DEFINE2(vm86, unsignedlong, cmd, unsignedlong, arg)
{ switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error * from access_ok(), because the subfunction is * interpreted as (invalid) address to vm86_struct. * So the installation check works.
*/ return 0;
}
/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}
/* NOTE(review): fragment of do_sys_vm86() — the enclosing function
 * header is not visible in this chunk. */
err = security_mmap_addr(0); if (err) {
	/*
	 * vm86 cannot virtualize the address space, so vm86 users
	 * need to manage the low 1MB themselves using mmap. Given
	 * that BIOS places important data in the first page, vm86
	 * is essentially useless if mmap_min_addr != 0. DOSEMU,
	 * for example, won't even bother trying to use vm86 if it
	 * can't map a page at virtual address 0.
	 *
	 * To reduce the available kernel attack surface, simply
	 * disallow vm86(old) for users who cannot mmap at va 0.
	 *
	 * The implementation of security_mmap_addr will allow
	 * suitably privileged users to map va 0 even if
	 * vm.mmap_min_addr is set above 0, and we want this
	 * behavior for vm86 as well, as it ensures that legacy
	 * tools like vbetool will not fail just because of
	 * vm.mmap_min_addr.
	 */
	pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n",
	current->comm, task_pid_nr(current),
	from_kuid_munged(&init_user_ns, current_uid())); return -EPERM;
}
/* Lazily allocate the per-task vm86 state on first use. */
if (!vm86) { if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) return -ENOMEM;
tsk->thread.vm86 = vm86;
/* A non-zero saved_sp0 means we are already inside vm86 mode. */
} if (vm86->saved_sp0) return -EPERM;
/* Copy only the head of the structure (up to int_revectored). */
if (copy_from_user(&v, user_vm86,
offsetof(struct vm86_struct, int_revectored))) return -EFAULT;
/* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */ if (v.flags & VM86_SCREEN_BITMAP) {
pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n",
current->comm); return -EINVAL;
}
/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
VEFLAGS = vm86regs.pt.flags;
vm86regs.pt.flags &= SAFE_MASK;
vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
vm86regs.pt.flags |= X86_VM_MASK;
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
/*
 * Pop a 32-bit value from user memory at base + ptr, one byte at a
 * time via get_user(), advancing ptr by 4.  Jumps to err_label on
 * any user-access failure.
 *
 * NOTE: macro arguments are evaluated multiple times and 'ptr' is
 * modified — pass simple lvalues only.
 */
#define popl(base, ptr, err_label)				\
({								\
	__u32 __res;						\
	if (get_user(val_byte(__res, 0), base + ptr) < 0)	\
		goto err_label;					\
	ptr++;							\
	if (get_user(val_byte(__res, 1), base + ptr) < 0)	\
		goto err_label;					\
	ptr++;							\
	if (get_user(val_byte(__res, 2), base + ptr) < 0)	\
		goto err_label;					\
	ptr++;							\
	if (get_user(val_byte(__res, 3), base + ptr) < 0)	\
		goto err_label;					\
	ptr++;							\
	__res;							\
})
/* There are so many possible reasons for this function to return * VM86_INTx, so adding another doesn't bother me. We can expect * userspace programs to be able to handle it. (Getting a problem * in userspace is always better than an Oops anyway.) [KD]
*/ staticvoid do_int(struct kernel_vm86_regs *regs, int i, unsignedchar __user *ssp, unsignedshort sp)
{ unsignedlong __user *intr_ptr; unsignedlong segoffs; struct vm86 *vm86 = current->thread.vm86;
if (regs->pt.cs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &vm86->int_revectored)) goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle;
intr_ptr = (unsignedlong __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
pushw(ssp, sp, regs->pt.cs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
regs->pt.cs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
clear_IF(regs);
clear_AC(regs); return;
cannot_handle:
save_v86_state(regs, VM86_INTx + (i << 8));
}
/*
 * Handle a trap (debug/breakpoint) raised while in v86 mode.
 * Returns 0 if the trap was consumed here, 1 if the caller should
 * handle it.
 */
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		/* vm86plus clients (e.g. debuggers) get int1/int3 reported
		 * directly via the vm86 return value. */
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP);
	return 0;
}
/* NOTE(review): fragment of the instruction-emulation switch in
 * handle_vm86_fault() — the enclosing function is not visible here. */
/* sti */
/*
 * Damn. This is incorrect: the 'sti' instruction should actually
 * enable interrupts after the /next/ instruction. Not good.
 *
 * Probably needs some horsing around with the TF flag. Aiee..
 */
case 0xfb:
IP(regs) = ip;
set_IF(regs); goto check_vip;

simulate_sigsegv:
/* FIXME: After a long discussion with Stas we finally
 *        agreed, that this is wrong. Here we should
 *        really send a SIGSEGV to the user program.
 *        But how do we create the correct context? We
 *        are inside a general protection fault handler
 *        and has just returned from a page fault handler.
 *        The correct context for the signal handler
 *        should be a mixture of the two, but how do we
 *        get the information? [KD]
 */
save_v86_state(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */
#define VM86_IRQNAME "vm86irq"
/* Per-IRQ registration: which task (if any) owns the IRQ and which
 * signal to deliver to it when the IRQ fires (0 = no signal). */
static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];
/* NOTE(review): fragment of the vm86 IRQ handler — the enclosing
 * function header is not visible in this chunk. */
spin_lock_irqsave(&irqbits_lock, flags);
/* Ignore the IRQ if it is already marked pending or unowned. */
irq_bit = 1 << intno; if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) goto out;
/* Mark pending and notify the owning task, if it asked for a signal. */
irqbits |= irq_bit; if (vm86_irqs[intno].sig)
send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
/*
 * IRQ will be re-enabled when user asks for the irq (whether
 * polling or as a result of the signal)
 */
disable_irq_nosync(intno);
spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED;
/*
 * Drop every vm86 IRQ registration owned by @task.  Called when a
 * task goes away so its slots in vm86_irqs[] are reclaimed.
 */
void release_vm86_irqs(struct task_struct *task)
{
	int irq;

	for (irq = FIRST_VM86_IRQ; irq <= LAST_VM86_IRQ; irq++) {
		if (vm86_irqs[irq].tsk != task)
			continue;
		free_vm86_irq(irq);
	}
}
/*
 * Test-and-clear the pending bit for @irqnumber on behalf of the
 * current task.  Returns 1 (and re-enables the IRQ) if it was
 * pending, 0 if it was not pending, invalid, or owned by another
 * task.
 *
 * NOTE(review): the original text of this chunk was truncated after
 * the if-block — the spin_lock_irqsave() had no matching unlock and
 * no return; the missing tail is restored here.
 */
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		/* The handler disabled the line when it marked it pending;
		 * the user has now consumed the event, so re-enable it. */
		enable_irq(irqnumber);
		ret = 1;
	}
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}