/* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to handle_mm_fault(). * * mmcsr: * 0 = translation not valid * 1 = access violation * 2 = fault-on-read * 3 = fault-on-execute * 4 = fault-on-write * * cause: * -1 = instruction fetch * 0 = load * 1 = store * * Registers $9 through $15 are saved in a block just prior to `regs' and * are saved and restored around the call to allow exception code to * modify them.
*/
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs
by ignoring such an instruction. */ if (cause == 0) { unsignedint insn;
__get_user(insn, (unsignedint __user *)regs->pc); if ((insn >> 21 & 0x1f) == 0x1f && /* ldq ldl ldt lds ldg ldf ldwu ldbu */
(1ul << (insn >> 26) & 0x30f00001400ul)) {
regs->pc += 4; return;
}
}
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */ if (!mm || faulthandler_disabled()) goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC if (address >= TASK_SIZE) goto vmalloc_fault; #endif if (user_mode(regs))
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore;
/* Ok, we have a good vm_area for this memory access, so
we can handle it. */
si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area;
} elseif (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area;
} else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area;
flags |= FAULT_FLAG_WRITE;
}
/* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo
the fault. */
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; return;
}
/* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return;
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm);
return;
/* Something tried to access memory that isn't in our memory map.
Fix it, but check if it's kernel or user first. */
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore: if (user_mode(regs)) goto do_sigsegv;
no_context: /* Are we prepared to handle this fault as an exception? */ if ((fixup = search_exception_tables(regs->pc)) != 0) { unsignedlong newpc;
newpc = fixup_exception(dpf_reg, fixup, regs->pc);
regs->pc = newpc; return;
}
/* Oops. The kernel tried to access some bad page. We'll have to
terminate things with extreme prejudice. */
printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address);
die_if_kernel("Oops", regs, cause, (unsignedlong*)regs - 16);
make_task_dead(SIGKILL);
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context;
pagefault_out_of_memory(); return;
do_sigbus:
mmap_read_unlock(mm); /* Send a sigbus, regardless of whether we were in kernel
or user mode. */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); if (!user_mode(regs)) goto no_context; return;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
vmalloc_fault: if (user_mode(regs)) goto do_sigsegv; else { /* Synchronize this task's top level page-table
with the "reference" page table from init. */ long index = pgd_index(address);
pgd_t *pgd, *pgd_k;
The information on this web page has been carefully compiled to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The color syntax highlighting and the measurement are still experimental.