/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

/* TODO: VM_EXEC flag work-around, cache aliasing */
if (!pte_present(*pte_k)) goto bad_page_fault; return;
bad_page_fault:
bad_page_fault(regs, address, SIGKILL); #else
WARN_ONCE(1, "%s in noMMU configuration\n", __func__); #endif
} /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * Note: does not handle Miss and MultiHit.
*/
int is_write, is_exec;
vm_fault_t fault; unsignedint flags = FAULT_FLAG_DEFAULT;
code = SEGV_MAPERR;
/* We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd.
*/ if (address >= TASK_SIZE && !user_mode(regs)) {
vmalloc_fault(regs, address); return;
}
/* If we're in an interrupt or have no user * context, we must not take the fault..
*/ if (faulthandler_disabled() || !mm) {
bad_page_fault(regs, address, SIGSEGV); return;
}
retry:
vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore;
/* Ok, we have a good vm_area for this memory access, so * we can handle it..
*/
code = SEGV_ACCERR;
if (is_write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area;
flags |= FAULT_FLAG_WRITE;
} elseif (is_exec) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area;
} else/* Allow read even from write-only pages. */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area;
/* If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) { if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL); return;
}
/* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return;
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm); return;
/* Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore: if (user_mode(regs)) {
force_sig_fault(SIGSEGV, code, (void *) address); return;
}
bad_page_fault(regs, address, SIGSEGV); return;
/* We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully.
*/
out_of_memory:
mmap_read_unlock(mm); if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL); else
pagefault_out_of_memory(); return;
do_sigbus:
mmap_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel * or user mode.
*/
force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
/* Kernel mode? Handle exceptions or die */ if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS); return;
}
/* Are we prepared to handle this kernel fault? */ if ((entry = search_exception_tables(regs->pc)) != NULL) {
pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup);
regs->pc = entry->fixup; return;
}
/* Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice.
*/
pr_alert("Unable to handle kernel paging request at virtual " "address %08lx\n pc = %08lx, ra = %08lx\n",
address, regs->pc, regs->areg[0]);
die("Oops", regs, sig);
}
/*
 * NOTE(review): the text below is disclaimer boilerplate from the web
 * page this file was extracted from; it is not part of the kernel
 * source.  Preserved here as a comment so the file remains valid C:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt.  Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */