// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
/*
 * Resolve the faulting address and deliver a fault signal to current.
 *
 * @code:       si_code for the signal (e.g. SEGV_MAPERR, SEGV_ACCERR,
 *              BUS_ADRERR)
 * @sig:        signal number to deliver (SIGSEGV or SIGBUS)
 * @regs:       trap-time register state
 * @text_fault: nonzero if the fault was on an instruction fetch, in which
 *              case the faulting address comes from the PC rather than the
 *              data-access address
 *
 * Thin wrapper: computes the user-visible si_addr from the register state
 * and hands off to __do_fault_siginfo() for the actual signal delivery.
 */
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault)
{
	/* Fix: "unsignedlong" in the original is a garbled "unsigned long". */
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsignedlong address)
{ struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; int from_user = !(regs->psr & PSR_PS); int code;
vm_fault_t fault; unsignedint flags = FAULT_FLAG_DEFAULT;
if (text_fault)
address = regs->pc;
/* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more.
*/
code = SEGV_MAPERR; if (address >= TASK_SIZE) goto vmalloc_fault;
/* * If we're in an interrupt or have no user * context, we must not take the fault..
*/ if (pagefault_disabled() || !mm) goto no_context;
if (!from_user && address >= PAGE_OFFSET) goto no_context;
retry:
vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it..
*/
code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area;
} else { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area;
}
if (from_user)
flags |= FAULT_FLAG_USER; if (write)
flags |= FAULT_FLAG_WRITE;
/* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) { if (!from_user) goto no_context; return;
}
/* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return;
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm); return;
/* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (from_user) {
do_fault_siginfo(code, SIGSEGV, regs, text_fault); return;
}
/* Is this in ex_table? */
no_context: if (!from_user) { conststruct exception_table_entry *entry;
/* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully.
*/
out_of_memory:
mmap_read_unlock(mm); if (from_user) {
pagefault_out_of_memory(); return;
} goto no_context;
do_sigbus:
mmap_read_unlock(mm);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault); if (!from_user) goto no_context;
vmalloc_fault:
{ /* * Synchronize this task's top level page-table * with the 'reference' page table.
*/ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.