/* * This is useful to dump out the page tables associated with * 'addr' in mm 'mm'.
 */ void show_pte(constchar *lvl, struct mm_struct *mm, unsignedlong addr)
{
/*
 * NOTE(review): this body does not match the signature above.  It calls
 * show_pte() recursively with different arguments and references 'regs'
 * and 'fsr', neither of which is in scope here.  It appears to be the
 * tail of die_kernel_fault() spliced under show_pte()'s header during
 * extraction; the real page-table dump body is missing.  The prototype
 * also has fused tokens ('constchar', 'unsignedlong') where whitespace
 * was lost.  TODO: restore both functions from the upstream source.
 */
pgd_t *pgd;
/* NOTE(review): unbounded recursion as written — cannot be intentional. */
show_pte(KERN_ALERT, mm, addr);
/* NOTE(review): 'regs' and 'fsr' are undeclared in this scope. */
die("Oops", regs, fsr);
bust_spinlocks(0);
make_task_dead(SIGKILL);
}
/* * Oops. The kernel tried to access some page that wasn't present.
*/ staticvoid
__do_kernel_fault(struct mm_struct *mm, unsignedlong addr, unsignedint fsr, struct pt_regs *regs)
{ constchar *msg; /* * Are we prepared to handle this kernel fault?
*/ if (fixup_exception(regs)) return;
/* * No handler, we'll have to terminate things with extreme prejudice.
*/ if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else { if (is_translation_fault(fsr) &&
kfence_handle_page_fault(addr, is_write_fault(fsr), regs)) return;
msg = "paging request";
}
die_kernel_fault(msg, mm, addr, fsr, regs);
}
/* * Something tried to access memory that isn't in our memory map.. * User mode accesses just cause a SIGSEGV
 */ staticvoid
__do_user_fault(unsignedlong addr, unsignedint fsr, unsignedint sig, int code, struct pt_regs *regs)
{ struct task_struct *tsk = current;
/*
 * NOTE(review): this body does not belong to __do_user_fault().  It is
 * the user/kernel dispatch normally found in do_bad_area(): as written it
 * recurses into __do_user_fault() unconditionally for user-mode faults,
 * references 'mm' which is not declared in this scope, and never uses
 * 'tsk', 'sig' or 'code'.  The real __do_user_fault() delivers the signal
 * to the task.  TODO: restore from the upstream source.  Fused tokens
 * ('staticvoid', 'unsignedlong', 'unsignedint') indicate lost whitespace.
 */
/* * If we are in kernel mode at this point, we * have no context to handle this fault with.
 */ if (user_mode(regs))
__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs); else
__do_kernel_fault(mm, addr, fsr, regs);
}
/*
 * NOTE(review): the following span is an interior fragment of what is
 * presumably do_page_fault() — the function header, the variable
 * declarations ('flags', 'vma', 'vm_flags', 'fault', 'mm', 'code',
 * 'sig'), and the 'no_context', 'lock_mmap' and 'retry' label targets
 * are not visible; they were lost during extraction.  The code itself is
 * kept byte-identical below.  TODO: restore the enclosing function from
 * the upstream source before attempting to build.
 */
/* * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are * routed via the translation fault mechanism. Check whether uaccess * is disabled while in kernel mode.
 */ if (!ttbr0_usermode_access_allowed(regs)) goto no_context;
if (!(flags & FAULT_FLAG_USER)) goto lock_mmap;
vma = lock_vma_under_rcu(mm, addr); if (!vma) goto lock_mmap;
/* * ok, we have a good vm_area for this memory access, check the * permissions on the VMA allow for the fault which occurred.
 */ if (!(vma->vm_flags & vm_flags)) {
mmap_read_unlock(mm);
fault = 0;
code = SEGV_ACCERR; goto bad_area;
}
/* If we need to retry but a fatal signal is pending, handle the * signal first. We do not need to release the mmap_lock because * it would already be released in __lock_page_or_retry in
 * mm/filemap.c. */ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; return 0;
}
/* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return 0;
if (!(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED; goto retry;
}
}
mmap_read_unlock(mm);
done:
/* Handle the "normal" case first */ if (likely(!(fault & VM_FAULT_ERROR))) return 0;
code = SEGV_MAPERR;
bad_area: /* * If we are in kernel mode at this point, we * have no context to handle this fault with.
 */ if (!user_mode(regs)) goto no_context;
if (fault & VM_FAULT_OOM) { /* * We ran out of memory, call the OOM killer, and return to * userspace (which will retry the fault, or kill us if we * got oom-killed)
 */
pagefault_out_of_memory(); return 0;
}
if (fault & VM_FAULT_SIGBUS) { /* * We had some memory, but were unable to * successfully fix up this page fault.
 */
sig = SIGBUS;
code = BUS_ADRERR;
} else { /* * Something tried to access memory that * isn't in our memory map..
 */
sig = SIGSEGV;
}
/*
 * NOTE(review): the fragment ends here — the signal delivery and the
 * 'no_context' / 'lock_mmap' / 'retry' tails that the gotos above target
 * are missing from this chunk.
 */
/* * First Level Translation Fault Handler * * We enter here because the first level page table doesn't contain * a valid entry for the address. * * If the address is in kernel space (>= TASK_SIZE), then we are * probably faulting in the vmalloc() area. * * If the init_task's first level page tables contains the relevant * entry, we copy the it to this task. If not, we send the process * a signal, fixup the exception, or oops the kernel. * * NOTE! We MUST NOT take any locks for this case. We may be in an * interrupt or a critical region, and should only copy the information * from the master page table, nothing more.
 */
/*
 * NOTE(review): this function is truncated.  The pgd/p4d/pud walk that
 * should assign 'pgd', 'pgd_k', 'p4d', 'p4d_k', 'pud', 'pud_k', 'pmd'
 * and 'pmd_k' is missing, so 'pmd_k' is read below without any visible
 * initialization; the 'bad_area' label, the pmd copy, and the closing
 * brace are also absent.  Fused tokens ('staticint', 'unsignedlong',
 * 'unsignedint') indicate lost whitespace.  TODO: restore from upstream.
 */ #ifdef CONFIG_MMU staticint __kprobes
do_translation_fault(unsignedlong addr, unsignedint fsr, struct pt_regs *regs)
{ unsignedint index;
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs);
#ifdef CONFIG_ARM_LPAE /* * Only one hardware entry per PMD with LPAE.
 */
index = 0; #else /* * On ARM one Linux PGD entry contains two hardware entries (see page * tables layout in pgtable.h). We normally guarantee that we always * fill both L1 entries. But create_mapping() doesn't follow the rule. * It can create inidividual L1 entries, so here we have to call * pmd_none() check for the entry really corresponded to address, not * for the first of pair.
 */
index = (addr >> SECTION_SHIFT) & 1; #endif if (pmd_none(pmd_k[index])) goto bad_area;
/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	/*
	 * Route the section fault through the common bad-area path, which
	 * delivers a signal to user mode or fixes up / oopses the kernel.
	 */
	do_bad_area(addr, fsr, regs);

	/* Fault handled; tell the abort dispatcher not to escalate. */
	return 0;
}
#endif /* CONFIG_ARM_LPAE */
/*
 * Abort handler to be used only during first unmasking of asynchronous
 * aborts on the boot CPU. This makes sure that the machine will not die if
 * the firmware/bootloader left an imprecise abort pending for us to trip
 * over.
 */
static int __init
early_abort_handler(unsigned long addr, unsigned int fsr,
		    struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	/*
	 * NOTE(review): the original text was truncated after the pr_warn();
	 * the missing tail is restored here — consume the stale abort and
	 * report it as handled (0) so boot continues.
	 */
	return 0;
}
/*
 * NOTE(review): extraction residue — the following German website
 * disclaimer is unrelated to this source file; it was captured during
 * scraping.  It is preserved here, commented out so the file remains
 * valid C, pending removal:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */