/*
 * get_user_insn() - fetch the user-space instruction word at @tpc by
 * hand-walking the page tables.
 *
 * We now make sure that mmap_lock is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, raise interrupts around the time that we look at the
 * pte, kswapd will have to wait to get his smp ipi response from
 * us.  vmtruncate likewise.  This saves us having to get pte lock.
 *
 * NOTE(review): the fused tokens below ("staticunsignedint",
 * "unsignedlong") look like whitespace lost during extraction
 * ("static unsigned int", "unsigned long") -- as written this will not
 * compile; confirm against the original file.
 */
staticunsignedint get_user_insn(unsignedlong tpc)
{
/* Walk pgd -> p4d -> pud; a missing or bad entry at any level means the
 * instruction cannot be read, so we bail out leaving insn == 0. */
pgd_t *pgdp = pgd_offset(current->mm, tpc);
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte; unsignedlong pa;
u32 insn = 0;
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) goto out;
p4dp = p4d_offset(pgdp, tpc); if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp))) goto out;
pudp = pud_offset(p4dp, tpc); if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) goto out;
/* This disables preemption for us as well. */
local_irq_disable();
/* NOTE(review): the function is truncated here in this copy -- the
 * pmd/pte levels of the walk, the actual instruction load,
 * local_irq_enable(), and the "out:" label that the gotos above target
 * are all missing.  Restore from the original source. */
/*
 * do_fault_siginfo() - deliver signal @sig with si_code @code for a
 * fault taken by the current task.
 *
 * For ITLB faults the faulting PC itself is the address.  For data
 * faults, if the faulting instruction was probed successfully we decode
 * it to compute a precise effective address; otherwise we fall back to
 * the hardware-provided @fault_addr, which may only be page-granular.
 *
 * NOTE(review): fused keywords ("staticvoid", "unsignedlong",
 * "unsignedint") appear to be extraction damage -- confirm against the
 * original source.
 */
staticvoid do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsignedlong fault_addr, unsignedint insn, int fault_code)
{ unsignedlong addr;
if (fault_code & FAULT_CODE_ITLB) {
addr = regs->tpc;
} else { /* If we were able to probe the faulting instruction, use it
 * to compute a precise fault address.  Otherwise use the fault
 * time provided address which may only have page granularity.
 */ if (insn)
addr = compute_effective_address(regs, insn, 0); else
addr = fault_addr;
}
/* Optionally log the unhandled signal before it is delivered. */
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, code, addr, current);
/* NOTE(review): truncated in this copy -- the actual signal delivery
 * call and the function's closing brace are missing. */
/*
 * do_kernel_fault() - last-chance handling for a fault that could not be
 * serviced normally: emulate sparc64 alternate-ASI (non-faulting) loads,
 * apply an exception-table fixup for privileged code, or send SIGSEGV to
 * a user task.
 *
 * NOTE(review): fused keywords ("staticvoid", "unsignedchar",
 * "conststruct") look like extraction damage -- confirm against the
 * original source.
 */
staticvoid __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, unsignedint insn, unsignedlong address)
{ unsignedchar asi = ASI_P;
/* Privileged fault with no instruction word available: nothing we can
 * emulate, go straight to the fallback path.
 * NOTE(review): the "cannot_handle:" label this goto targets is missing
 * from this copy of the function -- it will not compile as-is. */
if ((!insn) && (regs->tstate & TSTATE_PRIV)) goto cannot_handle;
/* If user insn could be read (thus insn is zero), that
 * is fine.  We will just gun down the process with a signal
 * in that case.
 */
/* Non-write, non-ITLB fault whose insn has bits 31:30 == 11 and bit 23
 * set: a load/store using an alternate ASI.  Extract the ASI either from
 * %tstate (immediate-ASI form, bit 13 set) or from the insn itself. */
if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
(insn & 0xc0800000) == 0xc0800000) { if (insn & 0x2000)
asi = (regs->tstate >> 24); else
asi = (insn >> 5); if ((asi & 0xf2) == 0x82) { if (insn & 0x1000000) {
handle_ldf_stq(insn, regs);
} else { /* This was a non-faulting load. Just clear the
 * destination register(s) and continue with the next
 * instruction. -jj
 */
handle_ld_nf(insn, regs);
} return;
}
}
/* Is this in ex_table?  Privileged code gets its PC redirected to the
 * registered fixup; user code gets the signal prepared by the caller. */
if (regs->tstate & TSTATE_PRIV) { conststruct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc); if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4; return;
}
} else { /* The si_code was set to make clear whether
 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
 */
do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); return;
}
/* NOTE(review): truncated in this copy -- the "cannot_handle:" label,
 * the terminal unhandled-fault call for privileged faults with no
 * fixup, and the closing brace are missing. */
/*
 * NOTE(review): the following is an interior fragment of the arch
 * page-fault entry point (its prologue -- declarations of mm, vma,
 * flags, si_code, and the surrounding function header -- is missing from
 * this copy, as are the goto targets handle_kernel_fault, do_sigbus and
 * exit_exception).  Comments below describe only what is visible here.
 */
/* Try to take mmap_lock without blocking.  If that fails and we are in
 * privileged code with no exception-table fixup for this PC, fall back
 * to kernel-fault handling instead of sleeping; otherwise block on the
 * lock (also the "retry:" re-entry point after a dropped lock). */
if (!mmap_read_trylock(mm)) { if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn); goto handle_kernel_fault;
}
retry:
mmap_read_lock(mm);
}
/* A bad return-address fault can never be satisfied by a mapping. */
if (fault_code & FAULT_CODE_BAD_RA) goto do_sigbus;
vma = find_vma(mm, address); if (!vma) goto bad_area;
/* Pure DTLB misses do not tell us whether the fault causing
 * load/store/atomic was a write or not, it only says that there
 * was no match.  So in such a case we (carefully) read the
 * instruction to try and figure this out.  It's an optimization
 * so it's ok if we can't do this.
 *
 * Special hack, window spill/fill knows the exact fault type.
 */ if (((fault_code &
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
(vma->vm_flags & VM_WRITE) != 0) {
insn = get_fault_insn(regs, 0); if (!insn) goto continue_fault; /* All loads, stores and atomics have bits 30 and 31 both set
 * in the instruction.  Bit 21 is set in all stores, but we
 * have to avoid prefetches which also have bit 21 set.
 */ if ((insn & 0xc0200000) == 0xc0200000 &&
(insn & 0x01780000) != 0x01680000) { /* Don't bother updating thread struct value,
 * because update_mmu_cache only cares which tlb
 * the access came from.
 */
fault_code |= FAULT_CODE_WRITE;
}
}
continue_fault:
/* Address below the vma: only a GROWSDOWN (stack) vma may expand, and a
 * non-faulting alternate-ASI load must not be allowed to grow it. */
if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (!(fault_code & FAULT_CODE_WRITE)) { /* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn); if ((insn & 0xc0800000) == 0xc0800000) { unsignedchar asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24); else
asi = (insn >> 5); if ((asi & 0xf2) == 0x82) goto bad_area;
}
}
vma = expand_stack(mm, address); if (!vma) goto bad_area_nosemaphore; /*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
si_code = SEGV_ACCERR;
/* If we took a ITLB miss on a non-executable page, catch
 * that here.
 */ if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
WARN(address != regs->tpc, "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
WARN_ON(regs->tstate & TSTATE_PRIV); goto bad_area;
}
/* Check access rights against the vma, and on old spitfire chips (whose
 * icache does not snoop stores) request a block-commit store fixup for
 * writable+executable file mappings. */
if (fault_code & FAULT_CODE_WRITE) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area;
/* Spitfire has an icache which does not snoop
 * processor stores.  Later processors do...
 */ if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
set_thread_fault_code(fault_code |
FAULT_CODE_BLKCOMMIT);
flags |= FAULT_FLAG_WRITE;
} else { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area;
}
/* NOTE(review): the fragment jumps straight from the access checks to
 * the error labels here -- the handle_mm_fault() call and its result
 * handling appear to be missing from this copy. */
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
insn = get_fault_insn(regs, insn);
/* NOTE(review): the jump into kernel-fault handling that normally
 * follows here is missing from this copy. */
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
insn = get_fault_insn(regs, insn);
mmap_read_unlock(mm); if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory(); goto exit_exception;
} goto handle_kernel_fault;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.