/* * Find out which address space caused the exception.
*/ staticbool is_kernel_fault(struct pt_regs *regs)
{ union teid teid = { .val = regs->int_parm_long };
if (user_mode(regs)) returnfalse; if (teid.as == PSW_BITS_AS_SECONDARY) returnfalse; returntrue;
}
/* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * interruption code (int_code): * 04 Protection -> Write-Protection (suppression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification)
*/ staticvoid do_exception(struct pt_regs *regs, int access)
{ struct vm_area_struct *vma; unsignedlong address; struct mm_struct *mm; unsignedint flags;
vm_fault_t fault; bool is_write;
/* * The instruction that caused the program check has * been nullified. Don't signal single step via SIGTRAP.
*/
clear_thread_flag(TIF_PER_TRAP); if (kprobe_page_fault(regs, 14)) return;
mm = current->mm;
address = get_fault_address(regs);
is_write = fault_is_write(regs); if (is_kernel_fault(regs) || faulthandler_disabled() || !mm) return handle_fault_error_nolock(regs, 0);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_DEFAULT; if (user_mode(regs))
flags |= FAULT_FLAG_USER; if (is_write)
access = VM_WRITE; if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE; if (!(flags & FAULT_FLAG_USER)) goto lock_mmap;
vma = lock_vma_under_rcu(mm, address); if (!vma) goto lock_mmap; if (!(vma->vm_flags & access)) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS); return handle_fault_error_nolock(regs, SEGV_ACCERR);
}
fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS); goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY); if (fault & VM_FAULT_MAJOR)
flags |= FAULT_FLAG_TRIED; /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs))
handle_fault_error_nolock(regs, 0); return;
}
lock_mmap:
retry:
vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) return handle_fault_error_nolock(regs, SEGV_MAPERR); if (unlikely(!(vma->vm_flags & access))) return handle_fault_error(regs, SEGV_ACCERR);
fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!user_mode(regs))
handle_fault_error_nolock(regs, 0); return;
} /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED; goto retry;
}
mmap_read_unlock(mm);
done: if (!(fault & VM_FAULT_ERROR)) return; if (fault & VM_FAULT_OOM) { if (!user_mode(regs))
handle_fault_error_nolock(regs, 0); else
pagefault_out_of_memory();
} elseif (fault & VM_FAULT_SIGSEGV) { if (!user_mode(regs))
handle_fault_error_nolock(regs, 0); else
do_sigsegv(regs, SEGV_MAPERR);
} elseif (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON |
VM_FAULT_HWPOISON_LARGE)) { if (!user_mode(regs))
handle_fault_error_nolock(regs, 0); else
do_sigbus(regs);
} else {
pr_emerg("Unexpected fault flags: %08x\n", fault);
BUG();
}
}
/* * Protection exceptions are suppressing, decrement psw address. * The exception to this rule are aborted transactions, for these * the PSW already points to the correct location.
*/ if (!(regs->int_code & 0x200))
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case.
*/ if (unlikely(!teid.b61)) { if (user_mode(regs)) { /* Low-address protection in user mode: cannot happen */
dump_fault_info(regs);
die(regs, "Low-address protection");
} /* * Low-address protection in kernel mode means * NULL pointer write access in kernel mode.
*/ return handle_fault_error_nolock(regs, 0);
} if (unlikely(cpu_has_nx() && teid.b56)) {
regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK); return handle_fault_error_nolock(regs, SEGV_ACCERR);
}
do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);
/* * Bit 61 indicates if the address is valid, if it is not the * kernel should be stopped or SIGSEGV should be sent to the * process. Bit 61 is not reliable without the misc UV feature, * therefore this needs to be checked too.
*/ if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) { /* * When this happens, userspace did something that it * was not supposed to do, e.g. branching into secure * memory. Trigger a segmentation fault.
*/ if (user_mode(regs)) {
send_sig(SIGSEGV, current, 0); return;
} /* * The kernel should never run into this case and * there is no way out of this situation.
*/
panic("Unexpected PGM 0x3d with TEID bit 61=0");
} if (is_kernel_fault(regs)) {
folio = phys_to_folio(addr); if (unlikely(!folio_try_get(folio))) return;
rc = arch_make_folio_accessible(folio);
folio_put(folio); if (rc)
BUG();
} else { if (faulthandler_disabled()) return handle_fault_error_nolock(regs, 0);
mm = current->mm;
mmap_read_lock(mm);
vma = find_vma(mm, addr); if (!vma) return handle_fault_error(regs, SEGV_MAPERR);
folio = folio_walk_start(&fw, vma, addr, 0); if (!folio) {
mmap_read_unlock(mm); return;
} /* arch_make_folio_accessible() needs a raised refcount. */
folio_get(folio);
rc = arch_make_folio_accessible(folio);
folio_put(folio);
folio_walk_end(&fw, vma); if (rc)
send_sig(SIGSEGV, current, 0);
mmap_read_unlock(mm);
}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.