#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
 * WARN/BUG is handled with a program interrupt so minimise checks here to
 * avoid recursion and maximise the chance of getting the first oops handled.
 */
#define INT_SOFT_MASK_BUG_ON(regs, cond)				\
do {									\
	if ((user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM)))	\
		BUG_ON(cond);						\
} while (0)
#else
/* No-op when soft-mask debugging is not configured */
#define INT_SOFT_MASK_BUG_ON(regs, cond)
#endif
/*
 * Redirect the interrupt return path to the 970 nap wakeup code when the
 * interrupted thread had _TLF_NAPPING set. Compiles to a no-op unless
 * CONFIG_PPC_970_NAP is enabled.
 */
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}
/* * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE]. * Asynchronous interrupts get here with HARD_DIS set (see below), so * this enables MSR[EE] for synchronous interrupts. IRQs remain * soft-masked. The interrupt handler may later call * interrupt_cond_local_irq_enable() to achieve a regular process * context.
*/ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
__hard_irq_enable();
} else {
__hard_RI_enable();
} /* Enable MSR[RI] early, to support kernel SLB and hash faults */ #endif
if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();
if (user_mode(regs)) {
kuap_lock();
CT_WARN_ON(ct_state() != CT_STATE_USER);
user_exit_irqoff();
account_cpu_user_entry();
account_stolen_time();
} else {
kuap_save_and_lock(regs); /* * CT_WARN_ON comes here via program_check_exception, * so avoid recursion.
*/ if (TRAP(regs) != INTERRUPT_PROGRAM)
CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
ct_state() != CT_STATE_IDLE);
INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
search_kernel_restart_table(regs->nip));
}
INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
!(regs->msr & MSR_EE));
booke_restore_dbcr0();
}
/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs)
{
}
/*
 * Entry preparation for asynchronous interrupts: force HARD_DIS so the
 * common entry code leaves MSR[EE] clear, then do common entry work and
 * enter the generic irq context.
 */
static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
#endif
	interrupt_enter_prepare(regs);
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * RI=1 is set by interrupt_enter_prepare, so this thread flags access
	 * has to come afterward (it can cause SLB faults).
	 */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif
	irq_enter();
}
staticinlinevoid interrupt_async_exit_prepare(struct pt_regs *regs)
{ /* * Adjust at exit so the main handler sees the true NIA. This must * come before irq_exit() because irq_exit can enable interrupts, and * if another interrupt is taken before nap_adjust_return has run * here, then that interrupt would return directly to idle nap return.
*/
nap_adjust_return(regs);
staticinlinebool nmi_disables_ftrace(struct pt_regs *regs)
{ /* Allow DEC and PMI to be traced when they are soft-NMI */ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) { if (TRAP(regs) == INTERRUPT_DECREMENTER) returnfalse; if (TRAP(regs) == INTERRUPT_PERFMON) returnfalse;
} if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) { if (TRAP(regs) == INTERRUPT_PERFMON) returnfalse;
}
/* * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile * because that goes through irq tracing which we don't want in NMI.
*/
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) { /* * Adjust regs->softe to be soft-masked if it had not been * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe * not yet set disabled), or if it was in an implicit soft * masked state. This makes arch_irq_disabled_regs(regs) * behave as expected.
*/
regs->softe = IRQS_ALL_DISABLED;
}
__hard_RI_enable();
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
state->ftrace_enabled = this_cpu_get_ftrace_enabled();
this_cpu_set_ftrace_enabled(0);
} #endif
/* If data relocations are enabled, it's safe to use nmi_enter() */ if (mfmsr() & MSR_DR) {
nmi_enter(); return;
}
/* * But do not use nmi_enter() for pseries hash guest taking a real-mode * NMI because not everything it touches is within the RMA limit.
*/ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
firmware_has_feature(FW_FEATURE_LPAR) &&
!radix_enabled()) return;
/* * Likewise, don't use it if we have some form of instrumentation (like * KASAN shadow) that is not safe to access in real mode (even on radix)
*/ if (IS_ENABLED(CONFIG_KASAN)) return;
/* * Likewise, do not use it in real mode if percpu first chunk is not * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there * are chances where percpu allocation can come from vmalloc area.
*/ if (percpu_first_chunk_is_paged) return;
/* Otherwise, it should be safe to call it */
nmi_enter();
}
staticinlinevoid interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{ if (mfmsr() & MSR_DR) { // nmi_exit if relocations are on
nmi_exit();
} elseif (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
firmware_has_feature(FW_FEATURE_LPAR) &&
!radix_enabled()) { // no nmi_exit for a pseries hash guest taking a real mode exception
} elseif (IS_ENABLED(CONFIG_KASAN)) { // no nmi_exit for KASAN in real mode
} elseif (percpu_first_chunk_is_paged) { // no nmi_exit if percpu first chunk is not embedded
} else {
nmi_exit();
}
/* * nmi does not call nap_adjust_return because nmi should not create * new work to do (must use irq_work for that).
*/
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_BOOK3S if (arch_irq_disabled_regs(regs)) { unsignedlong rst = search_kernel_restart_table(regs->nip); if (rst)
regs_set_return_ip(regs, rst);
} #endif
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)
/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * raw interrupt handlers must not enable or disable interrupts, or
 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	__hard_RI_enable();						\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)
/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_enter_prepare(regs);					\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs);					\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)
/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	interrupt_enter_prepare(regs);					\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)
/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)
/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs);					\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline __no_sanitize_address __no_kcsan long		\
____##func(struct pt_regs *regs)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.