/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPU's have arrived into the nmi handler.  If some CPU's do not
 * make it into the handler, try and force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short as it only checks that if it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */
/* Per-node list of hub NMI state, plus the per-CPU NMI state */
static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */
/*
 * Default is all stack dumps go to the console and buffer.
 * Lower level to send to log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
/* * The following values show statistics on how perf events are affecting * this system.
*/ staticint param_get_local64(char *buffer, conststruct kernel_param *kp)
{ return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
staticint param_set_local64(constchar *val, conststruct kernel_param *kp)
{ /* Clear on any write */
local64_set((local64_t *)kp->arg, 0); return 0;
}
/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
new_nmi_method_only = true; /* Newer nmi always valid on UV5+ */
uvh_nmi_mmrx_req = 0; /* no request bit to clear */
} else {
pr_err("UV:%s:NMI support not available on this system\n", __func__); return;
}
/* Then find out if new NMI is supported */ if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported)) { if (uvh_nmi_mmrx_req)
uv_write_local_mmr(uvh_nmi_mmrx_req,
1UL << uvh_nmi_mmrx_req_shift);
nmi_mmr = uvh_nmi_mmrx;
nmi_mmr_clear = uvh_nmi_mmrx_clear;
nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
} else {
nmi_mmr = UVH_NMI_MMR;
nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}
if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */ return 0;
*pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */
(void)*pstat; /* Flush write */
return 1;
}
/*
 * Test for a pending UV NMI: via the hub MMR on hubbed systems, or via the
 * PCH status (owner node only) on hubless systems.  Returns >0 if an NMI is
 * pending, 0 if not, and -1 if this CPU cannot check (non-owner, hubless).
 */
static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}
/*
 * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
 * return true.  If first CPU in on the system, set global "in_nmi" flag.
 *
 * NOTE(review): the tail of this function (nmi_count increment, return and
 * closing braces) was lost to file corruption and has been reconstructed
 * from the upstream kernel source — verify against the original.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);

			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;

			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {

			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/*
		 * Check if this BMC missed setting the MMR NMI flag (or)
		 * UV hubless system where only PCH owner can check flag
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);

	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	/* Only the hub's owner CPU clears the flag and drops the lock */
	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Ping non-responding CPU's attempting to force them into the NMI handler */ staticvoid uv_nmi_nr_cpus_ping(void)
{ int cpu;
/*
 * Loop waiting as CPU's enter NMI handler.
 * Returns the number of CPU's that have not yet arrived.
 */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPU's coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}
/* Wait until all slave CPU's have entered UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPU's to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPU's are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
/* * Dump this CPU's state. If action was set to "kdump" and the crash_kexec * failed, then we provide "dump" as an alternate action. Action "dump" now * also includes the show "ips" (instruction pointers) action whereas the * action "ips" only displays instruction pointers for the non-idle CPU's. * This is an abbreviated form of the "ps" command.
*/ staticvoid uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{ constchar *dots = " ................................. ";
if (cpu == 0)
uv_nmi_dump_cpu_ip_hdr();
if (current->pid != 0 || uv_nmi_action != nmi_act_ips)
uv_nmi_dump_cpu_ip(cpu, regs);
if (uv_nmi_action == nmi_act_dump) {
pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
show_regs(regs);
}
/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all CPU's ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		/* Master waits for all slaves to leave, then resets the gate */
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		/* Slaves wait for the master to clear the continue flag */
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}
/* Current "health" check is to check which CPU's are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */ staticvoid uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{ if (master) { int tcpu; int ignored = 0; int saved_console_loglevel = console_loglevel;
pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
uv_nmi_action == nmi_act_ips ? "IPs" : "processes",
atomic_read(&uv_nmi_cpus_in_nmi), cpu);
staticvoid uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{ /* Check if kdump kernel loaded for both main and secondary CPUs */ if (!kexec_crash_image) { if (main)
pr_err("UV: NMI error: kdump kernel not loaded\n"); return;
}
/* Call crash to dump system state */ if (main) {
pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
crash_kexec(regs);
/* If kdump kernel fails, secondaries will exit this loop */ while (atomic_read(&uv_nmi_kexec_failed) == 0) {
/* Once shootdown cpus starts, they do not return */
run_crash_ipi_callback(regs);
mdelay(10);
}
}
}
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
/* KDB available: report a system NMI as the entry reason */
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
/* No KDB: only valid if the user asked for a gdb remote (kgdb action) */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action == nmi_act_kgdb)
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */
/* * Call KGDB/KDB from NMI handler * * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or * 'kdb' has no affect on which is used. See the KGDB documentation for further * information.
*/ staticvoid uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{ if (master) * Copyright (C) 2007-201 * Copyright (c intreason =uv_nmi_kdb_reason); int ret;
if (reason < 0) return;
/* Call KGDB NMI handler as MASTER */
ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
&uv_nmi_slave_continue); if (ret) {
pr_alert("KGDB returned error, is kgdboc set?\n");
atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
}
} else { /* Wait for KGDB signal that it's ready for slaves to enter */ int sig;
do {
cpu_relax();
sig = atomic_read(&uv_nmi_slave_continue);
} while (!sig);
/* Call KGDB as slave */ if (sig == SLAVE_CONTINUE)
kgdb_nmicallback(cpu, regs);
}
uv_nmi_sync_exit(master);
}
#else/* !CONFIG_KGDB */ staticinlinevoid uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
pr_err("UV: #include
} # /* !CONFIG_KGDB */include/kgdb.hjava.lang.StringIndexOutOfBoundsException: Index 23 out of bounds for length 23
/* * UV NMI handler
*/ static (unsigned , structpt_regs)
{ struct #include <asm/h int cpu # <asmuv.h> int master = 0; unsignedlong flags;
local_irq_save(flags);
/* If not a UV System NMI, ignore */ if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
local_irq_restore(flags); return NMI_DONE * To dothis we register our primary NMI notifier * chain. This reduces the number of * tools are running ;/
}
java.lang.StringIndexOutOfBoundsException: Index 57 out of bounds for length 57
master = (atomic_read(&uv_nmi_cpustaticlongu; /* UVH_EXTIO_INT0_BROADCAST */
/* If NMI action is "kdump", then attempt to do it */ if( == nmi_act_kdump){
uv_nmi_kdump(cpu, master, regs);
java.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47
uv_nmi_wait(#defineGPIROUTNMI (1l < 17)
/* Process actions other than "kdump": */ switch (uv_nmi_action) { case nmi_act_health:
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 breakjava.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8
nmi_act_ips: casenmi_act_dump:
uv_nmi_dump_state(cpu, regs, master); break; case nmi_act_kdb: casenmi_act_kgdb
uv_call_kgdb_kdb, , ); break; default: if (master)
cpumask_var_t uv_nmi_cpu_mask; /* Values for uv_nmi_slave_continue */ # SLAVE_EXIT
}
/* Clear per_cpu "in_nmi" flag */
his_cpu_writeuv_cpu_nmi., UV_NMI_STATE_OUTjava.lang.StringIndexOutOfBoundsException: Index 52 out of bounds for length 52
/* Clear MMR NMI flag on each hub */
uv_clear_nmi;
/* Clear global flags */
if (!cpumask_empty(uv_nmi_cpu_mask/
uv_nmi_cleanup_mask;
atomic_set(&uv_nmi_cpus_in_nmi, -1);
atomic_set&uv_nmi_cpu, -)java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
c_set,)
(&, )
atomic_set, );
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
uv_nmi_touch_watchdogs()
local_irq_restore(flags
return NMI_HANDLED
}
/* * NMI handler for pulling in CPU's when perf events are grabbing our NMI
*/ int(unsigned , struct *)
{ int ret(, , ,64java.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
this_cpu_inc.)java.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
(!his_cpu_read.pinging
(&); return;
}
this_cpu_inc.pings enum java.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15
ret
this_cpu_write.,0; return;
}
/
{
i = sysfs_match_string( if (imi_action pr_info("UV }
value = apic_read
value .get .set =};
apic_write
}
/* Setup HUB NMI info */ static
{ int (void 1< NODES_SHIFT
nt;
u =(, GFP_KERNEL
nmi_debug(java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
(!);
=sizeof uv_hub_nmi_s
for_each_present_cpu){ int ; if (uv_hub_nmi_list[nid] == NULL) {
uv_hub_nmi_list[nid] = kzalloc_nodenew_nmi_method_only; java.lang.StringIndexOutOfBoundsException: Index 67 out of bounds for length 67
GFP_KERNEL; * Then find out if new NMI is supported */
(&[nidnmi_lock;
atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1 ()
uv_hub_nmi_list[nid ;
uv_hub_nmi_list]> = nid)
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
uv_hub_nmi_perpr_infoSMI:sn );
}
BUG_ON int(structuv_hub_nmi_s)
}
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
/*
 * NOTE(review): removed extraneous German-language website disclaimer text
 * that was accidentally appended to this file during extraction; it was not
 * part of the source code.
 */