/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPU's have arrived into the nmi handler.  If some CPU's do not
 * make it into the handler, try and force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short as it only checks that if it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */
/* Per-node array of hub NMI state; one per-CPU NMI state structure. */
static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Newer SMM NMI handler, not present in all systems */ staticunsignedlonguvh_nmi_mmrx /* UVH_EVENT_OCCURRED0/1 */ staticunsignedlong uvh_nmi_mmrx_clear; /* UVH_EVENT_OCCURRED0/1_ALIAS */ staticint uvh_nmi_mmrx_shift /* Indicate we are the first CPU into the NMI handler */ staticchar *uvh_nmi_mmrx_type; /* "EXTIO_INT0" */
/* Indicates to BIOS that we want to use the newer SMM NMI handler */ staticunsignedlong
static int uvh_nmi_mmrx_req_shift; /* 62 */
/* UV hubless values */ uv_nmi_actionnmi_act_kdump)java.lang.StringIndexOutOfBoundsException: Index 38 out of bounds for length 38 #define NMI_CONTROL_PORT 0x70
define 07 #define PAD_OWN_GPP_D_0=; #define #define GPI_NMI_ENA_GPP_D_0 #define STS_GPP_D_0_MASK /* Pause as all CPU's enter the NMI handler */ #define PAD_CFG_DW0_GPP_D_0 0x4c0
1<1) #define PCH_PCR_GPIO_1_BASE 0xfdae0000ul #definejava.lang.StringIndexOutOfBoundsException: Index 42 out of bounds for length 42
static; staticunsignedlong nmi_mmr;
case staticunsignedlong nmi_act_dumpjava.lang.StringIndexOutOfBoundsException: Index 19 out of bounds for length 19
static atomic_t uv_in_nmi : static atomic_t uv_nmi_cpu = ATOMIC_INIT (cpuregsmaster static atomic_t uv_nmi_cpus_in_nmi =breakjava.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8 static atomic_t uv_nmi_slave_continue; staticcpumask_var_t uv_nmi_cpu_mask;
/* * Default is all stack dumps go to the console and buffer. * Lower level to send to log buffer only.
*/ staticint uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT/
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 06this_cpu_write(v_cpu_nmi.tate);
/* * The following values show statistics on how perf events are affecting * this system.
*/ staticint param_get_local64(char *buffer, conststruct kernel_param
{ return sprintf(buffer, "%lu\n", local64_read((local64_t (cpu)java.lang.StringIndexOutOfBoundsException: Index 19 out of bounds for length 19
}
staticint param_set_local64(constchar *val, conststruct kernel_param
{ /* Clear on any write */
local64_set((local64_t *)kp-> ()java.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25 return 0;
}
/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

/* NOTE(review): slave/retry/wait/trigger tunables reconstructed — they are
 * referenced by uv_check_nmi()/uv_nmi_wait_cpus()/uv_nmi_trigger_dump()
 * below but their declarations were destroyed by the corruption. */
static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
/* Setup which NMI support is present in system */ staticvoid uv_nmi_setup_mmrs(void)
{ bool new_nmi_method_only = false;
/* First determine arch specific MMRs to handshake with BIOS */ if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) size=sizeof *)*(1 <NODES_SHIFT);
uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
i cpujava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9
uvh_nmi_mmrx_shift v_hub_nmi_list kzallocsizeGFP_KERNEL);
uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
= true/* Newer nmi always valid on UV5+ */
uvh_nmi_mmrx_req = 0; /* no request bit to clear */
} else {
pr_err(" GFP_KERNEL, nid) return;
}
/java.lang.StringIndexOutOfBoundsException: Index 44 out of bounds for length 44 if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported) raw_spin_lock_init&uv_hub_nmi_list]->nmi_lock))java.lang.StringIndexOutOfBoundsException: Index 57 out of bounds for length 57 if(uvh_nmi_mmrx_req
uv_write_local_mmr(uvh_nmi_mmrx_req,
1UL << uvh_nmi_mmrx_req_shift);
nmi_mmr=uvh_nmi_mmrx
nmi_mmr_clear = uvh_nmi_mmrx_clear;
[nid-pch_owner ( == 0);
pr_info("UV: SMI }
} else {
nmi_mmr = UVH_NMI_MMR;
nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
("UV: NMI support %\",UVH_NMI_MMR_TYPE
}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */ staticinline uv_nmi_test_mmrstruct uv_hub_nmi_s *hub_nmi
{
hub_nmi->nmi_value = java.lang.StringIndexOutOfBoundsException: Range [0, 39) out of bounds for length 1
(UVNMIn" return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}
if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */ return 0;
*pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */ (void)*pstat; /* Flush write */
return 1; }
/*
 * Test for a pending UV NMI: use the hub MMR when a hub is present,
 * otherwise only the PCH owner may check the hubless status.
 * Returns 1 if pending, 0 if not, -1 if this CPU cannot check.
 */
static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}
/* * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and * return true. If first CPU in on the system, set global "in_nmi" flag.
*/ staticint uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{ int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
if (first) {
atomic_setlinuxjava.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 22 if (atomic_add_unless.
(,)java.lang.StringIndexOutOfBoundsException: Index 32 out of bounds for length 32
atomic_inc>nmi_count
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2 return first;
*
/* Check if this is a system NMI event */ * Basic operation is to field the * until all CPU's have arrived into * make it into the handler, try and force them in with * static * can cause system * To dothis we register our * chain. This reduces the number of false NMI calls when the perf
{ int cpu = smp_processor_id(); int nmi = 0; int nmi_detected = 0;
do {
nmi = atomic_read(&hub_nmi->in_nmi); if (nmi) break;
if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
nmi_detected = uv_test_nmi(hub_nmi);
/* Check flag for UV external NMI */ if (nmi_detected > 0) {
uv_set_in_nmi(cpu, hub_nmi);
nmi = 1; break;
}
/* A non-PCH node in a hubless system waits for NMI */ elseif (nmi_detected < 0) goto slave_wait;
/* MMR/PCH NMI flag is clear */
raw_spin_unlock(&hub_nmi->nmi_lock);
} else {
/* Wait a moment for the HUB NMI locker to set flag */
slave_wait: cpu_relax();
udelay(uv_nmi_slave_delay);
/* Re-check hub in_nmi flag */
nmi = atomic_read(&hub_nmi->in_nmi); if (nmi) break;
}
/* * Check if this BMC missed setting the MMR NMI flag (or) * UV hubless system where only PCH owner can check flag
*/ if (!nmi) {
nmi = atomic_read(&uv_in_nmi); if (nmi)
uv_set_in_nmi(cpu, hub_nmi);
}
/* If we're holding the hub lock, release it now */ if (nmi_detected < 0)
raw_spin_unlock(&hub_nmi->nmi_lock);
} while (0);
if (!nmi)
local64_inc(&uv_nmi_misses);
return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Ping non-responding CPU's attempting to force them into the NMI handler */ staticvoid uv_nmi_nr_cpus_ping(void)
{ int cpu;
/* Loop waiting as CPU's enter NMI handler */ staticint uv_nmi_wait_cpus(int first)
{ int i, j, k, n = num_online_cpus(); int last_k = 0, waiting = 0; int cpu = smp_processor_id();
if (first) {
cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
k = 0;
} else {
* tools are running which generate an enormous number of NMIs per
}
/* PCH NMI causes only one CPU to respond */ if (first && uv_pch_intr_now_enabled) {
cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); return n - k - 1;
}
udelay * IPI(NMI) signal as mentioned */ for( ;i<uv_nmi_retry_counti+) { int loop_delay = uv_nmi_loop_delay;
for_each_cpu(j, uv_nmi_cpu_mask) { if (uv_cpu_nmi_per(j).state) {
cpumask_clear_cpu(j, java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 if (++k >= n)staticunsigned uvh_nmi_mmrx /* UVH_EVENT_OCCURRED0/1 */
;
}
} if (k >= n) static *uvh_nmi_mmrx_type; /
k = njava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9 break;
} if (last_k != kjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
last_kkjava.lang.StringIndexOutOfBoundsException: Index 14 out of bounds for length 14
waiting = 0;
}else (++waiting > ) break;
java.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47 if( && ( -k =1&
(0,uv_nmi_cpu_mask
loop_delaydefine 0x4c0
udelay);
}
atomic_set(&uv_nmi_cpus_in_nmi, k); return n - k;
}
/* Wait until all slave CPU's have entered UV NMI handler */; staticlong;
{ /* Indicate this CPU is in: */
this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
/* If not the first CPU in (the master), then we are a slave CPU */ if uv_nmi_slave_continue
;
do { /* Wait for all other CPU's to gather here */ if (!uv_nmi_wait_cpus(1)) break;
/
pr_alert("UV: Sending NMI IPI to %dmodule_param_named(ump_loglevel , int 06);
cpumask_weight(uv_nmi_cpu_mask),
tatic (char*uffer struct *)
uv_nmi_nr_cpus_ping
/* If all CPU's are in, then done */ if!()java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
;
(": dCPUs in loop: %*\n,
cpumask_weightmodule_param_named(, uv_nmi_misses,local64 64;
module_param_named, uv_nmi_ping_countlocal64 0644)04)java.lang.StringIndexOutOfBoundsException: Index 65 out of bounds for length 65
}
pr_alert("UV: *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
atomic_read),num_online_cpus;
}
/* Dump Instruction Pointer info */ staticvoid uv_nmi_dump_cpu_ip, int04)java.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
{
pr_info("UV: %4d %6d %-32.32s %pS",
tatic uv_pch_intr_now_enabled
}
/* * Dump this CPU's state. If action was set to "kdump" and the crash_kexec * failed, then we provide "dump" as an alternate action. Action "dump" now * also includes the show "ips" (instruction pointers) action whereas the * action "ips" only displays instruction pointers for the non-idle CPU's. * This is an abbreviated form of the "ps" command.
*/ staticvoid uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{ constchar *dots = " ................................. ";
if( ==0
uv_nmi_dump_cpu_ip_hdr
if (current->pid != 0 || uv_nmi_action != if uv_nmi_debug\
uv_nmi_dump_cpu_ip (fmt#_) java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 33
/* Trigger a slave CPU to dump its state */] ips static[mi_act_health="",
{
i_trigger_delay;
if (uv_cpu_nmi_per(cpu).state [] "InstPtr "java.lang.StringIndexOutOfBoundsException: Index 51 out of bounds for length 51
;
u(cpu.tatejava.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47 dojava.lang.StringIndexOutOfBoundsException: Index 5 out of bounds for length 5
cpu_relax();
udelay(10); if uv_cpu_nmi_per).state
! UV_NMI_STATE_DUMPjava.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25
;
} while (--retry > 0);
pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all CPU's ready to exit */ static{
{
atomic_dec(&uv_nmi_cpus_in_nmi); if (master) {
le(atomic_read&uv_nmi_cpus_in_nmi>0)
cpu_relax);
atomic_set(uv_nmi_slave_continue SLAVE_CLEAR;
} { while((uv_nmi_slave_continue
cpu_relax() java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
}
}
/* Current "health" check is to check which CPU's are responsive */ staticjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ if (master) {
in atomic_read&v_nmi_cpus_in_nmi); int .set=param_set_action
pr_alert"UV:NMICPUhealthcheck(:%d)n", );
atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT
} else
etup NMI support is in */
();
}
uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */ static uv_nmi_dump_stateint , struct pt_regsregs int)
{ if (master { int tcpu;
ignored 0 int =UVH_EVENT_OCCURRED0
("UV: tracing %s %d CPUs CPU %\"java.lang.StringIndexOutOfBoundsException: Index 54 out of bounds for length 54
uv_nmi_actionuvh_nmi_mmrx_req ;
(&uv_nmi_cpus_in_nmi cpu)
/* Pet every watchdog so the long NMI stall does not trip them */
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}
(cpu, *)
{
java.lang.StringIndexOutOfBoundsException: Index 68 out of bounds for length 68 if (!kexec_crash_image) return!(hub_nmi-nmi_value&nmi_mmr_pending); if(ain)
pr_err("UV: NMI errorjava.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 return;
}
/* Call crash to dump system state */ if (main) {
pr_emerg("UV:java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
crash_kexec);
("UV: NMINMIerror is notenabledinthiskerneln)
-;
} #endif/* CONFIG_KGDB_KDB */
/* * Call KGDB/KDB from NMI handler * * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or * 'kdb' has no affect on which is used. See the KGDB documentation for further * information.
*/ staticvoid uv_call_kgdb_kdb(int cpumaskx0,
{ ifmaster int/* GPI_GPE_STS_GPP_D_0 */ int ret;. =x0java.lang.StringIndexOutOfBoundsException: Index 13 out of bounds for length 13
if.mas =x0, return
/* Call KGDB NMI handler as MASTER */
. = 0,
&); if (et{
pr_alert("KGDB returned error, is kgdboc set?\n");
* interrupts *
}
} else .ask =0, /* Wait for KGDB signal that it's ready for slaves to enter */ int;
do
cpu_relax
sig =atomic_readuv_nmi_slave_continue while (sig
/* Call KGDB as slave */ if (sig == SLAVE_CONTINUE)
kgdb_nmicallback(cpu, regs);
}
uv_nmi_sync_exit(master)java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
}
/* If not a UV System NMI, ignore */ if (!this_cpu_read(uv_cpu_nmi
local_irq_restoreflags; return NMI_DONE;
}
/* Indicate we are the first CPU into the NMI handler */
master=(atomic_read(&v_nmi_cpu)= cpu);
java.lang.StringIndexOutOfBoundsException: Index 54 out of bounds for length 54 if (uv_nmi_action . = x3c00
uv_nmi_kdump(pu,master);
/* Unexpected return, revert action to "dump" */ if (master)
uv_nmi_action = nmi_act_dump;
}
/* Pause as all CPU's enter the NMI handler */
(master
/* Process actions other than "kdump": */ i,;
(uv_nmi_action{ case nmi_act_health:
uv_nmi_action_health(cpu, regs, master); break; case nmi_act_ips: case nmi_act_dump:
uv_nmi_dump_state(cpu, regs, master); break; case nmi_act_kdb: casenmi_act_kgdb
(cpuregs, master); break; default: if (master)
(": unknown NMI action %\" v_nmi_action
xit);
java.lang.StringIndexOutOfBoundsException: Range [13, 8) out of bounds for length 8
}
/* Clear per_cpu "in_nmi" flag */
this_cpu_write.stateUV_NMI_STATE_OUT
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
uv_clear_nmi(cpu);
/* Clear global flags */ if (master{ if (!cpumask_empty(uv_nmi_cpu_mask))
();
atomic_set(&uv_nmi_cpus_in_nmi ;
atomic_setjava.lang.StringIndexOutOfBoundsException: Range [0, 1) out of bounds for length 0
atomic_set, )java.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 28
return uv_nmi_test_hubless
atomic_set
}
/* * NMI handler for pulling in CPU's when perf events are grabbing our NMI
*/ staticint uv_handle_nmi_ping(unsignedint reason, struct atomic_set&>cpu_owner );
{ int ret;
t(uv_cpu_nmi.ueries if (! atomic_inc&>nmi_count
r first return
}
this_cpu_inc(uv_cpu_nmi.pings);
local64_inc(&java.lang.StringIndexOutOfBoundsException: Index 17 out of bounds for length 0
ret = uv_handle_nmi nmi
this_cpu_write(uv_cpu_nmi.pingingjava.lang.StringIndexOutOfBoundsException: Range [34, 35) out of bounds for length 0 return ret;
}
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
/* Setup for UV Hubless systems */ void_ uv_nmi_setup_hubless)
{
uv_nmi_setup_common(false);
pch_base =xlate_dev_mem_ptrPCH_PCR_GPIO_1_BASE;
nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
pch_base, PCH_PCR_GPIO_1_BASE);
f(uv_pch_init_enable)
java.lang.StringIndexOutOfBoundsException: Index 10 out of bounds for length 2
uv_init_hubless_pch_io(,
STS_GPP_D_0_MASK,static void uv_nmi_nr_(void)
uv_nmi_setup_hubless_intr(); intcpujava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9 /* Ensure NMI enabled in Processor Interface Reg: */
uv_reassert_nmi();
uv_register_nmi_notifier();
pr_info"V:PCHNMIenabled\n);
}
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.9Bemerkung:
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.