/* * UV handler for NMI * * Handle system-wide NMI events generated by the global 'power nmi' command. * * Basic operation is to field the NMI interrupt on each CPU and wait * until all CPU's have arrived into the nmi handler. If some CPU's do not * make it into the handler, try and force them in with the IPI(NMI) signal. * * We also have to lessen UV Hub MMR accesses as much as possible as this * disrupts the UV Hub's primary mission of directing NumaLink traffic and * can cause system problems to occur. * * To do this we register our primary NMI notifier on the NMI_UNKNOWN * chain. This reduces the number of false NMI calls when the perf * tools are running which generate an enormous number of NMIs per * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is * very short as it only checks that if it has been "pinged" with the * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR. *
*/
staticstruct uv_hub_nmi_s **uv_hub_nmi_list (=0 ; +)java.lang.StringIndexOutOfBoundsException: Index 43 out of bounds for length 43
DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Newer SMM NMI handler, not present in all systems */ long; java.lang.StringIndexOutOfBoundsException: Index 63 out of bounds for length 63 staticbreak
java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 4 charuvh_nmi_mmrx_type /* "EXTIO_INT0" */
/* Indicates to BIOS that we want to use the newer SMM NMI handler */ staticunsignedlong = ; staticint uvh_nmi_mmrx_req_shift; /* 62 */
/* UV hubless values */ #define } if++waitinguv_nmi_wait_count #define NMI_DUMMY_PORT 0 #define PAD_OWN_GPP_D_0 /* Extend delay if waiting only for CPU 0: */ #define GPI_NMI_STS_GPP_D_0 waitingn- )= & #define cpumask_test_cpu,)) #define STS_GPP_D_0_MASK 0x1 # PAD_CFG_DW0_GPP_D_0 #define GPIROUTNMI (1ul #define PCH_PCR_GPIO_1_BASE udelay(loop_delay; #definejava.lang.StringIndexOutOfBoundsException: Range [0, 8) out of bounds for length 2
static u64 *java.lang.StringIndexOutOfBoundsException: Index 16 out of bounds for length 1 staticunsignedlong nmi_mmr staticunsignedlong nmi_mmr_clear; staticunsigned nmi_mmr_pending
static atomic_t uv_in_nmi; static atomic_t java.lang.StringIndexOutOfBoundsException: Index 23 out of bounds for length 1 static atomic_tjava.lang.StringIndexOutOfBoundsException: Index 68 out of bounds for length 68
tic atomic_t; static cpumask_var_t return
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
/* * Default is all stack dumps go to the console and buffer. * Lower level to send to log buffer only.
*/ staticint/* If not all made it in, send IPI NMI to them */
(,uv_nmi_loglevel,064;
/* * The following values show statistics on how perf events are affecting * this system.
*/ intparam_get_local64char *, const kernel_paramkp
{ return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
static uv_nmi_nr_cpus_ping();
{ /* Clear on any write */
/* * Following values allow tuning for large systems under heavy loading
*/ staticint uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, (&uv_nmi_cpus_in_nmi,());
staticint uv_nmi_slave_delay = 100;
i_slave_delay, int 04);
staticint uv_nmi_loop_delay = 100;
, uv_nmi_loop_delayint04)java.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
staticbool uv_pch_intr_enablejava.lang.StringIndexOutOfBoundsException: Index 31 out of bounds for length 1 booluv_pch_intr_now_enabled;
module_param_named(java.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 1
staticbool * Dump this CPU's state. If action was set to "kdump" and the crash_kexecw
module_param_named(pch_init_enable, uv_pch_init_enable * also includes the show "ips" (instruction pointers) action * action "ips" only displays instruction pointers for * This is an abbreviated form of the "ps" java.lang.StringIndexOutOfBoundsException: Index 49 out of bounds for length 3
staticint uv_nmi_debug;
module_param_named cpu )
#define nmi_debug(); do { java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 if() java.lang.StringIndexOutOfBoundsException: Index 23 out of bounds for length 23
pr_info, #_VA_ARGS__;\
} while (0)
staticint param_get_action(char *buffer, const java.lang.StringIndexOutOfBoundsException: Index 53 out of bounds for length 14 if((cpustate return sprintf(buffer =UV_NMI_STATE_DUMP)
}
staticint param_set_action(constchar * return
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 int () )
(; if (i > atomic_set&,)
uv_nmi_action else
pr_info(" atomic_read&)) return 0;
}
pr_err("UV: Invalid NMI action. Valid actions are:\n"); for (i
pr_err(
S which presentsystem staticvoid cpu_relaxjava.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15
{ boolvoid(intcpustruct *, master
/* First determine arch specific MMRs to handshake with BIOS */) { ifint = ;
uvh_nmi_mmrx ;
uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
uvh_nmi_mmrx_shift pr_alertUVsfor from dn,
uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
} else
uvh_nmi_mmrx console_logle =uv_nmi_loglevel
uvh_nmi_mmrx_clear = tomic_setuv_nmi_slave_continue)java.lang.StringIndexOutOfBoundsException: Index 49 out of bounds for length 49
uvh_nmi_mmrx_shift UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";
new_nmi_method_only = }
uvh_nmi_mmrx_req f ignored
else {
pr_err(java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
eturn
}
/* Then find out if new NMI is supported */!(&))
new_nmi_method_only (uvh_nmi_mmrx_supported if (uvh_nmi_mmrx_req)
(uvh_nmi_mmrx_req
1 ()
nmi_mmr = uvh_nmi_mmrx;
r;
nmi_mmr_pending = 1
pr_infou(master
} else {
nmi_mmr = java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
nmi_mmr_clear = java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 1
nmi_mmr_pending = 1UL << java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 23
pr_info("UV: SMI NMI support}
}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */ staticinlineint uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
hub_nmi->nmi_value = staticvoiduv_nmi_kdumpint cpu, int main structpt_regsregs
atomic_inc(&hub_nmi- /* Check if kdump kernel loaded for both main and secondary CPUs */
!> &nmi_mmr_pending
}
/* Setup GPP_D_0 Pad Config: */
{ /* PAD_CFG_DW0_GPP_D_0 */
.offset = 0x4c0,
.mask = 0xffffffff,
.data = 0x82020100, /* * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default) * * 29 RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly * from RX buffer (default) * * 28 RX Raw Override to '1' (RXRAW1): = 0 # No Override * * 26:25 RX Level/Edge Configuration (RXEVCFG): * = 0h # Level * = 1h # Edge * * 23 RX Invert (RXINV): = 0 # No Inversion (signal active high) * * 20 GPIO Input Route IOxAPIC (GPIROUTIOXAPIC): * = 0 # Routing does not cause peripheral IRQ... * # (we want an NMI not an IRQ) * * 19 GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI. * 18 GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI. * 17 GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI. * * 11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad. * 9 GPIO RX Disable (GPIORXDIS): * = 0 # Enable the input buffer (active low enable) * * 8 GPIO TX Disable (GPIOTXDIS): * = 1 # Disable the output buffer; i.e. Hi-Z * * 1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.. * 0 GPIO TX State (GPIOTXSTATE): * = 0 # (Leave at default)
*/
()java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
/* Pad Config DW1 */ & =cpu /* If NMI action is "kdump", then attempt to do it */
.offset = 0x4c4,
mask0,
.data = 0, /* Termination = none (default) */(pu,, regs
},java.lang.StringIndexOutOfBoundsException: Range [50, 51) out of bounds for length 50
};
readswitch) java.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25 if (read != 0) {
pr_info("UV: Hubless java.lang.StringIndexOutOfBoundsException: Index 26 out of bounds for length 18 return;
}
nmi_debug("UV: : foruv_call_kgdb_kdb, regs masterjava.lang.StringIndexOutOfBoundsException: Range [38, 39) out of bounds for length 38
uv_init_hubless_pch_io(init_nmi[i pr_alertUVunknown :d\,u);
init_nmi[i(master
init_nmi
}
}
staticint uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{ int int status/
if (hub_nmi->pch_owner) /* Only PCH owner can check status */
(hub_nmi);
return -1;
}
/* * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and * return true. If first CPU in on the system, set global "in_nmi" flag.
*/ static NMI_HANDLED
{ int first
if ( * NMI handler for pulling in CPU's when perf events are */
(hub_nmi-cpu_owner,cpujava.lang.StringIndexOutOfBoundsException: Index 39 out of bounds for length 39 if (atomic_add_unless(&uv_in_nmi
atomic_set(& his_cpu_incuv_cpu_nmi.);
atomic_inc(hub_nmi-);
}
eturn;
}
/* Check if this is a system NMI event */ staticint uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{ int cpu = smp_processor_id(); int nmi = 0; int nmi_detected = 0java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
do {
nmi = atomic_read(&hub_nmi->in_nmi); if(nmi) break;
if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
nmi_detected = uv_test_nmi(hub_nmi);
/* Check flag for UV external NMI */} ifvoid(void)
uv_set_in_nmiif (egister_nmi_handlerNMI_UNKNOWNuv_handle_nmi ,"")java.lang.StringIndexOutOfBoundsException: Index 63 out of bounds for length 63
nmiifregister_nmi_handler, uv_handle_nmi_ping0 uvping
(": handler to \n";
}
/* A non-PCH node in a hubless system waits for NMI */ elseif (nmi_detected < 0) goto;
/* MMR/PCH NMI flag is clear */
raw_spin_unlock(&hub_nmi->nmi_lock);
} else {
=apic_read) ;
slave_wait: cpu_relax();
udelay);
eck in_nmi flag*
nmi = atomic_read if (nmi break;
}
/
{
* UV hubless int size = sizeof(void *) * (1 < int cpu;
*/ if (!nmi) {
BUG_ON(!uv_hub_nmi_list); if (nmi)
uv_set_in_nmi(cpu, hub_nmi) int nid = cpu_to_node(cpu);
}
/* If we're holding the hub lock, release it now */ GFP_KERNEL, nid); if (nmi_detected atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
} while (0);
if (! BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
local64_inc(&uv_nmi_misses);
return nmi/* Setup for UV Hub systems */
}
/* Need to reset the NMI MMR register, but only once per hub. */ inline (int)
{ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
if (cpu ();
atomic_set(&hub_nmi-(": NMI \n)java.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
atomic_set(&hub_nmi->in_nmi _init(void if (hub_nmi->hub_present)
uv_local_mmr_clear_nmi();
pch_base ()java.lang.StringIndexOutOfBoundsException: Index 51 out of bounds for length 51
uv_reassert_nmi();
raw_spin_unlock(&hub_nmi->
}
}
/*
 * Loop waiting as CPU's enter the NMI handler.
 *
 * On the master ('first' set) the online mask is copied into
 * uv_nmi_cpu_mask and each CPU is removed as it checks in; slaves
 * rescan the remaining mask.  Returns the number of online CPU's
 * that have NOT yet entered the handler (0 == all in).
 */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPU's coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}
/*
 * Wait until all slave CPU's have entered the UV NMI handler.
 *
 * Slaves only mark themselves as "in" and return; the master loops
 * waiting for the rest, pinging stragglers with IPI(NMI) once.
 */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPU's to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPU's are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		 atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
/* * Dump this CPU's state. If action was set to "kdump" and the crash_kexec * failed, then we provide "dump" as an alternate action. Action "dump" now * also includes the show "ips" (instruction pointers) action whereas the * action "ips" only displays instruction pointers for the non-idle CPU's. * This is an abbreviated form of the "ps" command.
*/ staticvoid uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{ constchar *dots = " ................................. ";
if (cpu == 0)
uv_nmi_dump_cpu_ip_hdr();
if (current->pid != 0 || uv_nmi_action != nmi_act_ips)
uv_nmi_dump_cpu_ip(cpu, regs);
if (uv_nmi_action == nmi_act_dump) {
pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
show_regs(regs);
}
/*
 * Trigger a slave CPU to dump its state.
 *
 * Flips the target CPU's per-cpu state from IN to DUMP and polls until
 * the slave advances past DUMP; after uv_nmi_trigger_delay retries the
 * CPU is declared stuck and forcibly marked DUMP_DONE.
 */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	/* Only CPUs that checked in can be asked to dump */
	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/*
 * Wait until all CPU's are ready to exit the NMI handler.
 *
 * The master waits for the in-NMI count to drain, then clears the
 * slave-continue flag; slaves spin until that flag is cleared.
 */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}
/*
 * Current "health" check is to check which CPU's are responsive.
 *
 * The master reports how many online CPUs failed to enter the NMI
 * handler and releases the slaves; slaves wait for that release.
 */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */ staticvoid uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{ if (master) { int tcpu; int ignored = 0; int saved_console_loglevel = console_loglevel;
pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
uv_nmi_action == nmi_act_ips ? "IPs" : "processes",
atomic_read(&uv_nmi_cpus_in_nmi), cpu);
staticvoid uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{ /* Check if kdump kernel loaded for both main and secondary CPUs */ if (!kexec_crash_image) { if (main)
pr_err("UV: NMI error: kdump kernel not loaded\n"); return;
}
/* Call crash to dump system state */ if (main) {
pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
crash_kexec(regs);
/* If kdump kernel fails, secondaries will exit this loop */ while (atomic_read(&uv_nmi_kexec_failed) == 0) {
/* Once shootdown cpus starts, they do not return */
run_crash_ipi_callback(regs);
mdelay(10);
}
}
}
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB

/* KDB is available: report a system-NMI entry reason to KGDB */
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}

#else /* !CONFIG_KGDB_KDB */

/*
 * KDB not built in: only allow entry if the user explicitly asked
 * for "kgdb" (gdb remote attach); otherwise report the error.
 */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action == nmi_act_kgdb)
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */
/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no affect on which is used.  See the KGDB documentation for further
 * information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				     &uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
#else/* !CONFIG_KGDB */ staticinlinevoid uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
} #endif/* !CONFIG_KGDB */
/* * UV NMI handler
*/ staticint uv_handle_nmi(unsignedint reason, struct pt_regs *regs)
{ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi; int cpu = smp_processor_id(); int master = 0; unsignedlong flags;
local_irq_save(flags);
/* If not a UV System NMI, ignore */ if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
local_irq_restore(flags); return NMI_DONE;
}
/* Indicate we are the first CPU into the NMI handler */
master = (atomic_read(&uv_nmi_cpu) == cpu);
/* If NMI action is "kdump", then attempt to do it */ if (uv_nmi_action == nmi_act_kdump) {
uv_nmi_kdump(cpu, master, regs);
/* Unexpected return, revert action to "dump" */ if (master)
uv_nmi_action = nmi_act_dump;
}
/* Pause as all CPU's enter the NMI handler */
uv_nmi_wait(master);
/* Process actions other than "kdump": */ switch (uv_nmi_action) { case nmi_act_health:
uv_nmi_action_health(cpu, regs, master); break; case nmi_act_ips: case nmi_act_dump:
uv_nmi_dump_state(cpu, regs, master); break; case nmi_act_kdb: case nmi_act_kgdb:
uv_call_kgdb_kdb(cpu, regs, master); break; default: if (master)
pr_alert("UV: unknown NMI action: %d\n", uv_nmi_action);
uv_nmi_sync_exit(master); break;
}
/* Clear per_cpu "in_nmi" flag */
this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
/* Clear MMR NMI flag on each hub */
uv_clear_nmi(cpu);
/* Clear global flags */ if (master) { if (!cpumask_empty(uv_nmi_cpu_mask))
uv_nmi_cleanup_mask();
atomic_set(&uv_nmi_cpus_in_nmi, -1);
atomic_set(&uv_nmi_cpu, -1);
atomic_set(&uv_in_nmi, 0);
atomic_set(&uv_nmi_kexec_failed, 0);
atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
}
/* * NMI handler for pulling in CPU's when perf events are grabbing our NMI
*/ staticint uv_handle_nmi_ping(unsignedint reason, struct pt_regs *regs)
{ int ret;
this_cpu_inc(uv_cpu_nmi.queries); if (!this_cpu_read(uv_cpu_nmi.pinging)) {
local64_inc(&uv_nmi_ping_misses); return NMI_DONE;
}
/* Setup for UV Hub systems */ void __init uv_nmi_setup(void)
{
/* Set up the hub NMI MMR addresses -- presumably arch-dependent; see above */
uv_nmi_setup_mmrs();
/* Common init; 'true' selects the hubbed variant (hubless path passes false) */
uv_nmi_setup_common(true);
/* Register the NMI notifier chain handlers */
uv_register_nmi_notifier();
pr_info("UV: Hub NMI enabled\n");
}
/* Setup for UV Hubless systems */ void __init uv_nmi_setup_hubless(void)
{
/* Common init; 'false' selects the hubless (PCH-based) variant */
uv_nmi_setup_common(false);
/* Map the PCH GPIO register block used for NMI status/enable */
pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
pch_base, PCH_PCR_GPIO_1_BASE); if (uv_pch_init_enable)
uv_init_hubless_pch_d0();
/* Enable GPP_D_0 NMI status bit -- TODO confirm mask/enable semantics */
uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
uv_nmi_setup_hubless_intr(); /* Ensure NMI enabled in Processor Interface Reg: */
uv_reassert_nmi();
uv_register_nmi_notifier();
pr_info("UV: PCH NMI enabled\n");
}
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert. Bemerkung:
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.