if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
/* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so.
*/ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np))
security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np))
security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np))
security_ftr_clear(SEC_FTR_STF_BARRIER);
}
/*
 * NOTE(review): fragment — the enclosing function header was lost in
 * extraction (presumably the security-mitigations setup; confirm against
 * upstream). Line structure restored; statements unchanged.
 */
if (fw_features) {
	init_fw_feat_flags(fw_features);
	of_node_put(fw_features);

	/* Pick the L1D flush mechanism advertised by firmware */
	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
		type = L1D_FLUSH_MTTRIG;
	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
		type = L1D_FLUSH_ORI;
}

/*
 * The issues addressed by the entry and uaccess flush don't affect P7
 * or P8, so on bare metal disable them explicitly in case firmware does
 * not include the features to disable them. POWER9 and newer processors
 * should have the appropriate firmware flags.
 */
if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p) ||
    pvr_version_is(PVR_POWER8E) || pvr_version_is(PVR_POWER8NVL) ||
    pvr_version_is(PVR_POWER8)) {
	security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
	security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
}
/* * Initialize the LPC bus now so that legacy serial * ports can be found on it
*/
opal_lpc_init();
#ifdef CONFIG_HVC_OPAL if (firmware_has_feature(FW_FEATURE_OPAL))
hvc_opal_init_early(); else #endif
add_preferred_console("hvc", 0, NULL);
#ifdef CONFIG_PPC_64S_HASH_MMU if (!radix_enabled()) {
size_t size = sizeof(struct slb_entry) * mmu_slb_size; int i;
/* Allocate per cpu area to save old slb contents during MCE */
for_each_possible_cpu(i) {
paca_ptrs[i]->mce_faulty_slbs =
memblock_alloc_node(size,
__alignof__(struct slb_entry),
cpu_to_node(i));
}
} #endif
}
/*
 * Platform interrupt-controller init: use XIVE if its native driver
 * initializes successfully, otherwise fall back to XICS.
 *
 * NOTE(review): the extracted text read "staticvoid" and was missing this
 * function's closing brace; both are restored here. Upstream may contain
 * additional statements dropped by extraction — confirm against the
 * kernel tree.
 */
static void __init pnv_init_IRQ(void)
{
	/* Try using a XIVE if available, otherwise use a XICS */
	if (!xive_native_init())
		xics_init();
}
/*
 * Machine shutdown hook: quiesce PCI, then stop all OPAL activity so the
 * machine is safe to halt or kexec.
 */
static void pnv_shutdown(void)
{
	/* Let the PCI code clear up IODA tables */
	pnv_pci_shutdown();

	/*
	 * Stop OPAL activity: Unregister all OPAL interrupts so they
	 * don't fire up while we kexec and make sure all potentially
	 * DMA'ing ops are complete (such as dump retrieval).
	 */
	opal_shutdown();
}
#ifdef CONFIG_KEXEC_CORE
/*
 * Spin until every other online CPU has returned to OPAL (or times out),
 * so kexec can proceed safely.
 *
 * NOTE(review): the extracted text used 'rc', 'status' and 'timeout'
 * without declarations and had an unmatched closing brace, indicating the
 * per-CPU outer loop and its locals were dropped by extraction; they are
 * restored here to match the upstream implementation — verify against the
 * kernel tree.
 */
static void pnv_kexec_wait_secondaries_down(void)
{
	int my_cpu, i, notified = -1;

	my_cpu = get_cpu();

	for_each_online_cpu(i) {
		uint8_t status;
		int64_t rc, timeout = 1000;

		if (i == my_cpu)
			continue;

		for (;;) {
			rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
						   &status);
			if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
				break;
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter OPAL\n",
				       i, paca_ptrs[i]->hw_cpu_id);
				notified = i;
			}

			/*
			 * On crash secondaries might be unreachable or hung,
			 * so timeout if we've waited too long
			 */
			mdelay(1);
			if (timeout-- == 0) {
				printk(KERN_ERR "kexec: timed out waiting for "
				       "cpu %d (physical %d) to enter OPAL\n",
				       i, paca_ptrs[i]->hw_cpu_id);
				break;
			}
		}
	}
}
staticvoid pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
u64 reinit_flags;
if (xive_enabled())
xive_teardown_cpu(); else
xics_kexec_teardown_cpu(secondary);
/* On OPAL, we return all CPUs to firmware */ if (!firmware_has_feature(FW_FEATURE_OPAL)) return;
if (secondary) { /* Return secondary CPUs to firmware on OPAL v3 */
mb();
get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
mb();
/* Return the CPU to OPAL */
opal_return_cpu();
} else { /* Primary waits for the secondaries to have reached OPAL */
pnv_kexec_wait_secondaries_down();
/* Switch XIVE back to emulation mode */ if (xive_enabled())
xive_shutdown();
/* * We might be running as little-endian - now that interrupts * are disabled, reset the HILE bit to big-endian so we don't * take interrupts in the wrong endian later * * We reinit to enable both radix and hash on P9 to ensure * the mode used by the next kernel is always supported.
*/
reinit_flags = OPAL_REINIT_CPUS_HILE_BE; if (cpu_has_feature(CPU_FTR_ARCH_300))
reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX |
OPAL_REINIT_CPUS_MMU_HASH;
opal_reinit_cpus(reinit_flags);
}
} #endif/* CONFIG_KEXEC_CORE */
if (opal_reinit_cpus(OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED) != OPAL_SUCCESS) return;
pr_info("Enabling TM (Transactional Memory) with Suspend Disabled\n");
cur_cpu_spec->cpu_features |= CPU_FTR_TM; /* Make sure "normal" HTM is off (it should be) */
cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_HTM; /* Turn on no suspend mode, and HTM no SC */
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NO_SUSPEND | \
PPC_FEATURE2_HTM_NOSC;
tm_suspend_disabled = true;
} #endif/* CONFIG_PPC_TRANSACTIONAL_MEM */
/* * Returns the cpu frequency for 'cpu' in Hz. This is used by * /proc/cpuinfo
*/ staticunsignedlong pnv_get_proc_freq(unsignedint cpu)
{ unsignedlong ret_freq;
ret_freq = cpufreq_get(cpu) * 1000ul;
/* * If the backend cpufreq driver does not exist, * then fallback to old way of reporting the clockrate.
*/ if (!ret_freq)
ret_freq = ppc_proc_freq; return ret_freq;
}
/*
 * Early machine check hook: delegate to the CPU-specific early handler
 * registered in cur_cpu_spec, if any, and report whether it handled the
 * event.
 *
 * NOTE(review): the extracted text was cut off after the delegation; the
 * 'return handled;' and closing brace are restored so this non-void
 * function returns on all paths — confirm against the kernel tree.
 */
static long pnv_machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);

	return handled;
}
/*
 * NOTE(review): the following disclaimer is boilerplate from the web page
 * this source was scraped from, not part of the kernel source (translated
 * from German and wrapped in a comment so it is no longer bare invalid
 * tokens):
 *
 * The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */