	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
if (!no_nospec && !cpu_mitigations_off())
enable_barrier_nospec(enable);
}
/* This is the generic flag used by other architectures */ staticint __init handle_ssbd(char *p)
{ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { /* Until firmware tells us, we have the barrier with auto */ return 0;
} elseif (strncmp(p, "off", 3) == 0) {
handle_no_stf_barrier(NULL); return 0;
} else return 1;
/*
 * This is the generic flag used by other architectures.
 *
 * Unconditionally disables the STF barrier; always consumes the option.
 */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
staticvoid stf_barrier_enable(bool enable)
{ if (enable)
do_stf_barrier_fixups(stf_enabled_flush_types); else
do_stf_barrier_fixups(STF_BARRIER_NONE);
/* Default to fallback in case fw-features are not available */ if (cpu_has_feature(CPU_FTR_ARCH_300))
type = STF_BARRIER_EIEIO; elseif (cpu_has_feature(CPU_FTR_ARCH_207S))
type = STF_BARRIER_SYNC_ORI; elseif (cpu_has_feature(CPU_FTR_ARCH_206))
type = STF_BARRIER_FALLBACK; else
type = STF_BARRIER_NONE;
if (!no_stf_barrier && !cpu_mitigations_off())
stf_barrier_enable(enable);
}
/*
 * sysfs show handler for
 * /sys/devices/system/cpu/vulnerabilities/spec_store_bypass.
 *
 * Reports the active barrier type when the STF barrier is enabled,
 * "Not affected" when firmware indicated no L1D flush is needed in either
 * HV or PR mode, and "Vulnerable" otherwise.
 */
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;

		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}

		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
/* Report the speculative-store-bypass state for prctl(PR_GET_SPECULATION_CTRL). */
static int ssb_prctl_get(struct task_struct *task)
{
	/*
	 * The STF_BARRIER feature is on by default, so if it's off that means
	 * firmware has explicitly said the CPU is not vulnerable via either
	 * the hypercall or device tree.
	 */
	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
		return PR_SPEC_NOT_AFFECTED;

	/*
	 * If the system's CPU has no known barrier (see setup_stf_barrier())
	 * then assume that the CPU is not vulnerable.
	 */
	if (stf_enabled_flush_types == STF_BARRIER_NONE)
		return PR_SPEC_NOT_AFFECTED;

	/*
	 * Otherwise the CPU is vulnerable. The barrier is not a global or
	 * per-process mitigation, so the only value that can be reported here
	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
	 */
	return PR_SPEC_ENABLE;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsignedlong which)
{ switch (which) { case PR_SPEC_STORE_BYPASS: return ssb_prctl_get(task); default: return -ENODEV;
}
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
site2 = &patch__call_kvm_flush_link_stack_p9; // This controls the branch from guest_exit_cont to kvm_flush_link_stack if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
} else { // Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
} #endif
// Patch out the bcctr first, then nop the rest
site = &patch__call_flush_branch_caches3;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches2;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches1;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
// This controls the branch from _switch to flush_branch_caches if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { // Nothing to be done
} elseif (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { // Patch in the bcctr last
site = &patch__call_flush_branch_caches1;
patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
site = &patch__call_flush_branch_caches2;
patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
site = &patch__call_flush_branch_caches3;
patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));
// If we just need to flush the link stack, early return if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(&patch__flush_link_stack_return,
ppc_inst(PPC_RAW_BLR()));
// If we have flush instruction, early return
} elseif (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
patch_instruction_site(&patch__flush_count_cache_return,
ppc_inst(PPC_RAW_BLR()));
}
}
}
staticvoid toggle_branch_cache_flush(bool enable)
{ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
if (no_spectrev2 || cpu_mitigations_off()) { if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
enable = false;
}
/* * There's no firmware feature flag/hypervisor bit to tell us we need to * flush the link stack on context switch. So we set it here if we see * either of the Spectre v2 mitigations that aim to protect userspace.
*/ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
/* * The RFI flush is not KPTI, but because users will see doco that says to use * nopti we hijack that option here to also disable the RFI flush.
*/ staticint __init handle_no_pti(char *p)
{
pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
handle_no_rfi_flush(NULL); return 0;
}
early_param("nopti", handle_no_pti);
/* Empty IPI callback: the point is only to enter and exit the kernel. */
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}
/*
 * Allocate the fallback L1D flush area (used when no dedicated flush
 * instruction is available) and point every CPU's paca at it.
 */
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	/*
	 * NOTE(review): the tail of this function was truncated in this copy
	 * ('cpu' was declared but unused). Restored the per-CPU paca setup to
	 * match the upstream structure; verify against the canonical source.
	 */
	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];

		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
/*
 * NOTE(review): the German text below is webpage extraction residue, not
 * source code, and should be deleted from this file. Translated: "The
 * information on this website was compiled carefully to the best of our
 * knowledge. However, neither the completeness, nor the correctness, nor
 * the quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */