/* * Speculation Vulnerability Handling * * Each vulnerability is handled with the following functions: * <vuln>_select_mitigation() -- Selects a mitigation to use. This should * take into account all relevant command line * options. * <vuln>_update_mitigation() -- This is called after all vulnerabilities have * selected a mitigation, in case the selection * may want to change based on other choices * made. This function is optional. * <vuln>_apply_mitigation() -- Enable the selected mitigation. * * The compile-time mitigation in all cases should be AUTO. An explicit * command-line option can override AUTO. If no such option is * provided, <vuln>_select_mitigation() will override AUTO to the best * mitigation option.
*/
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
/*
 * Set when the CPU has run a potentially malicious guest. An IBPB will
 * be needed before running userspace. That IBPB will flush the branch
 * predictor content.
 */
DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
/*
 * Update the SPEC_CTRL MSR and its per-CPU cached copy unconditionally.
 * Used when the new value must reach the hardware immediately.
 */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	/* Cached value already matches: nothing to write. */
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaults to false
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
/*
 * NOTE(review): this is the tail of an attack-vector/SMT summary printer;
 * the function header and the opening pr_info()/pr_cont() prefix are not
 * visible in this chunk, so the code below is kept byte-identical.
 */
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
pr_cont("user_kernel, ");
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
pr_cont("user_user, ");
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
pr_cont("guest_host, ");
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
pr_cont("guest_guest, ");
pr_cont("SMT mitigations: ");
/* No default: smt_mitigations is assumed to be one of these three values. */
switch (smt_mitigations) { case SMT_MITIGATIONS_OFF:
pr_cont("off\n"); break; case SMT_MITIGATIONS_AUTO:
pr_cont("auto\n"); break; case SMT_MITIGATIONS_ON:
pr_cont("on\n");
}
}
/*
 * Boot-time entry point: read hardware state, then run every
 * <vuln>_select_mitigation() followed by the <vuln>_update_mitigation()
 * hooks that depend on each other's choices.
 *
 * NOTE(review): the closing brace of this function (and any trailing
 * *_apply_mitigation() calls) is not visible in this chunk.
 */
void __init cpu_select_mitigations(void)
{ /* * Read the SPEC_CTRL MSR to account for reserved bits which may * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD * init code as it is not enumerated and depends on the family.
*/ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/* * Previously running kernel (kexec), may have some controls * turned ON. Clear them and let the mitigations setup below * rediscover them based on configuration.
*/
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
x86_arch_cap_msr = x86_read_arch_cap_msr();
cpu_print_attack_vectors();
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
retbleed_select_mitigation();
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
mmio_select_mitigation();
rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
srso_select_mitigation();
gds_select_mitigation();
its_select_mitigation();
bhi_select_mitigation();
tsa_select_mitigation();
vmscape_select_mitigation();
/* * After mitigations are selected, some may need to update their * choices.
*/
spectre_v2_update_mitigation(); /* * retbleed_update_mitigation() relies on the state set by * spectre_v2_update_mitigation(); specifically it wants to know about * spectre_v2=ibrs.
*/
retbleed_update_mitigation(); /* * its_update_mitigation() depends on spectre_v2_update_mitigation() * and retbleed_update_mitigation().
*/
its_update_mitigation();
/* * spectre_v2_user_update_mitigation() depends on * retbleed_update_mitigation(), specifically the STIBP * selection is forced for UNRET or IBPB.
*/
spectre_v2_user_update_mitigation();
mds_update_mitigation();
taa_update_mitigation();
mmio_update_mitigation();
rfds_update_mitigation();
bhi_update_mitigation(); /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
srso_update_mitigation();
vmscape_update_mitigation();
/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 *
 * NOTE(review): code appears to be missing from this chunk — 'msrval'
 * is written to MSR_AMD64_LS_CFG below but is never computed in the
 * visible code (presumably derived from guestval/hostval and 'setguest';
 * confirm against the full source).
 */ void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
u64 guestval, hostval; struct thread_info *ti = current_thread_info();
/* * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
*/ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
!static_cpu_has(X86_FEATURE_VIRT_SSBD)) return;
/* * If the host has SSBD mitigation enabled, force it in the host's * virtual MSR value. If its not permanently enabled, evaluate * current's TIF_SSBD thread flag.
*/ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
hostval = SPEC_CTRL_SSBD; else
hostval = ssbd_tif_to_spec_ctrl(ti->flags);
/* Sanitize the guest value */
guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); elseif (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
wrmsrq(MSR_AMD64_LS_CFG, msrval);
}
/*
 * NOTE(review): this region interleaves several fragments — the pr_fmt
 * redefinition, should_mitigate_vuln() (its default case and closing
 * brace are not visible), the rfds_mitigation/verw flag declarations,
 * and the tail of mds_select_mitigation() (its header is not visible).
 * Code is kept byte-identical; only comments were added.
 */
#undef pr_fmt #define pr_fmt(fmt) "MDS: " fmt
/* * Returns true if vulnerability should be mitigated based on the * selected attack vector controls. * * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
*/ staticbool __init should_mitigate_vuln(unsignedint bug)
{ switch (bug) { /* * The only runtime-selected spectre_v1 mitigations in the kernel are * related to SWAPGS protection on kernel entry. Therefore, protection * is only required for the user->kernel attack vector.
*/ case X86_BUG_SPECTRE_V1: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
case X86_BUG_SPECTRE_V2: case X86_BUG_RETBLEED: case X86_BUG_L1TF: case X86_BUG_ITS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
case X86_BUG_SPECTRE_V2_USER: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
/* * All the vulnerabilities below allow potentially leaking data * across address spaces. Therefore, mitigation is required for * any of these 4 attack vectors.
*/ case X86_BUG_MDS: case X86_BUG_TAA: case X86_BUG_MMIO_STALE_DATA: case X86_BUG_RFDS: case X86_BUG_SRBDS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
/* Default mitigation for Register File Data Sampling */ staticenum rfds_mitigations rfds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
/* * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
*/ staticbool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
if (mds_mitigation == MDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_MDS))
mds_mitigation = MDS_MITIGATION_FULL; else
mds_mitigation = MDS_MITIGATION_OFF;
}
if (mds_mitigation == MDS_MITIGATION_OFF) return;
verw_clear_cpu_buf_mitigation_selected = true;
}
/*
 * NOTE(review): mds_update_mitigation() below appears truncated (its
 * closing brace / status print is not visible), and from the tsx=off
 * check onward the code belongs to taa_select_mitigation(), whose
 * header is not visible in this chunk. Kept byte-identical.
 */
staticvoid __init mds_update_mitigation(void)
{ if (!boot_cpu_has_bug(X86_BUG_MDS)) return;
/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */ if (verw_clear_cpu_buf_mitigation_selected)
mds_mitigation = MDS_MITIGATION_FULL;
if (mds_mitigation == MDS_MITIGATION_FULL) { if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
}
/* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) {
taa_mitigation = TAA_MITIGATION_TSX_DISABLED; return;
}
/* Microcode will be checked in taa_update_mitigation(). */ if (taa_mitigation == TAA_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_TAA))
taa_mitigation = TAA_MITIGATION_VERW; else
taa_mitigation = TAA_MITIGATION_OFF;
}
if (taa_mitigation != TAA_MITIGATION_OFF)
verw_clear_cpu_buf_mitigation_selected = true;
}
/*
 * Finalize the TAA (TSX Async Abort) choice once the other VERW-based
 * mitigations (MDS/MMIO/RFDS) have made their selections, and verify
 * that the required microcode is present.
 */
static void __init taa_update_mitigation(void)
{
	if (!taa_vulnerable())
		return;

	/* Another VERW-based mitigation is already enabled; reuse it. */
	if (verw_clear_cpu_buf_mitigation_selected)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation == TAA_MITIGATION_VERW) {
		/* Check if the requisite ucode is available. */
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

		/*
		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
		 * A microcode update fixes this behavior to clear CPU buffers. It also
		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
		 * ARCH_CAP_TSX_CTRL_MSR bit.
		 *
		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
		 * update is required.
		 */
		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
		    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", taa_strings[taa_mitigation]);
}
/*
 * Apply the selected TAA mitigation: force VERW-based CPU buffer
 * clearing and, if requested, disable SMT.
 */
static void __init taa_apply_mitigation(void)
{
	if (taa_mitigation == TAA_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
		/*
		 * TSX is enabled, select alternate mitigation for TAA which is
		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
		 *
		 * For guests that can't determine whether the correct microcode is
		 * present on host, enable the mitigation for UCODE_NEEDED as well.
		 */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
			cpu_smt_disable(false);
	}
}
/*
 * NOTE(review): tsx_async_abort_parse_cmdline() below is truncated after
 * its first guard; the remaining lines belong to mmio_select_mitigation(),
 * whose header is not visible in this chunk. Kept byte-identical.
 */
staticint __init tsx_async_abort_parse_cmdline(char *str)
{ if (!boot_cpu_has_bug(X86_BUG_TAA)) return 0;
/* Microcode will be checked in mmio_update_mitigation(). */ if (mmio_mitigation == MMIO_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
mmio_mitigation = MMIO_MITIGATION_VERW; else
mmio_mitigation = MMIO_MITIGATION_OFF;
}
if (mmio_mitigation == MMIO_MITIGATION_OFF) return;
/* * Enable CPU buffer clear mitigation for host and VMM, if also affected * by MDS or TAA.
*/ if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
verw_clear_cpu_buf_mitigation_selected = true;
}
/*
 * Finalize the MMIO Stale Data choice once other VERW-based mitigations
 * have selected, and verify the required microcode is present.
 */
static void __init mmio_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return;

	/* Another VERW-based mitigation is already enabled; reuse it. */
	if (verw_clear_cpu_buf_mitigation_selected)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
		/*
		 * Check if the system has the right microcode.
		 *
		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
		 * affected systems.
		 */
		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
		       !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", mmio_strings[mmio_mitigation]);
}
/*
 * Apply the selected MMIO Stale Data mitigation: choose between the
 * kernel-entry VERW clear and the VM-entry-only clear, enable idle-time
 * Fill buffer clearing where required, and optionally disable SMT.
 */
static void __init mmio_apply_mitigation(void)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
	 * not being used.
	 */
	if (verw_clear_cpu_buf_mitigation_selected) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		static_branch_disable(&cpu_buf_vm_clear);
	} else {
		static_branch_enable(&cpu_buf_vm_clear);
	}

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&cpu_buf_idle_clear);

	if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
		cpu_smt_disable(false);
}
/*
 * NOTE(review): this region interleaves three fragments —
 * mmio_stale_data_parse_cmdline() truncated after its guard, the body of
 * an update_srbds_msr()-style helper (header not visible), and the tail
 * of srbds_select_mitigation() (header not visible). Kept byte-identical.
 */
staticint __init mmio_stale_data_parse_cmdline(char *str)
{ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) return 0;
if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) return;
/* * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX * being disabled and it hasn't received the SRBDS MSR microcode.
*/ if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) return;
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
switch (srbds_mitigation) { case SRBDS_MITIGATION_OFF: case SRBDS_MITIGATION_TSX_OFF:
mcu_ctrl |= RNGDS_MITG_DIS; break; case SRBDS_MITIGATION_FULL:
mcu_ctrl &= ~RNGDS_MITG_DIS; break; default: break;
}
if (srbds_mitigation == SRBDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_SRBDS))
srbds_mitigation = SRBDS_MITIGATION_FULL; else {
srbds_mitigation = SRBDS_MITIGATION_OFF; return;
}
}
/* * Check to see if this is one of the MDS_NO systems supporting TSX that * are only exposed to SRBDS when TSX is enabled or when CPU is affected * by Processor MMIO Stale Data vulnerability.
*/ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; elseif (boot_cpu_has(X86_FEATURE_HYPERVISOR))
srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; elseif (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; elseif (srbds_off)
srbds_mitigation = SRBDS_MITIGATION_OFF;
/*
 * NOTE(review): two fragments — the body of an update_gds_msr()-style
 * helper (header not visible; writes/verifies GDS_MITG_DIS in
 * MSR_IA32_MCU_OPT_CTRL) followed by the tail of gds_select_mitigation()
 * (header not visible). Kept byte-identical.
 */
switch (gds_mitigation) { case GDS_MITIGATION_OFF:
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
mcu_ctrl |= GDS_MITG_DIS; break; case GDS_MITIGATION_FULL_LOCKED: /* * The LOCKED state comes from the boot CPU. APs might not have * the same state. Make sure the mitigation is enabled on all * CPUs.
*/ case GDS_MITIGATION_FULL:
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
mcu_ctrl &= ~GDS_MITG_DIS; break; case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: case GDS_MITIGATION_AUTO: return;
}
wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
/* * Check to make sure that the WRMSR value was not ignored. Writes to * GDS_MITG_DIS will be ignored if this processor is locked but the boot * processor was not.
*/
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
gds_mitigation = GDS_MITIGATION_HYPERVISOR; return;
}
/* Will verify below that mitigation _can_ be disabled */ if (gds_mitigation == GDS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_GDS))
gds_mitigation = GDS_MITIGATION_FULL; else
gds_mitigation = GDS_MITIGATION_OFF;
}
/* No microcode */ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { if (gds_mitigation != GDS_MITIGATION_FORCE)
gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; return;
}
/* Microcode has mitigation, use it */ if (gds_mitigation == GDS_MITIGATION_FORCE)
gds_mitigation = GDS_MITIGATION_FULL;
rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF)
pr_warn("Mitigation locked. Disable failed.\n");
/* * The mitigation is selected from the boot CPU. All other CPUs * _should_ have the same state. If the boot CPU isn't locked * but others are then update_gds_msr() will WARN() of the state * mismatch. If the boot CPU is locked update_gds_msr() will * ensure the other CPUs have the mitigation enabled.
*/
gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
}
}
/*
 * Apply the selected GDS (Gather Data Sampling) mitigation: program the
 * microcode control when available, otherwise disable AVX if forced.
 */
static void __init gds_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	/* Microcode is present */
	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
		update_gds_msr();
	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
		/*
		 * This only needs to be done on the boot CPU so do it
		 * here rather than in update_gds_msr()
		 */
		setup_clear_cpu_cap(X86_FEATURE_AVX);
		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
	}

	pr_info("%s\n", gds_strings[gds_mitigation]);
}
/*
 * NOTE(review): gds_parse_cmdline() is truncated here — only the NULL
 * guard is visible in this chunk. Kept byte-identical.
 */
staticint __init gds_parse_cmdline(char *str)
{ if (!str) return -EINVAL;
/* Human-readable status strings indexed by enum spectre_v1_mitigation. */
static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};
/* * Does SMAP provide full mitigation against speculative kernel access to * userspace?
*/ staticbool smap_works_speculatively(void)
{ if (!boot_cpu_has(X86_FEATURE_SMAP)) returnfalse;
/* * On CPUs which are vulnerable to Meltdown, SMAP does not * prevent speculative access to user data in the L1 cache. * Consider SMAP to be non-functional as a mitigation on these * CPUs.
*/ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) returnfalse;
returntrue;
}
/*
 * Select the Spectre v1 mitigation: disabled when the CPU is not
 * affected or when attack-vector controls say no mitigation is needed.
 */
static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;

	if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
}
/*
 * NOTE(review): the closing brace (and any trailing status print) of
 * this function is not visible in this chunk. Kept byte-identical.
 */
staticvoid __init spectre_v1_apply_mitigation(void)
{ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) return;
if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { /* * With Spectre v1, a user can speculatively control either * path of a conditional swapgs with a user-controlled GS * value. The mitigation is to add lfences to both code paths. * * If FSGSBASE is enabled, the user can put a kernel address in * GS, in which case SMAP provides no protection. * * If FSGSBASE is disabled, the user can only put a user space * address in GS. That makes an attack harder, but still * possible if there's no SMAP protection.
*/ if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
!smap_works_speculatively()) { /* * Mitigation can be provided from SWAPGS itself or * PTI as the CR3 write in the Meltdown mitigation * is serializing. * * If neither is there, mitigate with an LFENCE to * stop speculation through swapgs.
*/ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
!boot_cpu_has(X86_FEATURE_PTI))
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
/* * Enable lfences in the kernel entry (non-swapgs) * paths, to prevent user entry from speculatively * skipping swapgs.
*/
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
}
}
/*
 * NOTE(review): the two RETBLEED_*_MSG #defines are fused onto a single
 * line here, and the switch below is the interior of
 * retbleed_select_mitigation() (its header is not visible in this
 * chunk). Kept byte-identical.
 */
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
switch (retbleed_mitigation) { case RETBLEED_MITIGATION_UNRET: if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
} break; case RETBLEED_MITIGATION_IBPB: if (!boot_cpu_has(X86_FEATURE_IBPB)) {
pr_err("WARNING: CPU does not support IBPB.\n");
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} elseif (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} break; case RETBLEED_MITIGATION_STUFF: if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} elseif (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
} break; default: break;
}
if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) return;
if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
retbleed_mitigation = RETBLEED_MITIGATION_NONE; return;
}
/*
 * NOTE(review): retbleed_update_mitigation() below is merged mid-body
 * with what appears to be retbleed_apply_mitigation() content (the
 * switch over retbleed_mitigation from RETBLEED_MITIGATION_NONE on);
 * lines are missing from this chunk. Kept byte-identical.
 */
staticvoid __init retbleed_update_mitigation(void)
{ if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) return;
/* ITS can also enable stuffing */ if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
/* If SRSO is using IBPB, that works for retbleed too */ if (srso_mitigation == SRSO_MITIGATION_IBPB)
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
!cdt_possible(spectre_v2_enabled)) {
pr_err("WARNING: retbleed=stuff depends on retpoline\n");
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
}
/* * Let IBRS trump all on Intel without affecting the effects of the * retbleed= cmdline option except for call depth based stuffing
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { switch (spectre_v2_enabled) { case SPECTRE_V2_IBRS:
retbleed_mitigation = RETBLEED_MITIGATION_IBRS; break; case SPECTRE_V2_EIBRS: case SPECTRE_V2_EIBRS_RETPOLINE: case SPECTRE_V2_EIBRS_LFENCE:
retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; break; default: if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
pr_err(RETBLEED_INTEL_MSG);
switch (retbleed_mitigation) { case RETBLEED_MITIGATION_NONE: return;
case RETBLEED_MITIGATION_UNRET:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
set_return_thunk(retbleed_return_thunk);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
pr_err(RETBLEED_UNTRAIN_MSG);
mitigate_smt = true; break;
case RETBLEED_MITIGATION_IBPB:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
mitigate_smt = true;
/* * IBPB on entry already obviates the need for * software-based untraining so clear those in case some * other mitigation like SRSO has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/* * There is no need for RSB filling: write_ibpb() ensures * all predictions, including the RSB, are invalidated, * regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
break;
case RETBLEED_MITIGATION_STUFF:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
/*
 * NOTE(review): interior of its_select_mitigation(); the function header
 * is not visible in this chunk. Kept byte-identical.
 */
if (its_mitigation == ITS_MITIGATION_AUTO) { if (should_mitigate_vuln(X86_BUG_ITS))
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; else
its_mitigation = ITS_MITIGATION_OFF;
}
if (its_mitigation == ITS_MITIGATION_OFF) return;
if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
!IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
its_mitigation = ITS_MITIGATION_OFF; return;
}
if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
its_mitigation = ITS_MITIGATION_OFF; return;
}
if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
pr_err("RSB stuff mitigation not supported, using default\n");
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
}
if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
!boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
}
/*
 * Finalize the ITS (Indirect Target Selection) choice based on the
 * Spectre v2 mode selected and whether call-depth tracking is possible.
 */
static void __init its_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_ITS))
		return;

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_NONE:
		if (its_mitigation != ITS_MITIGATION_OFF)
			pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		/* Retpoline+CDT mitigates ITS */
		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
		break;
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	default:
		break;
	}

	/* Stuffing needs a usable call-depth-tracking configuration. */
	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
	    !cdt_possible(spectre_v2_enabled))
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;

	pr_info("%s\n", its_strings[its_mitigation]);
}
/*
 * NOTE(review): its_apply_mitigation() below is truncated after its
 * first cases; the remainder is the interior of tsa_select_mitigation()
 * (its header is not visible in this chunk). Kept byte-identical.
 */
staticvoid __init its_apply_mitigation(void)
{ switch (its_mitigation) { case ITS_MITIGATION_OFF: case ITS_MITIGATION_AUTO: case ITS_MITIGATION_VMEXIT_ONLY: break; case ITS_MITIGATION_ALIGNED_THUNKS: if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
if (tsa_mitigation == TSA_MITIGATION_AUTO) { bool vm = false, uk = false;
tsa_mitigation = TSA_MITIGATION_NONE;
if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
uk = true;
}
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
tsa_mitigation = TSA_MITIGATION_VM;
vm = true;
}
if (uk && vm)
tsa_mitigation = TSA_MITIGATION_FULL;
}
if (tsa_mitigation == TSA_MITIGATION_NONE) return;
if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
/* * No need to set verw_clear_cpu_buf_mitigation_selected - it * doesn't fit all cases here and it is not needed because this * is the only VERW-based mitigation on AMD.
*/
pr_info("%s\n", tsa_strings[tsa_mitigation]);
}
/*
 * Apply the selected TSA mitigation by forcing the matching VERW
 * buffer-clear capability bits (kernel entry, VM entry, or both).
 */
static void __init tsa_apply_mitigation(void)
{
	switch (tsa_mitigation) {
	case TSA_MITIGATION_USER_KERNEL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		break;
	case TSA_MITIGATION_VM:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	case TSA_MITIGATION_FULL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	default:
		break;
	}
}
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
/*
 * Warn when unprivileged eBPF is (re-)enabled while a Spectre v2
 * mitigation it can bypass is in effect. @new_state non-zero means
 * unprivileged eBPF is being disabled, so nothing to warn about.
 */
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif
/*
 * Compare a possibly non-NUL-terminated command line argument @arg of
 * length @arglen against the NUL-terminated option string @opt.
 *
 * Return: true iff @arg matches @opt exactly (same length, same bytes).
 */
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,			/* mitigation disabled */
	SPECTRE_V2_CMD_AUTO,			/* kernel selects best mitigation */
	SPECTRE_V2_CMD_FORCE,			/* mitigation forced on */
	SPECTRE_V2_CMD_RETPOLINE,		/* retpoline, flavor auto-picked */
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,	/* generic (non-LFENCE) retpoline */
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,	/* LFENCE-based retpoline */
	SPECTRE_V2_CMD_EIBRS,			/* enhanced IBRS */
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,		/* enhanced IBRS + retpoline */
	SPECTRE_V2_CMD_EIBRS_LFENCE,		/* enhanced IBRS + LFENCE */
	SPECTRE_V2_CMD_IBRS,			/* legacy IBRS */
};
/*
 * Log the spectre_v2_user= command line choice, but only when it
 * disagrees with the CPU's vulnerability status (a "secure" option was
 * forced on a vulnerable CPU, or vice versa).
 */
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
staticenum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
{ char arg[20]; int ret, i;
if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2)) return SPECTRE_V2_USER_CMD_NONE;
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg)); if (ret < 0) return SPECTRE_V2_USER_CMD_AUTO;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { if (match_option(arg, ret, v2_user_options[i].option)) {
spec_v2_user_print_cond(v2_user_options[i].option,
v2_user_options[i].secure); return v2_user_options[i].cmd;
}
}
pr_err("Unknown user space protection option (%s). Switching to default\n", arg); return SPECTRE_V2_USER_CMD_AUTO;
}
/*
 * Select the user-space Spectre v2 mitigation modes (IBPB and STIBP)
 * from the spectre_v2_user= command line option, then clamp the result
 * to what the CPU actually supports.
 */
static void __init spectre_v2_user_select_mitigation(void)
{
	/* Nothing to select without either control */
	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	switch (spectre_v2_parse_user_cmdline()) {
	case SPECTRE_V2_USER_CMD_NONE:
		return;
	case SPECTRE_V2_USER_CMD_FORCE:
		spectre_v2_user_ibpb  = SPECTRE_V2_USER_STRICT;
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
		if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
			break;
		spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
		/* With SMT mitigations off, skip STIBP */
		if (smt_mitigations == SMT_MITIGATIONS_OFF)
			break;
		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
		spectre_v2_user_ibpb  = SPECTRE_V2_USER_PRCTL;
		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		spectre_v2_user_ibpb  = SPECTRE_V2_USER_STRICT;
		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
		if (IS_ENABLED(CONFIG_SECCOMP))
			spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
		else
			spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
		spectre_v2_user_stibp = spectre_v2_user_ibpb;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
		if (IS_ENABLED(CONFIG_SECCOMP))
			spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
		else
			spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	     spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;

	/* Drop whichever control the CPU does not actually have */
	if (!boot_cpu_has(X86_FEATURE_IBPB))
		spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;

	if (!boot_cpu_has(X86_FEATURE_STIBP))
		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
}
staticvoid __init spectre_v2_user_update_mitigation(void)
{ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) return;
/* The spectre_v2 cmd line can override spectre_v2_user options */ if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
} elseif (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
}
/* * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP * is not required. * * Intel's Enhanced IBRS also protects against cross-thread branch target * injection in user-mode as the IBRS bit remains always set which * implicitly enables cross-thread protections. However, in legacy IBRS * mode, the IBRS bit is set only on kernel entry and cleared on return * to userspace. AMD Automatic IBRS also does not protect userspace. * These modes therefore disable the implicit cross-thread protection, * so allow for STIBP to be selected in those cases.
*/ if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!cpu_smt_possible() ||
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
!boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; return;
}
ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); if (ret < 0) return cmd;
for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { if (!match_option(arg, ret, mitigation_options[i].option)) continue;
cmd = mitigation_options[i].cmd; break;
}
if (i >= ARRAY_SIZE(mitigation_options)) {
pr_err("unknown option (%s). Switching to default mode\n", arg); return cmd;
}
if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if ((cmd == SPECTRE_V2_CMD_EIBRS ||
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO;
}
staticenum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{ if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
pr_err("Kernel not compiled with retpoline; no mitigation available!"); return SPECTRE_V2_NONE;
}
return SPECTRE_V2_RETPOLINE;
}
staticbool __ro_after_init rrsba_disabled;
/* Disable in-kernel use of non-RSB RET predictors */ staticvoid __init spec_ctrl_disable_kernel_rrsba(void)
{ if (rrsba_disabled) return;
if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
rrsba_disabled = true; return;
}
if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) return;
staticvoid __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{ /* * WARNING! There are many subtleties to consider when changing *any* * code related to RSB-related mitigations. Before doing so, carefully * read the following document, and update if necessary: * * Documentation/admin-guide/hw-vuln/rsb.rst * * In an overly simplified nutshell: * * - User->user RSB attacks are conditionally mitigated during * context switches by cond_mitigation -> write_ibpb(). * * - User->kernel and guest->host attacks are mitigated by eIBRS or * RSB filling. * * Though, depending on config, note that other alternative * mitigations may end up getting used instead, e.g., IBPB on * entry/vmexit, call depth tracking, or return thunks.
*/
switch (mode) { case SPECTRE_V2_NONE: break;
case SPECTRE_V2_EIBRS: case SPECTRE_V2_EIBRS_LFENCE: case SPECTRE_V2_EIBRS_RETPOLINE: if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
} break;
case SPECTRE_V2_RETPOLINE:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.50 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.