/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *  0: 0x8000000000000000	0x0000000000000000
 *  1: 0xfc00000000000000	0xf000000000000000
 * ...
 * 11: 0xfffffffffffffff8	0xfffffffffffffc00
 * 12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *  10 calls
 *   5 returns
 *   3 calls
 *   4 returns
 *   3 calls
 *   ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL
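/*
 * Illustrative sketch, not part of the header: the tracking above can be
 * modeled in plain C (u64/s64 as in <linux/types.h>; the helper names
 * track_call/track_return/track_credit are made up for this sketch, only
 * the RET_DEPTH_* constants come from this file):
 *
 *	static u64 track_call(u64 depth)
 *	{
 *		return (u64)((s64)depth >> RET_DEPTH_SHIFT);	// arithmetic shift
 *	}
 *
 *	static u64 track_return(u64 depth)
 *	{
 *		return depth << RET_DEPTH_SHIFT;		// logical shift
 *	}
 *
 *	static u64 track_credit(void)
 *	{
 *		return RET_DEPTH_CREDIT;	// after an RSB stuffing sequence
 *	}
 *
 * Starting from RET_DEPTH_INIT, repeated track_call() sign-extends ones in
 * from the top until the value saturates at all ones, and track_return()
 * shifts zeros back in from the bottom, which is what the table above
 * illustrates.
 */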
/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
int3; \
772:
/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr) \
mov $(nr/2), reg; \
771: \
__FILL_RETURN_SLOT \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
dec reg; \
jnz 771b; \
/* barrier for jnz misprediction */ \
lfence; \
CREDIT_CALL_DEPTH \
CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr) \
.rept nr; \
__FILL_RETURN_SLOT; \
.endr; \
add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif
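/*
 * Illustrative sketch, not part of the header: the 64-bit variant above runs
 * nr/2 iterations; each iteration executes the two call slots, pushing two
 * return addresses (and thus occupying two RSB entries), and then drops both
 * addresses again by adding 2 * (BITS_PER_LONG/8) to the stack pointer. In
 * C-like form (rsb_entries/stack_delta are made-up names):
 *
 *	unsigned int i, rsb_entries = 0;
 *	long stack_delta = 0;
 *
 *	for (i = 0; i < nr / 2; i++) {
 *		rsb_entries += 2;			// two call slots
 *		stack_delta -= 2 * sizeof(long);	// pushed by the calls
 *		stack_delta += 2 * sizeof(long);	// the add to %_ASM_SP
 *	}
 *
 * i.e. nr RSB entries are written while the stack pointer is left where it
 * started.
 */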
/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8), %_ASM_SP; \
lfence;
#ifdef __ASSEMBLER__
/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
*/
.macro VALIDATE_UNRET_END
#if defined(CONFIG_NOINSTR_VALIDATION) && \
(defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
ANNOTATE_RETPOLINE_SAFE
nop
#endif
.endm
/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
*/
.macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
.ifc \reg,\rs
.byte 0x2e
.endif
.endr
.endm
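/*
 * Illustrative expansion, not part of the header: hand-expanding the macro,
 *
 *	__CS_PREFIX rax		emits nothing
 *	__CS_PREFIX r11		emits .byte 0x2e (a CS segment override)
 *
 * so a subsequent "jmp __x86_indirect_thunk_r11" carries the same CS prefix
 * that -mindirect-branch-cs-prefix makes the compiler emit for its own
 * thunk calls with an r8-r15 target.
 */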
/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 *
 * NOTE: these do not take kCFI into account and are thus not comparable to C
 * indirect calls, take care when using. The target of these should be an ENDBR
 * instruction irrespective of kCFI.
*/
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_MITIGATION_RETPOLINE
__CS_PREFIX \reg
jmp __x86_indirect_thunk_\reg
#else
jmp *%\reg
int3
#endif
.endm
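/*
 * Illustrative usage, not part of the header (the label and load are made
 * up for this sketch): instead of a bare "jmp *%rax", an assembly dispatch
 * site would do
 *
 *	mov	example_handler_ptr(%rip), %rax
 *	JMP_NOSPEC rax
 *
 * With CONFIG_MITIGATION_RETPOLINE this expands to a jump to
 * __x86_indirect_thunk_rax (CS-prefixed for r8-r15 targets); without it, to
 * a plain "jmp *%rax" followed by an int3 speculation trap. Per the NOTE
 * above, the code at the target is expected to start with ENDBR.
 */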
/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
*/
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2
.Lskip_rsb_\@:
.endm
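/*
 * Illustrative usage, not part of the header: from a .S file the RSB can be
 * stuffed with, for example,
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * which clobbers %r12 as the loop counter and is patched in only when the
 * named feature bit is set (the kernel uses a similar invocation on context
 * switch). The optional fourth argument selects the single-slot
 * __FILL_ONE_RETURN variant as a fallback when that second feature bit is
 * set instead.
 */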
/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
*/
.macro CALL_UNTRAIN_RET
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
	"call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm
/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
 * write_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
*/
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH #endif
.endm
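/*
 * Illustrative placement sketch, not part of the header (the surrounding
 * entry code is schematic; UNTRAIN_RET is assumed to be a thin wrapper
 * around __UNTRAIN_RET defined elsewhere):
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *	... stack / pt_regs set up ...
 *	UNTRAIN_RET
 *	... remaining entry work, first RET only after this point ...
 *
 * matching the rule above: after the CR3 switch, with a usable stack,
 * before any RET.
 */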
/*
 * Macro to execute VERW insns that mitigate transient data sampling
 * attacks such as MDS or TSA. On affected systems a microcode update
 * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
 * CFLAGS.ZF.
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature #else /* * In 32bit mode, the memory operand must be a %cs reference. The data * segments may not be usable (vm86 mode), and the stack segment may not * be flat (ESPFIX32).
*/
ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature #endif
.endm
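/*
 * Illustrative usage, not part of the header (the surrounding exit path is
 * schematic; X86_FEATURE_CLEAR_CPU_BUF is the feature bit the kernel uses
 * to gate VERW-based clearing): an exit-to-userspace path would invoke
 *
 *	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
 *	... iretq / sysretq back to userspace ...
 *
 * The alternative stays a NOP unless the feature bit is set, so unaffected
 * CPUs pay nothing.
 */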
#else /* __ASSEMBLER__ */

/*
 * Emits a conditional CS prefix that is compatible with
 * -mindirect-branch-cs-prefix.
 */
#define __CS_PREFIX(reg) \
	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
	".ifc \\rs," reg "\n" \
	".byte 0x2e\n" \
	".endif\n" \
	".endr\n"
/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 */
#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]") \
			"call __x86_indirect_thunk_%V[thunk_target]\n"
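/*
 * Illustrative usage, not part of the header (the function pointer, argument
 * and output variable are made up for this sketch): CALL_NOSPEC is meant to
 * be pasted into inline asm, with the target supplied via the mandatory
 * [thunk_target] operand:
 *
 *	long (*fn)(long) = example_fn;
 *	long arg = 1, ret;
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : [thunk_target] "r" (fn), "D" (arg)
 *		     : "memory");
 *
 * A real call site also has to account for registers the callee may clobber.
 * The %V modifier prints the bare register name (e.g. "r11" instead of
 * "%r11") so the thunk symbol name can be composed from it.
 */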
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);

extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);
/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start() \
do { \
preempt_disable(); \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current() | SPEC_CTRL_IBRS, \
X86_FEATURE_USE_IBRS_FW); \
alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \
X86_FEATURE_USE_IBPB_FW); \
} while (0)
#define firmware_restrict_branch_speculation_end() \
do { \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current(), \
X86_FEATURE_USE_IBRS_FW); \
preempt_enable(); \
} while (0)
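/*
 * Illustrative usage, not part of the header (the firmware call in the
 * middle is a placeholder): the pair brackets a call into firmware, with
 * preemption disabled for the whole window by the start/end macros:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = example_efi_runtime_call();
 *	firmware_restrict_branch_speculation_end();
 *
 * IBRS is set in SPEC_CTRL (and an IBPB issued) only when the respective
 * X86_FEATURE_USE_IBRS_FW / X86_FEATURE_USE_IBPB_FW bits are set, and the
 * current SPEC_CTRL value without IBRS is written back at the end.
 */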
/**
 * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void x86_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;
	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}
/**
 * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
 * and TSA vulnerabilities.
 *
 * Clear CPU buffers if the corresponding static key is enabled.
 */
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&cpu_buf_idle_clear))
x86_clear_cpu_buffers();
}
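/*
 * Illustrative usage, not part of the header (the idle helper name is made
 * up): the call sits in the idle entry path just before the CPU goes to
 * sleep, so stale buffer contents are not left behind while an SMT sibling
 * keeps running:
 *
 *	static __always_inline void example_mwait_idle(unsigned long eax,
 *						       unsigned long ecx)
 *	{
 *		x86_idle_clear_cpu_buffers();
 *		__monitor(&current_thread_info()->flags, 0, 0);
 *		if (!need_resched())
 *			__mwait(eax, ecx);
 *	}
 *
 * cpu_buf_idle_clear is a static key flipped by the MDS/TSA mitigation
 * setup, so the buffer clear costs nothing on unaffected systems.
 */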
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */