SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
	/* A 32-bit spill truncates the 64-bit value, so the spilled slot must
	 * not keep the source register's scalar ID; otherwise sync_linked_regs
	 * would wrongly propagate the filled register's range back to r1.
	 */
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	r1 = 0xffffffff;				\
	r1 <<= 32;					\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 32-bit spill r1 to stack - should clear the ID! */\
	*(u32*)(r10 - 8) = r1;				\
	/* 32-bit fill r2 from stack. */		\
	r2 = *(u32*)(r10 - 8);				\
	/* Compare r2 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 32;					\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffffffff.\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
	/* Same idea as the 32-of-64 test above, one size down: a 16-bit spill
	 * of a 32-bit value truncates, so the spill must drop the scalar ID.
	 */
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	w1 = 0xffff0000;				\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 16-bit spill r1 to stack - should clear the ID! */\
	*(u16*)(r10 - 8) = r1;				\
	/* 16-bit fill r2 from stack. */		\
	r2 = *(u16*)(r10 - 8);				\
	/* Compare r2 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 16;					\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffff.\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("raw_tp")
__log_level(2)
__success
/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0")
/* but fp-16 is spilled IMPRECISE zero const reg */
__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0")
/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
 * precise immediately; if necessary, it will be marked precise later
 */
__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0")
/* similarly, when R2 is assigned from spilled register, it is initially
 * imprecise, but will be marked precise later once it is used in precise context
 */
__msg("10: (71) r2 = *(u8 *)(r10 -9) ; R2_w=0 R10=fp0 fp-16_w=0")
__msg("11: (0f) r1 += r2")
__msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2")
__msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2")
__msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)")
__msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
__naked void partial_stack_load_preserves_zeros(void)
{
	asm volatile (
		/* fp-8 is value zero (represented by a zero value fake reg) */
		".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */

		/* fp-16 is const zero register */
		"r0 = 0;"
		"*(u64 *)(r10 -16) = r0;"

		/* load single U8 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -1);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U8 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -9);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -2);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -10);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned spilled value zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -4);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -12);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* for completeness, load U64 from STACK_ZERO slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u64 *)(r10 -8);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* for completeness, load U64 from ZERO REG slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u64 *)(r10 -16);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U8 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -1);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U16 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u16 *)(r10 -2);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* load single U32 from non-aligned stack zero slot */
		"r1 = %[single_byte_buf];"
		"r2 = *(u32 *)(r10 -4);"
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */

		/* NOTE(review): the epilogue below (r0 = 0; exit; plus the asm
		 * operand lists and closing brace) was missing from the
		 * corrupted source and has been reconstructed from the upstream
		 * selftest — confirm against
		 * tools/testing/selftests/bpf/progs/verifier_spill_fill.c.
		 */
		"r0 = 0;"
		"exit;"
	:
	: __imm_ptr(single_byte_buf),
	  __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
	: __clobber_common);
}
SEC("xdp")
__description("32-bit spilled reg range should be tracked")
__success __retval(0)
__naked void spill_32bit_range_track(void)
{
	/* A 32-bit spill/fill of a value that fits in 32 bits should keep
	 * enough range information to prune the dead branch below.
	 */
	asm volatile("					\
	call %[bpf_ktime_get_ns];			\
	/* Make r0 bounded. */				\
	r0 &= 65535;					\
	/* Assign an ID to r0. */			\
	r1 = r0;					\
	/* 32-bit spill r0 to stack. */			\
	*(u32*)(r10 - 8) = r0;				\
	/* Boundary check on r0. */			\
	if r0 < 1 goto l0_%=;				\
	/* 32-bit fill r1 from stack. */		\
	r1 = *(u32*)(r10 - 8);				\
	/* r1 == r0 => r1 >= 1 always. */		\
	if r1 >= 1 goto l0_%=;				\
	/* Dead branch: the verifier should prune it.	\
	 * Do an invalid memory access if the verifier	\
	 * follows it.					\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
SEC("xdp")
__description("64-bit spill of 64-bit reg should assign ID")
__success __retval(0)
__naked void spill_64bit_of_64bit_ok(void)
{
	/* A full-width spill/fill preserves the value exactly, so the slot
	 * may carry a scalar ID and range knowledge may propagate via
	 * sync_linked_regs.
	 */
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x80000000;				\
	r0 <<= 32;					\
	/* 64-bit spill r0 to stack - should assign an ID. */\
	*(u64*)(r10 - 8) = r0;				\
	/* 64-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u64*)(r10 - 8);				\
	/* Compare r1 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("32-bit spill of 32-bit reg should assign ID")
__success __retval(0)
__naked void spill_32bit_of_32bit_ok(void)
{
	/* A 32-bit spill of a value known to fit in 32 bits loses nothing,
	 * so the slot may carry a scalar ID.
	 */
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0x80000000;				\
	/* 32-bit spill r0 to stack - should assign an ID. */\
	*(u32*)(r10 - 8) = r0;				\
	/* 32-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u32*)(r10 - 8);				\
	/* Compare r1 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("16-bit spill of 16-bit reg should assign ID")
__success __retval(0)
__naked void spill_16bit_of_16bit_ok(void)
{
	/* A 16-bit spill of a value known to fit in 16 bits loses nothing,
	 * so the slot may carry a scalar ID.
	 */
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8000;					\
	/* 16-bit spill r0 to stack - should assign an ID. */\
	*(u16*)(r10 - 8) = r0;				\
	/* 16-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u16*)(r10 - 8);				\
	/* Compare r1 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("8-bit spill of 8-bit reg should assign ID")
__success __retval(0)
__naked void spill_8bit_of_8bit_ok(void)
{
	/* An 8-bit spill of a value known to fit in 8 bits loses nothing,
	 * so the slot may carry a scalar ID.
	 */
	asm volatile ("					\
	/* Roll one bit to make the register inexact. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x80;					\
	/* 8-bit spill r0 to stack - should assign an ID. */\
	*(u8*)(r10 - 8) = r0;				\
	/* 8-bit fill r1 from stack - should preserve the ID. */\
	r1 = *(u8*)(r10 - 8);				\
	/* Compare r1 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners.					\
	 */						\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("spill unbounded reg, then range check src")
__success __retval(0)
__naked void spill_unbounded(void)
{
	/* A range check on the source register after a 64-bit spill should
	 * narrow the spilled slot too, letting the verifier prune the
	 * contradictory branch after the fill.
	 */
	asm volatile ("					\
	/* Produce an unbounded scalar. */		\
	call %[bpf_get_prandom_u32];			\
	/* Spill r0 to stack. */			\
	*(u64*)(r10 - 8) = r0;				\
	/* Boundary check on r0. */			\
	if r0 > 16 goto l0_%=;				\
	/* Fill r0 from stack. */			\
	r0 = *(u64*)(r10 - 8);				\
	/* Boundary check on r0 with predetermined result. */\
	if r0 <= 16 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("32-bit fill after 64-bit spill")
__success __retval(0)
__naked void fill_32bit_after_spill_64bit(void)
{
	/* The 32-bit fill reads the low half of the 64-bit spill; the offset
	 * of that half depends on host endianness, hence the #if below.
	 */
	asm volatile("					\
	/* Randomize the upper 32 bits. */		\
	call %[bpf_get_prandom_u32];			\
	r0 <<= 32;					\
	/* 64-bit spill r0 to stack. */			\
	*(u64*)(r10 - 8) = r0;				\
	/* 32-bit fill r0 from stack. */		\
"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(u32*)(r10 - 8);"
#else
	"r0 = *(u32*)(r10 - 4);"
#endif
"							\
	/* Boundary check on r0 with predetermined result. */\
	if r0 == 0 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID")
__success __retval(0)
__naked void fill_32bit_after_spill_64bit_preserve_id(void)
{
	/* When the spilled 64-bit value is known to fit in 32 bits, a 32-bit
	 * fill of its low half is lossless, so the ID may be preserved.
	 */
	asm volatile ("					\
	/* Randomize the lower 32 bits. */		\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0xffffffff;				\
	/* 64-bit spill r0 to stack - should assign an ID. */\
	*(u64*)(r10 - 8) = r0;				\
	/* 32-bit fill r1 from stack - should preserve the ID. */\
"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r1 = *(u32*)(r10 - 8);"
#else
	"r1 = *(u32*)(r10 - 4);"
#endif
"							\
	/* Compare r1 with another register to trigger sync_linked_regs. */\
	r2 = 0;						\
	if r1 != r2 goto l0_%=;				\
	/* The result of this comparison is predefined. */\
	if r0 == r2 goto l0_%=;				\
	/* Dead branch: the verifier should prune it. Do an invalid memory\
	 * access if the verifier follows it.		\
	 */						\
	r0 = *(u64*)(r9 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
SEC("xdp")
__description("32-bit fill after 64-bit spill should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void fill_32bit_after_spill_64bit_clear_id(void)
{
	/* Here the spilled value does NOT fit in 32 bits, so a 32-bit fill is
	 * lossy and must not inherit the slot's scalar ID.
	 */
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	r1 = 0xffffffff;				\
	r1 <<= 32;					\
	r1 += r0;					\
	/* 64-bit spill r1 to stack - should assign an ID. */\
	*(u64*)(r10 - 8) = r1;				\
	/* 32-bit fill r2 from stack - should clear the ID. */\
"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r2 = *(u32*)(r10 - 8);"
#else
	"r2 = *(u32*)(r10 - 4);"
#endif
"							\
	/* Compare r2 with another register to trigger sync_linked_regs.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners. If the ID was mistakenly preserved on fill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 32;					\
	/* The verifier shouldn't propagate r2's range to r1, so it should\
	 * still remember r1 = 0xffffffff and reject the below.\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
/* stacksafe(): check if stack spill of an imprecise scalar in old state
 * is considered equivalent to STACK_{MISC,INVALID} in cur state.
 */
SEC("socket")
__success __log_level(2)
__msg("8: (79) r1 = *(u64 *)(r10 -8)")
__msg("8: safe")
__msg("processed 11 insns")
/* STACK_INVALID should prevent verifier in unpriv mode from
 * considering states equivalent and force an error on second
 * verification path (entry - label 1 - label 2).
 */
__failure_unpriv
__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
__msg_unpriv("9: (95) exit")
__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
__msg_unpriv("invalid read from stack off -8+2 size 8")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void old_imprecise_scalar_vs_cur_stack_misc(void)
{
	asm volatile (
		/* get a random value for branching */
		"call %[bpf_ktime_get_ns];"
		"if r0 == 0 goto 1f;"
		/* conjure scalar at fp-8 */
		"r0 = 42;"
		"*(u64*)(r10 - 8) = r0;"
		"goto 2f;"
	"1:"
		/* conjure STACK_{MISC,INVALID} at fp-8 */
		"call %[bpf_ktime_get_ns];"
		"*(u16*)(r10 - 8) = r0;"
		"*(u16*)(r10 - 4) = r0;"
	"2:"
		/* read fp-8, should be considered safe on second visit */
		"r1 = *(u64*)(r10 - 8);"
		"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
/* stacksafe(): check that stack spill of a precise scalar in old state
 * is not considered equivalent to STACK_MISC in cur state.
 */
SEC("socket")
__success __log_level(2)
/* verifier should visit 'if r1 == 0x2a ...' two times:
 * - once for path entry - label 2;
 * - once for path entry - label 1 - label 2.
 */
__msg("if r1 == 0x2a goto pc+0")
__msg("if r1 == 0x2a goto pc+0")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void old_precise_scalar_vs_cur_stack_misc(void)
{
	asm volatile (
		/* get a random value for branching */
		"call %[bpf_ktime_get_ns];"
		"if r0 == 0 goto 1f;"
		/* conjure scalar at fp-8 */
		"r0 = 42;"
		"*(u64*)(r10 - 8) = r0;"
		"goto 2f;"
	"1:"
		/* conjure STACK_MISC at fp-8 */
		"call %[bpf_ktime_get_ns];"
		"*(u64*)(r10 - 8) = r0;"
		"*(u32*)(r10 - 4) = r0;"
	"2:"
		/* read fp-8, should not be considered safe on second visit */
		"r1 = *(u64*)(r10 - 8);"
		/* use r1 in precise context */
		"if r1 == 42 goto +0;"
		"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
/* stacksafe(): check if STACK_MISC in old state is considered
 * equivalent to stack spill of a scalar in cur state.
 */
SEC("socket")
__success __log_level(2)
__msg("8: (79) r0 = *(u64 *)(r10 -8)")
__msg("8: safe")
__msg("processed 11 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void old_stack_misc_vs_cur_scalar(void)
{
	asm volatile (
		/* get a random value for branching */
		"call %[bpf_ktime_get_ns];"
		"if r0 == 0 goto 1f;"
		/* conjure STACK_{MISC,INVALID} at fp-8 */
		"call %[bpf_ktime_get_ns];"
		"*(u16*)(r10 - 8) = r0;"
		"*(u16*)(r10 - 4) = r0;"
		"goto 2f;"
	"1:"
		/* conjure scalar at fp-8 */
		"r0 = 42;"
		"*(u64*)(r10 - 8) = r0;"
	"2:"
		/* read fp-8, should be considered safe on second visit */
		"r0 = *(u64*)(r10 - 8);"
		"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
/* stacksafe(): check that STACK_MISC in old state is not considered
 * equivalent to stack spill of a non-scalar in cur state.
 */
SEC("socket")
__success __log_level(2)
/* verifier should process exit instructions twice:
 * - once for path entry - label 2;
 * - once for path entry - label 1 - label 2.
 */
__msg("8: (79) r1 = *(u64 *)(r10 -8)")
__msg("9: (95) exit")
__msg("from 2 to 7")
__msg("8: safe")
__msg("processed 11 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void old_stack_misc_vs_cur_ctx_ptr(void)
{
	asm volatile (
		/* remember context pointer in r9 */
		"r9 = r1;"
		/* get a random value for branching */
		"call %[bpf_ktime_get_ns];"
		"if r0 == 0 goto 1f;"
		/* conjure STACK_MISC at fp-8 */
		"call %[bpf_ktime_get_ns];"
		"*(u64*)(r10 - 8) = r0;"
		"*(u32*)(r10 - 4) = r0;"
		"goto 2f;"
	"1:"
		/* conjure context pointer in fp-8 */
		"*(u64*)(r10 - 8) = r9;"
	"2:"
		/* read fp-8, should not be considered safe on second visit */
		"r1 = *(u64*)(r10 - 8);"
		"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
/* NOTE(review): the German text below is web-extraction residue (a generic
 * site disclaimer), not part of the original source file; it is wrapped in a
 * comment so the file remains compilable. English translation: "The
 * information on this website was compiled carefully to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax highlighting
 * and the measurement are still experimental." It can be deleted outright.
 */