/* Check whether two ranges are identical (same lower and upper bounds). */
static bool range_eq(struct range x, struct range y)
{
	return x.a == y.a && x.b == y.b;
}
staticstruct range range_cast_to_s32(struct range x)
{
u64 a = x.a, b = x.b;
/* if upper 32 bits are constant, lower 32 bits should form a proper * s32 range to be correct
*/ if (upper32(a) == upper32(b) && (s32)a <= (s32)b) return range(S32, a, b);
/* Special case where upper bits form a small sequence of two * sequential numbers (in 32-bit unsigned space, so 0xffffffff to * 0x00000000 is also valid), while lower bits form a proper s32 range * going from negative numbers to positive numbers. * * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating * over full 64-bit numbers range will form a proper [-16, 16] * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
*/ if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0) return range(S32, a, b);
/* otherwise we can't derive much meaningful information */ return unkn[S32];
}
staticstruct range range_cast_u64(enum num_t to_t, struct range x)
{
u64 a = (u64)x.a, b = (u64)x.b;
switch (to_t) { case U64: return x; case U32: if (upper32(a) != upper32(b)) return unkn[U32]; return range(U32, a, b); case S64: if (sign64(a) != sign64(b)) return unkn[S64]; return range(S64, a, b); case S32: return range_cast_to_s32(x); default: printf("range_cast_u64!\n"); exit(1);
}
}
staticstruct range range_cast_s64(enum num_t to_t, struct range x)
{
s64 a = (s64)x.a, b = (s64)x.b;
switch (to_t) { case U64: /* equivalent to (s64)a <= (s64)b check */ if (sign64(a) != sign64(b)) return unkn[U64]; return range(U64, a, b); case U32: if (upper32(a) != upper32(b) || sign32(a) != sign32(b)) return unkn[U32]; return range(U32, a, b); case S64: return x; case S32: return range_cast_to_s32(x); default: printf("range_cast_s64!\n"); exit(1);
}
}
staticstruct range range_cast_u32(enum num_t to_t, struct range x)
{
u32 a = (u32)x.a, b = (u32)x.b;
switch (to_t) { case U64: case S64: /* u32 is always a valid zero-extended u64/s64 */ return range(to_t, a, b); case U32: return x; case S32: return range_cast_to_s32(range(U32, a, b)); default: printf("range_cast_u32!\n"); exit(1);
}
}
staticstruct range range_cast_s32(enum num_t to_t, struct range x)
{
s32 a = (s32)x.a, b = (s32)x.b;
switch (to_t) { case U64: case U32: case S64: if (sign32(a) != sign32(b)) return unkn[to_t]; return range(to_t, a, b); case S32: return x; default: printf("range_cast_s32!\n"); exit(1);
}
}
/* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving * all possible information. Worst case, it will be unknown range within * *to_t* domain, if nothing more specific can be guaranteed during the * conversion
*/ staticstruct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
{ switch (from_t) { case U64: return range_cast_u64(to_t, from); case U32: return range_cast_u32(to_t, from); case S64: return range_cast_s64(to_t, from); case S32: return range_cast_s32(to_t, from); default: printf("range_cast!\n"); exit(1);
}
}
/* Check that value *x* is representable in domain *t*: 32-bit domains require
 * the upper 32 bits to be zero, 64-bit domains accept any value.
 */
static bool is_valid_num(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return true;
	case U32: return upper32(x) == 0;
	case S64: return true;
	case S32: return upper32(x) == 0;
	default: printf("is_valid_num!\n"); exit(1);
	}
}
/* Check that range *x* is well-formed in domain *t*: both endpoints are valid
 * numbers for *t* and the lower bound does not exceed the upper bound when
 * compared in *t*'s signedness/width.
 */
static bool is_valid_range(enum num_t t, struct range x)
{
	if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
		return false;

	switch (t) {
	case U64: return (u64)x.a <= (u64)x.b;
	case U32: return (u32)x.a <= (u32)x.b;
	case S64: return (s64)x.a <= (s64)x.b;
	case S32: return (s32)x.a <= (s32)x.b;
	default: printf("is_valid_range!\n"); exit(1);
	}
}
staticstruct range range_improve(enum num_t t, struct range old, struct range new)
{ return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
}
staticstruct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
{ struct range y_cast;
y_cast = range_cast(y_t, x_t, y);
/* If we know that * - *x* is in the range of signed 32bit value, and * - *y_cast* range is 32-bit signed non-negative * then *x* range can be improved with *y_cast* such that *x* range * is 32-bit signed non-negative. Otherwise, if the new range for *x* * allows upper 32-bit * 0xffffffff then the eventual new range for * *x* will be out of signed 32-bit range which violates the origin * *x* range.
*/ if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX && y_cast.b <= S32_MAX &&
(s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX) return range_improve(x_t, x, y_cast);
/* the case when new range knowledge, *y*, is a 32-bit subregister * range, while previous range knowledge, *x*, is a full register * 64-bit range, needs special treatment to take into account upper 32 * bits of full register range
*/ if (t_is_32(y_t) && !t_is_32(x_t)) { struct range x_swap;
/* some combinations of upper 32 bits and sign bit can lead to * invalid ranges, in such cases it's easier to detect them * after cast/swap than try to enumerate all the conditions * under which transformation and knowledge transfer is valid
*/
x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b)); if (!is_valid_range(x_t, x_swap)) return x; return range_improve(x_t, x, x_swap);
}
staticenum op complement_op(enum op op)
{ switch (op) { case OP_LT: return OP_GE; case OP_LE: return OP_GT; case OP_GT: return OP_LE; case OP_GE: return OP_LT; case OP_EQ: return OP_NE; case OP_NE: return OP_EQ; default: printf("complement_op!\n"); exit(1);
}
}
/* Human-readable symbol for comparison *op*, for test-case printing. */
static const char *op_str(enum op op)
{
	switch (op) {
	case OP_LT: return "<";
	case OP_LE: return "<=";
	case OP_GT: return ">";
	case OP_GE: return ">=";
	case OP_EQ: return "==";
	case OP_NE: return "!=";
	default: printf("op_str!\n"); exit(1);
	}
}
/* Can register with range [x.a, x.b] *EVER* satisfy * OP (<, <=, >, >=, ==, !=) relation to * a register with range [y.a, y.b] * _in *num_t* domain_
*/ staticbool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
{ #define range_canbe(T) do { \ switch (op) { \ case OP_LT: return (T)x.a < (T)y.b; \ case OP_LE: return (T)x.a <= (T)y.b; \ case OP_GT: return (T)x.b > (T)y.a; \ case OP_GE: return (T)x.b >= (T)y.a; \ case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b); \ case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a); \ default: printf("range_canbe op %d\n", op); exit(1); \
} \
} while (0)
switch (t) { case U64: { range_canbe(u64); } case U32: { range_canbe(u32); } case S64: { range_canbe(s64); } case S32: { range_canbe(s32); } default: printf("range_canbe!\n"); exit(1);
} #undef range_canbe
}
/* Does register with range [x.a, x.b] *ALWAYS* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_
 */
static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
{
	/* always op <=> ! canbe complement(op) */
	return !range_canbe_op(t, x, y, complement_op(op));
}
/* Does register with range [x.a, x.b] *NEVER* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_
 */
static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
{
	return !range_canbe_op(t, x, y, op);
}
/* similar to verifier's is_branch_taken():
 *    1 - always taken;
 *    0 - never taken,
 *   -1 - unsure.
 */
static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
{
	if (range_always_op(t, x, y, op))
		return 1;
	if (range_never_op(t, x, y, op))
		return 0;
	return -1;
}
/* What would be the new estimates for register x and y ranges assuming truthful
 * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
 *
 * We assume "interesting" cases where ranges overlap. Cases where it's
 * obvious that (x OP y) is either always true or false should be filtered with
 * range_never and range_always checks.
 */
static void range_cond(enum num_t t, struct range x, struct range y,
		       enum op op, struct range *newx, struct range *newy)
{
	if (!range_canbe_op(t, x, y, op)) {
		/* nothing to adjust, can't happen, return original values */
		*newx = x;
		*newy = y;
		return;
	}
	switch (op) {
	case OP_LT:
		*newx = range(t, x.a, min_t(t, x.b, y.b - 1));
		*newy = range(t, max_t(t, x.a + 1, y.a), y.b);
		break;
	case OP_LE:
		*newx = range(t, x.a, min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), y.b);
		break;
	case OP_GT:
		*newx = range(t, max_t(t, x.a, y.a + 1), x.b);
		*newy = range(t, y.a, min_t(t, x.b - 1, y.b));
		break;
	case OP_GE:
		*newx = range(t, max_t(t, x.a, y.a), x.b);
		*newy = range(t, y.a, min_t(t, x.b, y.b));
		break;
	case OP_EQ:
		/* both sides collapse to the intersection of the two ranges */
		*newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		break;
	case OP_NE:
		/* below logic is supported by the verifier now */
		if (x.a == x.b && x.a == y.a) {
			/* X is a constant matching left side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a + 1, y.b);
		} else if (x.a == x.b && x.b == y.b) {
			/* X is a constant matching right side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b - 1);
		} else if (y.a == y.b && x.a == y.a) {
			/* Y is a constant matching left side of X */
			*newx = range(t, x.a + 1, x.b);
			*newy = range(t, y.a, y.b);
		} else if (y.a == y.b && x.b == y.b) {
			/* Y is a constant matching right side of X */
			*newx = range(t, x.a, x.b - 1);
			*newy = range(t, y.a, y.b);
		} else {
			/* generic case, can't derive more information */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b);
		}
		break;
	default:
		break;
	}
}
/* =======================
 * REGISTER STATE HANDLING
 * =======================
 */
struct reg_state {
	struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
	bool valid;        /* presumably false means no usable state — confirm with users */
};
again: /* try to derive new knowledge from just learned range x of type t */ for (d_t = first_t; d_t <= last_t; d_t++) {
old = r->r[d_t];
r->r[d_t] = range_refine(d_t, r->r[d_t], t, x); if (!range_eq(r->r[d_t], old)) {
keep_going = true; if (env.verbosity >= VERBOSE_VERY)
print_refinement(t, x, d_t, old, r->r[d_t], ctx);
}
}
/* now see if we can derive anything new from updated reg_state's ranges */ for (s_t = first_t; s_t <= last_t; s_t++) { for (d_t = first_t; d_t <= last_t; d_t++) {
old = r->r[d_t];
r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]); if (!range_eq(r->r[d_t], old)) {
keep_going = true; if (env.verbosity >= VERBOSE_VERY)
print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
}
}
}
/* keep refining until we converge */ if (keep_going) {
keep_going = false; goto again;
}
}
staticvoid reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op, struct reg_state *newx, struct reg_state *newy, constchar *ctx)
{ char buf[32]; enum num_t ts[2]; struct reg_state xx = *x, yy = *y; int i, t_cnt; struct range z1, z2;
if (op == OP_EQ || op == OP_NE) { /* OP_EQ and OP_NE are sign-agnostic, so we need to process * both signed and unsigned domains at the same time
*/
ts[0] = t_unsigned(t);
ts[1] = t_signed(t);
t_cnt = 2;
} else {
ts[0] = t;
t_cnt = 1;
}
for (i = 0; i < t_cnt; i++) {
t = ts[i];
z1 = x->r[t];
z2 = y->r[t];
if (br_u >= 0 && br_s >= 0 && br_u != br_s)
ASSERT_FALSE(true, "branch taken inconsistency!\n");
/* if 64-bit ranges are indecisive, use 32-bit subranges to * eliminate always/never taken branches, if possible
*/ if (br_u == -1 && (t == U64 || t == S64)) {
br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op); /* we can only reject for OP_EQ, never take branch * based on lower 32 bits
*/ if (op == OP_EQ && br == 0) return 0; /* for OP_NEQ we can be conclusive only if lower 32 bits * differ and thus inequality branch is always taken
*/ if (op == OP_NE && br == 1) return 1;
/* ===================================== * BPF PROGS GENERATION AND VERIFICATION * =====================================
*/ struct case_spec { /* whether to init full register (r1) or sub-register (w1) */ bool init_subregs; /* whether to establish initial value range on full register (r1) or * sub-register (w1)
*/ bool setup_subregs; /* whether to establish initial value range using signed or unsigned * comparisons (i.e., initialize umin/umax or smin/smax directly)
*/ bool setup_signed; /* whether to perform comparison on full registers or sub-registers */ bool compare_subregs; /* whether to perform comparison using signed or unsigned operations */ bool compare_signed;
};
/* Generate test BPF program based on provided test ranges, operation, and * specifications about register bitness and signedness.
*/ staticint load_range_cmp_prog(struct range x, struct range y, enum op op, int branch_taken, struct case_spec spec, char *log_buf, size_t log_sz, int *false_pos, int *true_pos)
{ #define emit(insn) ({ \ struct bpf_insn __insns[] = { insn }; \ int __i; \ for (__i = 0; __i < ARRAY_SIZE(__insns); __i++) \
insns[cur_pos + __i] = __insns[__i]; \
cur_pos += __i; \
}) #define JMP_TO(target) (target - cur_pos - 1) int cur_pos = 0, exit_pos, fd, op_code; struct bpf_insn insns[64];
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_level = 2,
.log_buf = log_buf,
.log_size = log_sz,
.prog_flags = testing_prog_flags(),
);
/* ; skip exit block below * goto +2;
*/
emit(BPF_JMP_A(2));
exit_pos = cur_pos; /* ; exit block for all the preparatory conditionals * out: * r0 = 0; * exit;
*/
emit(BPF_MOV64_IMM(BPF_REG_0, 0));
emit(BPF_EXIT_INSN()); /* * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value * call bpf_get_current_pid_tgid; * r6 = r0; | w6 = w0; * call bpf_get_current_pid_tgid; * r7 = r0; | w7 = w0;
*/
emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid)); if (spec.init_subregs)
emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0)); else
emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid)); if (spec.init_subregs)
emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0)); else
emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); /* ; setup initial r6/w6 possible value range ([x.a, x.b]) * r1 = %[x.a] ll; | w1 = %[x.a]; * r2 = %[x.b] ll; | w2 = %[x.b]; * if r6 < r1 goto out; | if w6 < w1 goto out; * if r6 > r2 goto out; | if w6 > w2 goto out;
*/ if (spec.setup_subregs) {
emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
} else {
emit(BPF_LD_IMM64(BPF_REG_1, x.a));
emit(BPF_LD_IMM64(BPF_REG_2, x.b));
emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
} /* ; setup initial r7/w7 possible value range ([y.a, y.b]) * r1 = %[y.a] ll; | w1 = %[y.a]; * r2 = %[y.b] ll; | w2 = %[y.b]; * if r7 < r1 goto out; | if w7 < w1 goto out; * if r7 > r2 goto out; | if w7 > w2 goto out;
*/ if (spec.setup_subregs) {
emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
} else {
emit(BPF_LD_IMM64(BPF_REG_1, y.a));
emit(BPF_LD_IMM64(BPF_REG_2, y.b));
emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
} /* ; range test instruction * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
*/ switch (op) { case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break; case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break; case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break; case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break; case OP_EQ: op_code = BPF_JEQ; break; case OP_NE: op_code = BPF_JNE; break; default:
printf("unrecognized op %d\n", op); return -ENOTSUP;
} /* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably * ; this is used for debugging, as verifier doesn't always print * ; registers states as of condition jump instruction (e.g., when * ; precision marking happens) * r0 = r6; | w0 = w6; * r0 = r7; | w0 = w7;
*/ if (spec.compare_subregs) {
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
} else {
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
} if (spec.compare_subregs)
emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3)); else
emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3)); /* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably * r0 = r6; | w0 = w6; * r0 = r7; | w0 = w7; * exit;
*/
*false_pos = cur_pos; if (spec.compare_subregs) {
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
} else {
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
} if (branch_taken == 1) /* false branch is never taken */
emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */ else
emit(BPF_EXIT_INSN()); /* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably * r0 = r6; | w0 = w6; * r0 = r7; | w0 = w7; * exit;
*/
*true_pos = cur_pos; if (spec.compare_subregs) {
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
} else {
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
} if (branch_taken == 0) /* true branch is never taken */
emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
emit(BPF_EXIT_INSN()); /* last instruction has to be exit */
/* Parse register state from verifier log. * `s` should point to the start of "Rx = ..." substring in the verifier log.
*/ staticint parse_reg_state(constchar *s, struct reg_state *reg)
{ /* There are two generic forms for SCALAR register: * - known constant: R6_rwD=P%lld * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated * list of optional range specifiers: * - umin=%llu, if missing, assumed 0; * - umax=%llu, if missing, assumed U64_MAX; * - smin=%lld, if missing, assumed S64_MIN; * - smax=%lld, if missing, assumed S64_MAX; * - umin32=%d, if missing, assumed 0; * - umax32=%d, if missing, assumed U32_MAX; * - smin32=%d, if missing, assumed S32_MIN; * - smax32=%d, if missing, assumed S32_MAX; * - var_off=(%#llx; %#llx), tnum part, we don't care about it. * * If some of the values are equal, they will be grouped (but min/max * are not mixed together, and similarly negative values are not * grouped with non-negative ones). E.g.: * * R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000) * * _rwD part is optional (and any of the letters can be missing). * P (precision mark) is optional as well. * * Anything inside scalar() is optional, including id, of course.
*/ struct { constchar *pfx;
u64 *dst, def; bool is_32, is_set;
} *f, fields[8] = {
{"smin=", ®->r[S64].a, S64_MIN},
{"smax=", ®->r[S64].b, S64_MAX},
{"umin=", ®->r[U64].a, 0},
{"umax=", ®->r[U64].b, U64_MAX},
{"smin32=", ®->r[S32].a, (u32)S32_MIN, true},
{"smax32=", ®->r[S32].b, (u32)S32_MAX, true},
{"umin32=", ®->r[U32].a, 0, true},
{"umax32=", ®->r[U32].b, U32_MAX, true},
}; constchar *p; int i;
p = strchr(s, '='); if (!p) return -EINVAL;
p++; if (*p == 'P')
p++;
if (!str_has_pfx(p, "scalar(")) { longlong sval; enum num_t t;
if (p[0] == '0' && p[1] == 'x') { if (sscanf(p, "%llx", &sval) != 1) return -EINVAL;
} else { if (sscanf(p, "%lld", &sval) != 1) return -EINVAL;
}
q = strstr(p, buf); if (!q) {
*specs[i].state = (struct reg_state){.valid = false}; continue;
}
p = strstr(q, specs[i].reg_upper); if (!p) return -EINVAL;
err = parse_reg_state(p, specs[i].state); if (err) return -EINVAL;
} return 0;
}
/* Validate ranges match, and print details if they don't */ staticbool assert_range_eq(enum num_t t, struct range x, struct range y, constchar *ctx1, constchar *ctx2)
{
DEFINE_STRBUF(sb, 512);
/* Validate that register states match, and print details if they don't */ staticbool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, constchar *ctx)
{ bool ok = true; enum num_t t;
/* filter out irrelevant precision backtracking logs */ if (str_has_pfx(buf, "mark_precise: ")) goto skip_line;
printf("%.*s\n", (int)(p - buf), buf);
skip_line:
buf = *p == '\0' ? p : p + 1;
}
}
/* Simulate provided test case purely with our own range-based logic. * This is done to set up expectations for verifier's branch_taken logic and * verifier's register states in the verifier log.
*/ staticvoid sim_case(enum num_t init_t, enum num_t cond_t, struct range x, struct range y, enum op op, struct reg_state *fr1, struct reg_state *fr2, struct reg_state *tr1, struct reg_state *tr2, int *branch_taken)
{ const u64 A = x.a; const u64 B = x.b; const u64 C = y.a; const u64 D = y.b; struct reg_state rc; enum op rev_op = complement_op(op); enum num_t t;
fr1->valid = fr2->valid = true;
tr1->valid = tr2->valid = true; for (t = first_t; t <= last_t; t++) { /* if we are initializing using 32-bit subregisters, * full registers get upper 32 bits zeroed automatically
*/ struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];
/* Given setup ranges and number types, go over all supported operations, * generating individual subtest for each allowed combination
*/ staticint verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t, struct range x, struct range y, bool is_subtest)
{
DEFINE_STRBUF(sb, 256); int err; struct subtest_case sub = {
.init_t = init_t,
.cond_t = cond_t,
.x = x,
.y = y,
};
sb->pos = 0; /* reset position in strbuf */
subtest_case_str(sb, &sub, false/* ignore op */); if (is_subtest && !test__start_subtest(sb->buf)) return 0;
for (sub.op = first_op; sub.op <= last_op; sub.op++) {
sb->pos = 0; /* reset position in strbuf */
subtest_case_str(sb, &sub, true/* print op */);
if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
printf("TEST CASE: %s\n", sb->buf);
/* Generate valid unique constants from seeds, both signed and unsigned */ staticvoid gen_vals(struct ctx *ctx)
{ int i, j, cnt = 0;
for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) { for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
}
}
/* sort and compact uvals (i.e., it's `sort | uniq`) */
qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp); for (i = 1, j = 0; i < cnt; i++) { if (ctx->uvals[j] == ctx->uvals[i]) continue;
j++;
ctx->uvals[j] = ctx->uvals[i];
}
ctx->val_cnt = j + 1;
/* we have exactly the same number of s64 values, they are just in * a different order than u64s, so just sort them differently
*/ for (i = 0; i < ctx->val_cnt; i++)
ctx->svals[i] = ctx->uvals[i];
qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);
if (env.verbosity >= VERBOSE_SUPER) {
DEFINE_STRBUF(sb1, 256);
DEFINE_STRBUF(sb2, 256);
for (i = 0; i < ctx->val_cnt; i++) {
sb1->pos = sb2->pos = 0;
snprintf_num(U64, sb1, ctx->uvals[i]);
snprintf_num(S64, sb2, ctx->svals[i]);
printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
}
}
/* 32-bit values are generated separately */
cnt = 0; for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
ctx->usubvals[cnt++] = lower_seeds[i];
}
/* sort and compact usubvals (i.e., it's `sort | uniq`) */
qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp); for (i = 1, j = 0; i < cnt; i++) { if (ctx->usubvals[j] == ctx->usubvals[i]) continue;
j++;
ctx->usubvals[j] = ctx->usubvals[i];
}
ctx->subval_cnt = j + 1;
for (i = 0; i < ctx->subval_cnt; i++)
ctx->ssubvals[i] = ctx->usubvals[i];
qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);
if (env.verbosity >= VERBOSE_SUPER) {
DEFINE_STRBUF(sb1, 256);
DEFINE_STRBUF(sb2, 256);
for (i = 0; i < ctx->subval_cnt; i++) {
sb1->pos = sb2->pos = 0;
snprintf_num(U32, sb1, ctx->usubvals[i]);
snprintf_num(S32, sb2, ctx->ssubvals[i]);
printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
}
}
}
/* Generate valid ranges from upper/lower seeds */ staticint gen_ranges(struct ctx *ctx)
{ int i, j, cnt = 0;
for (i = 0; i < ctx->val_cnt; i++) { for (j = i; j < ctx->val_cnt; j++) { if (env.verbosity >= VERBOSE_SUPER) {
DEFINE_STRBUF(sb1, 256);
DEFINE_STRBUF(sb2, 256);
/* Go over generated constants and ranges and validate various supported * combinations of them
*/ staticvoid validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
{ struct ctx ctx; struct range rconst; conststruct range *ranges; const u64 *vals; int i, j;
for (i = 0; i < rcnt; i++) { for (j = i; j < rcnt; j++) { /* (<range> x <range>) */ if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j])) goto cleanup; if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i])) goto cleanup;
}
}
cleanup:
cleanup_ctx(&ctx);
}
/* Go over thousands of test cases generated from initial seed values. * Given this take a long time, guard this begind SLOW_TESTS=1 envvar. If * envvar is not set, this test is skipped during test_progs testing. * * We split this up into smaller subsets based on initialization and * conditional numeric domains to get an easy parallelization with test_progs' * -j argument.
*/
/* Compose a full 64-bit pseudo-random value from multiple random() calls. */
static u64 rand_u64(void)
{
	/* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
	 * seems to be 1<<31, so we need to call it thrice to get full u64;
	 * we'll use roughly equal split: 22 + 21 + 21 bits
	 */
	return ((u64)random() << 42) |
	       (((u64)random() & RAND_21BIT_MASK) << 21) |
	       (random() & RAND_21BIT_MASK);
}
/* Go over crafted hard-coded cases. This is fast, so we do it as part of * normal test_progs run.
*/ void test_reg_bounds_crafted(void)
{ struct ctx ctx; int i;
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) { struct subtest_case *c = &crafted_cases[i];
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.