/* Validate BPF_LOG_FIXED works as verifier log used to work, that is:
 * we get -ENOSPC and beginning of the full verifier log. This only
 * works for log_level 2 and log_level 1 + failed program. For log
 * level 2 we don't reset log at all. For log_level 1 + failed program
 * we don't get to verification stats output. With log level 1
 * for successful program final result will be just verifier stats.
 * But if provided too short log buf, kernel will NULL-out log->ubuf
 * and will stop emitting further log. This means we'll never see
 * predictable verifier stats.
 * Long story short, we do the following -ENOSPC test only for
 * predictable combinations.
 */
if (log_level >= 2 || expect_load_error) {
	opts.log_buf = logs.buf;
	opts.log_level = log_level | 8; /* 8 == BPF_LOG_FIXED: fixed-length log mode */
	/* deliberately undersized buffer to force -ENOSPC truncation */
	opts.log_size = 25;
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
				"GPL", insns, insn_cnt, &opts);
	if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
		/* load may have unexpectedly succeeded; don't leak the prog FD */
		if (prog_fd >= 0)
			close(prog_fd);
		goto cleanup;
	}
	/* fixed mode fills exactly log_size - 1 chars plus terminating NUL */
	if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"))
		goto cleanup;
	/* truncated log must be a byte-for-byte prefix of the full reference log */
	if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25"))
		goto cleanup;
}
/* Validate rolling verifier log logic: try all variations of log buf
 * length to force various truncation scenarios.
 */
opts.log_buf = logs.buf;

/* rotating mode (mode == 1) first, then fixed mode (mode == 0) */
for (mode = 1; mode >= 0; mode--) {
	/* prefill logs.buf with 'A's to detect any write beyond allowed length */
	memset(logs.filler, 'A', sizeof(logs.filler));
	logs.filler[sizeof(logs.filler) - 1] = '\0';
	memset(logs.buf, 'A', sizeof(logs.buf));
	logs.buf[sizeof(logs.buf) - 1] = '\0';

	/* try every log buffer size smaller than the full (untruncated) log */
	for (i = 1; i < fixed_log_sz; i++) {
		opts.log_size = i;
		opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */);
		/* NOTE(review): both loop bodies continue beyond this chunk of the
		 * file; closing braces are outside the visible range.
		 */
/* prepare simple BTF contents: a single valid int type */
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "btf_new_empty"))
	return; /* nothing was allocated yet, so no cleanup necessary */
res = btf__add_int(btf, "whatever", 4, 0);
if (!ASSERT_GT(res, 0, "btf_add_int_id"))
	goto cleanup;
if (bad_btf) {
	/* btf__add_int() doesn't allow bad value of size, so we'll just
	 * force-cast btf_type pointer and manually override size to invalid
	 * 3 if we need to simulate failure
	 */
	t = (void *)btf__type_by_id(btf, res);
	if (!ASSERT_OK_PTR(t, "int_btf_type"))
		goto cleanup;
	t->size = 3; /* invalid int size -> BTF load is expected to fail */
}
btf_data = btf__raw_data(btf, &btf_data_sz);
if (!ASSERT_OK_PTR(btf_data, "btf_data"))
	goto cleanup;

/* validate BPF_LOG_FIXED truncation works as verifier log used to work */
opts.log_buf = logs.buf;
opts.log_level = 1 | 8; /* 8 == BPF_LOG_FIXED: fixed-length log mode */
opts.log_size = 25;     /* undersized on purpose to force truncation */
res = load_btf(&opts, true);
ASSERT_EQ(res, -ENOSPC, "half_log_fd");
/* fixed mode fills exactly log_size - 1 chars plus terminating NUL */
ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25");
/* truncated log must match the prefix of the full reference log */
ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name);
/* Validate rolling verifier log logic: try all variations of log buf
 * length to force various truncation scenarios.
 */
opts.log_buf = logs.buf;
opts.log_level = 1; /* rolling (default) log mode, no BPF_LOG_FIXED flag */

/* prefill logs.buf with 'A's to detect any write beyond allowed length */
memset(logs.filler, 'A', sizeof(logs.filler));
logs.filler[sizeof(logs.filler) - 1] = '\0';
memset(logs.buf, 'A', sizeof(logs.buf));
logs.buf[sizeof(logs.buf) - 1] = '\0';

/* every buffer size below the full log length must report -ENOSPC */
for (i = 1; i < fixed_log_sz; i++) {
	opts.log_size = i;
	snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i);
	res = load_btf(&opts, true);
	if (!ASSERT_EQ(res, -ENOSPC, op_name))
		goto cleanup;

	/* check that unused portions of logs.buf are not overwritten:
	 * everything past offset i must still hold the 'A' filler pattern
	 */
	snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i);
	if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
		/* dump both tails to aid debugging the mismatch */
		printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
		       strcmp(logs.buf + i, logs.filler + i),
		       logs.buf + i, logs.filler + i);
		goto cleanup;
	}
}
/* NOTE(review): the following text is extraction residue (a German website
 * disclaimer), not C code. Preserved here in English translation and
 * commented out so it cannot break compilation:
 * "The information on this website was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */