/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
#define __printf(a, b) __attribute__((format(printf, a, b)))
/* Describes a single relocation to apply to a BPF instruction.
 * 'type' selects which union member is meaningful.
 * Fix: the extraction fused "const struct" into "conststruct", which
 * does not compile; tokens restored and fields laid out one per line.
 */
struct reloc_desc {
	enum reloc_type type;
	int insn_idx;	/* instruction index the relocation applies to */
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
			int ext_idx;
		};
	};
};
/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and few other) BPF programs
	 * that were attachable through BPF_PROG_ATTACH command. Pretty
	 * meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program support non-linear XDP buffer */
	SEC_XDP_FRAGS = 16,
	/* Setup proper attach type for usdt probes. */
	SEC_USDT = 32,
};
struct bpf_sec_def { char *sec; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; long cookie; int handler_id;
/* * bpf_prog should be a better name but it has been used in * linux/filter.h.
*/ struct bpf_program { char *name; char *sec_name;
size_t sec_idx; conststruct bpf_sec_def *sec_def; /* this program's instruction offset (in number of instructions) * within its containing ELF section
*/
size_t sec_insn_off; /* number of original instructions in ELF section belonging to this * program, not taking into account subprogram instructions possible * appended later during relocation
*/
size_t sec_insn_cnt; /* Offset (in number of instructions) of the start of instruction * belonging to this BPF program within its containing main BPF * program. For the entry-point (main) BPF program, this is always * zero. For a sub-program, this gets reset before each of main BPF * programs are processed and relocated and is used to determined * whether sub-program was already appended to the main program, and * if yes, at which instruction offset.
*/
size_t sub_insn_off;
/* instructions that belong to BPF program; insns[0] is located at * sec_insn_off instruction within its ELF section in ELF file, so * when mapping ELF file instruction index to the local instruction, * one needs to subtract sec_insn_off; and vice versa.
*/ struct bpf_insn *insns; /* actual number of instruction in this BPF program's image; for * entry-point BPF programs this includes the size of main program * itself plus all the used sub-programs, appended at the end
*/
size_t insns_cnt;
/* libbpf's view of a single BPF map; field order preserved exactly. */
struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	int mod_btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	bool autoattach;
	__u64 map_extra;
};
/* Describes one extern symbol: either a kconfig value (kcfg) or a
 * kernel symbol (ksym); the union member is selected by 'type'.
 * Fix: the extraction fused "unsigned long long" into the invalid
 * token "unsignedlonglong"; tokens restored, layout normalized.
 */
struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;
			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;
			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};
/* Cached handle for one kernel module's BTF object. */
struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};
char *kconfig; struct extern_desc *externs; int nr_extern; int kconfig_map_idx;
bool has_subcalls; bool has_rodata;
struct bpf_gen *gen_loader;
/* Information when doing ELF related work. Only valid if efile.elf is not NULL */ struct elf_state efile;
unsignedchar byteorder;
struct btf *btf; struct btf_ext *btf_ext;
/* Parse and load BTF vmlinux if any of the programs in the object need * it at load time.
*/ struct btf *btf_vmlinux; /* Path to the custom BTF to be used for BPF CO-RE relocations as an * override for vmlinux BTF.
*/ char *btf_custom_path; /* vmlinux BTF override for CO-RE relocations */ struct btf *btf_vmlinux_override; /* Lazily initialized kernel module BTFs */ struct module_btf *btf_modules; bool btf_modules_loaded;
size_t btf_module_cnt;
size_t btf_module_cap;
/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ char *log_buf;
size_t log_size;
__u32 log_level;
int *fd_array;
size_t fd_array_cap;
size_t fd_array_cnt;
struct usdt_manager *usdt_man;
int arena_map_idx; void *arena_data;
size_t arena_data_sz;
struct kern_feature_cache *feat_cache; char *token_path; int token_fd;
/* libbpf's convention for SEC("?abc...") is that it's just like * SEC("abc...") but the corresponding bpf_program starts out with * autoload set to false.
*/ if (sec_name[0] == '?') {
prog->autoload = false; /* from now on forget there was ? in section name */
sec_name++;
} else {
prog->autoload = true;
}
for (i = 0; i < nr_syms; i++) {
sym = elf_sym_by_idx(obj, i);
if (sym->st_shndx != sec_idx) continue; if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) continue;
prog_sz = sym->st_size;
sec_off = sym->st_value;
name = elf_sym_str(obj, sym->st_name); if (!name) {
pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT;
}
if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) {
pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT;
}
if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); return -ENOTSUP;
}
pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); if (!progs) { /* * In this case the original obj->programs * is still valid, so don't need special treat for * bpf_close_object().
*/
pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
sec_name, name); return -ENOMEM;
}
obj->programs = progs;
prog = &progs[nr_progs];
err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
sec_off, data + sec_off, prog_sz); if (err) return err;
if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
prog->sym_global = true;
/* if function is a global/weak symbol, but has restricted * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC * as static to enable more permissive BPF verification mode * with more outside context available to BPF verifier
*/ if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
/* Look for the corresponding "map_value" type that will be used * in map_update(BPF_MAP_TYPE_STRUCT_OPS) first, figure out the btf * and the mod_btf. * For example, find "struct bpf_struct_ops_tcp_congestion_ops".
*/
kern_vtype_id = find_ksym_btf_id(obj, stname, BTF_KIND_STRUCT, &btf, mod_btf); if (kern_vtype_id < 0) {
pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", stname); return kern_vtype_id;
}
kern_vtype = btf__type_by_id(btf, kern_vtype_id);
kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); if (kern_type_id < 0) {
pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname); return kern_type_id;
}
kern_type = btf__type_by_id(btf, kern_type_id);
/* Find "struct tcp_congestion_ops" from * struct bpf_struct_ops_tcp_congestion_ops { * [ ... ] * struct tcp_congestion_ops data; * }
*/
kern_data_member = btf_members(kern_vtype); for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { if (kern_data_member->type == kern_type_id) break;
} if (i == btf_vlen(kern_vtype)) {
pr_warn("struct_ops init_kern: struct %s data is not found in struct %s\n",
tname, stname); return -EINVAL;
}
staticbool is_valid_st_ops_program(struct bpf_object *obj, conststruct bpf_program *prog)
{ int i;
for (i = 0; i < obj->nr_programs; i++) { if (&obj->programs[i] == prog) return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
}
returnfalse;
}
/* For each struct_ops program P, referenced from some struct_ops map M, * enable P.autoload if there are Ms for which M.autocreate is true, * disable P.autoload if for all Ms M.autocreate is false. * Don't change P.autoload for programs that are not referenced from any maps.
*/ staticint bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
{ struct bpf_program *prog, *slot_prog; struct bpf_map *map; int i, j, k, vlen;
for (i = 0; i < obj->nr_programs; ++i) { int should_load = false; int use_cnt = 0;
prog = &obj->programs[i]; if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) continue;
member = btf_members(type); for (i = 0; i < btf_vlen(type); i++, member++) { conststruct btf_type *mtype, *kern_mtype;
__u32 mtype_id, kern_mtype_id; void *mdata, *kern_mdata; struct bpf_program *prog;
__s64 msize, kern_msize;
__u32 moff, kern_moff;
__u32 kern_member_idx; constchar *mname;
mname = btf__name_by_offset(btf, member->name_off);
moff = member->offset / 8;
mdata = data + moff;
msize = btf__resolve_size(btf, member->type); if (msize < 0) {
pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
map->name, mname); return msize;
}
kern_member = find_member_by_name(kern_btf, kern_type, mname); if (!kern_member) { if (!libbpf_is_mem_zeroed(mdata, msize)) {
pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
map->name, mname); return -ENOTSUP;
}
if (st_ops->progs[i]) { /* If we had declaratively set struct_ops callback, we need to * force its autoload to false, because it doesn't have * a chance of succeeding from POV of the current struct_ops map. * If this program is still referenced somewhere else, though, * then bpf_object_adjust_struct_ops_autoload() will update its * autoload accordingly.
*/
st_ops->progs[i]->autoload = false;
st_ops->progs[i] = NULL;
}
/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
map->name, mname); continue;
}
kern_member_idx = kern_member - btf_members(kern_type); if (btf_member_bitfield_size(type, i) ||
btf_member_bitfield_size(kern_type, kern_member_idx)) {
pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
map->name, mname); return -ENOTSUP;
}
if (btf_is_ptr(mtype)) {
prog = *(void **)mdata; /* just like for !kern_member case above, reset declaratively * set (at compile time) program's autload to false, * if user replaced it with another program or NULL
*/ if (st_ops->progs[i] && st_ops->progs[i] != prog)
st_ops->progs[i]->autoload = false;
/* Update the value from the shadow type */
st_ops->progs[i] = prog; if (!prog) continue;
if (!is_valid_st_ops_program(obj, prog)) {
pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
map->name, mname); return -ENOTSUP;
}
/* mtype->type must be a func_proto which was * guaranteed in bpf_object__collect_st_ops_relos(), * so only check kern_mtype for func_proto here.
*/ if (!btf_is_func_proto(kern_mtype)) {
pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
map->name, mname); return -ENOTSUP;
}
if (mod_btf)
prog->attach_btf_obj_fd = mod_btf->fd;
/* if we haven't yet processed this BPF program, record proper * attach_btf_id and member_idx
*/ if (!prog->attach_btf_id) {
prog->attach_btf_id = kern_type_id;
prog->expected_attach_type = kern_member_idx;
}
/* struct_ops BPF prog can be re-used between multiple * .struct_ops & .struct_ops.link as long as it's the * same struct_ops struct definition and the same * function pointer field
*/ if (prog->attach_btf_id != kern_type_id) {
pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
map->name, mname, prog->name, prog->sec_name, prog->type,
prog->attach_btf_id, kern_type_id); return -EINVAL;
} if (prog->expected_attach_type != kern_member_idx) {
pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
map->name, mname, prog->name, prog->sec_name, prog->type,
prog->expected_attach_type, kern_member_idx); return -EINVAL;
}
datasec = btf__type_by_id(btf, datasec_id);
vsi = btf_var_secinfos(datasec); for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
type = btf__type_by_id(obj->btf, vsi->type);
var_name = btf__name_by_offset(obj->btf, type->name_off);
type_id = btf__resolve_type(obj->btf, vsi->type); if (type_id < 0) {
pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
vsi->type, sec_name); return -EINVAL;
}
type = btf__type_by_id(obj->btf, type_id);
tname = btf__name_by_offset(obj->btf, type->name_off); if (!tname[0]) {
pr_warn("struct_ops init: anonymous type is not supported\n"); return -ENOTSUP;
} if (!btf_is_struct(type)) {
pr_warn("struct_ops init: %s is not a struct\n", tname); return -EINVAL;
}
map = bpf_object__add_map(obj); if (IS_ERR(map)) return PTR_ERR(map);
/* Follow same convention as for programs autoload: * SEC("?.struct_ops") means map is not created by default.
*/ if (sec_name[0] == '?') {
map->autocreate = false; /* from now on forget there was ? in section name */
sec_name++;
}
if (vsi->offset + type->size > data->d_size) {
pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
var_name, sec_name); return -EINVAL;
}
strcpy(obj->path, path); if (obj_name) {
libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
} else { /* Using basename() GNU version which doesn't modify arg. */
libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
end = strchr(obj->name, '.'); if (end)
*end = 0;
}
obj->efile.fd = -1; /* * Caller of this function should also call * bpf_object__elf_finish() after data collection to return * obj_buf to user. If not, we should duplicate the buffer to * avoid user freeing them before elf finish.
*/
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->kconfig_map_idx = -1;
obj->arena_map_idx = -1;
if (!elf) {
pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__LIBELF; goto errout;
}
obj->efile.elf = elf;
if (elf_kind(elf) != ELF_K_ELF) {
err = -LIBBPF_ERRNO__FORMAT;
pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); goto errout;
}
if (gelf_getclass(elf) != ELFCLASS64) {
err = -LIBBPF_ERRNO__FORMAT;
pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); goto errout;
}
obj->efile.ehdr = ehdr = elf64_getehdr(elf); if (!obj->efile.ehdr) {
pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT; goto errout;
}
/* Validate ELF object endianness... */ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
err = -LIBBPF_ERRNO__ENDIAN;
pr_warn("elf: '%s' has unknown byte order\n", obj->path); goto errout;
} /* and save after bpf_object_open() frees ELF data */
obj->byteorder = ehdr->e_ident[EI_DATA];
if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
pr_warn("elf: failed to get section names section index for %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT; goto errout;
}
/* ELF is corrupted/truncated, avoid calling elf_strptr. */ if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
pr_warn("elf: failed to get section names strings from %s: %s\n",
obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT; goto errout;
}
/* Old LLVM set e_machine to EM_NONE */ if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT; goto errout;
}
for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
Elf64_Sym *sym = elf_sym_by_idx(obj, si);
if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) continue;
if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
ELF64_ST_BIND(sym->st_info) != STB_WEAK) continue;
sname = elf_sym_str(obj, sym->st_name); if (!sname) {
pr_warn("failed to get sym name string for var %s\n", name); return ERR_PTR(-EIO);
} if (strcmp(name, sname) == 0) return sym;
}
map = &obj->maps[obj->nr_maps++];
map->obj = obj; /* Preallocate map FD without actually creating BPF map just yet. * These map FD "placeholders" will be reused later without changing * FD value when map is actually created in the kernel. * * This is useful to be able to perform BPF program relocations * without having to create BPF maps before that step. This allows us * to finalize and load BTF very late in BPF object's loading phase, * right before BPF maps have to be created and BPF programs have to * be loaded. By having these map FD placeholders we can perform all * the sanitizations, relocations, and any other adjustments before we * start creating actual BPF kernel objects (BTF, maps, progs).
*/
map->fd = create_placeholder_fd(); if (map->fd < 0) return ERR_PTR(map->fd);
map->inner_map_fd = -1;
map->autocreate = true;
/* This is one of the more confusing parts of libbpf for various * reasons, some of which are historical. The original idea for naming * internal names was to include as much of BPF object name prefix as * possible, so that it can be distinguished from similar internal * maps of a different BPF object. * As an example, let's say we have bpf_object named 'my_object_name' * and internal map corresponding to '.rodata' ELF section. The final * map name advertised to user and to the kernel will be * 'my_objec.rodata', taking first 8 characters of object name and * entire 7 characters of '.rodata'. * Somewhat confusingly, if internal map ELF section name is shorter * than 7 characters, e.g., '.bss', we still reserve 7 characters * for the suffix, even though we only have 4 actual characters, and * resulting map will be called 'my_objec.bss', not even using all 15 * characters allowed by the kernel. Oh well, at least the truncated * object name is somewhat consistent in this case. But if the map * name is '.kconfig', we'll still have entirety of '.kconfig' added * (8 chars) and thus will be left with only first 7 characters of the * object name ('my_obje'). Happy guessing, user, that the final map * name will be "my_obje.kconfig". * Now, with libbpf starting to support arbitrarily named .rodata.* * and .data.* data sections, it's possible that ELF section name is * longer than allowed 15 chars, so we now need to be careful to take * only up to 15 first characters of ELF name, taking no BPF object * name characters at all. So '.rodata.abracadabra' will result in * '.rodata.abracad' kernel and user-visible name. * We need to keep this convoluted logic intact for .data, .bss and * .rodata maps, but for new custom .data.custom and .rodata.custom * maps we use their ELF names as is, not prepending bpf_object name * in front. We still need to truncate them to 15 characters for the * kernel. 
Full name can be recovered for such maps by using DATASEC * BTF type associated with such map's value type, though.
*/ if (sfx_len >= BPF_OBJ_NAME_LEN)
sfx_len = BPF_OBJ_NAME_LEN - 1;
/* if there are two or more dots in map name, it's a custom dot map */ if (strchr(real_name + 1, '.') != NULL)
pfx_len = 0; else
pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
/* Internal BPF map is mmap()'able only if at least one of corresponding * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL * variable and it's not marked as __hidden (which turns it into, effectively, * a STATIC variable).
*/ staticbool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
{ conststruct btf_type *t, *vt; struct btf_var_secinfo *vsi; int i, n;
if (!map->btf_value_type_id) returnfalse;
t = btf__type_by_id(obj->btf, map->btf_value_type_id); if (!btf_is_datasec(t)) returnfalse;
vsi = btf_var_secinfos(t); for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
vt = btf__type_by_id(obj->btf, vsi->type); if (!btf_is_var(vt)) continue;
if (btf_var(vt)->linkage != BTF_VAR_STATIC) returntrue;
}
/* Validate that value stored in u64 fits in integer of `ext->sz` * bytes size without any loss of information. If the target integer * is signed, we rely on the following limits of integer type of * Y bits and subsequent transformation: * * -2^(Y-1) <= X <= 2^(Y-1) - 1 * 0 <= X + 2^(Y-1) <= 2^Y - 1 * 0 <= X + 2^(Y-1) < 2^Y * * For unsigned target integer, check that all the (64 - Y) bits are * zero.
*/ if (ext->kcfg.is_signed) return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); else return (v >> bit_sz) == 0;
}
/* Store numeric @value into the kcfg extern's data slot @ext_val.
 * Validates that the extern's declared type is integer-like (int, char,
 * or bool), that a bool value is 0/1, and that the value fits the
 * declared size (via is_kcfg_value_in_range) before writing with the
 * matching width. Marks the extern as set on success.
 * Returns 0 on success, -EINVAL on type mismatch or unsupported size,
 * -ERANGE when the value does not fit.
 * Fix: the extraction fused "static int" and the
 * "(unsigned long long)" casts into invalid single tokens; restored.
 */
static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
	    ext->kcfg.type != KCFG_BOOL) {
		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1:
		*(__u8 *)ext_val = value;
		break;
	case 2:
		*(__u16 *)ext_val = value;
		break;
	case 4:
		*(__u32 *)ext_val = value;
		break;
	case 8:
		*(__u64 *)ext_val = value;
		break;
	default:
		return -EINVAL;
	}

	ext->is_set = true;
	return 0;
}
sep = strchr(buf, '='); if (!sep) {
pr_warn("failed to parse '%s': no separator\n", buf); return -EINVAL;
}
/* Trim ending '\n' */
len = strlen(buf); if (buf[len - 1] == '\n')
buf[len - 1] = '\0'; /* Split on '=' and ensure that a value is present. */
*sep = '\0'; if (!sep[1]) {
*sep = '=';
pr_warn("failed to parse '%s': no value\n", buf); return -EINVAL;
}
ext = find_extern_by_name(obj, buf); if (!ext || ext->is_set) return 0;
ext_val = data + ext->kcfg.data_off;
value = sep + 1;
t = skip_mods_and_typedefs(btf, id, NULL); if (!btf_is_ptr(t)) return NULL;
t = skip_mods_and_typedefs(btf, t->type, res_id);
return btf_is_func_proto(t) ? t : NULL;
}
staticconstchar *__btf_kind_str(__u16 kind)
{ switch (kind) { case BTF_KIND_UNKN: return"void"; case BTF_KIND_INT: return"int"; case BTF_KIND_PTR: return"ptr"; case BTF_KIND_ARRAY: return"array"; case BTF_KIND_STRUCT: return"struct"; case BTF_KIND_UNION: return"union"; case BTF_KIND_ENUM: return"enum"; case BTF_KIND_FWD: return"fwd"; case BTF_KIND_TYPEDEF: return"typedef"; case BTF_KIND_VOLATILE: return"volatile"; case BTF_KIND_CONST: return"const"; case BTF_KIND_RESTRICT: return"restrict"; case BTF_KIND_FUNC: return"func"; case BTF_KIND_FUNC_PROTO: return"func_proto"; case BTF_KIND_VAR: return"var";
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.13 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.