// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
u8 *ptr = NULL;
if (k >= SKF_NET_OFF) {
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
return NULL;
}
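/* Illustrative userspace sketch (not part of the kernel source): how the
 * negative offset convention handled above is used from classic BPF. A
 * filter can load bytes relative to the network header via SKF_NET_OFF,
 * regardless of the link-layer header length; the helper above resolves
 * such loads. Hypothetical example, kept under #if 0 so it is clearly not
 * kernel code.
 */
#if 0
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_tcp_only_filter(int sock_fd)
{
	struct sock_filter code[] = {
		/* A = byte at network header + 9 (IPv4 protocol field) */
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
		/* accept if protocol == IPPROTO_TCP (6), else drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 6, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif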
/* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */
enum page_size_enum {
__PAGE_SIZE = PAGE_SIZE
};
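/* Illustrative BPF-side sketch (not part of this file): because the enum
 * above ends up in BTF, a program built against a vmlinux.h generated from
 * such a kernel can use the kernel's page size directly as the constant
 * __PAGE_SIZE. Hypothetical example program.
 */
#if 0
/* page_size.bpf.c */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_mmap")
int probe_mmap(void *ctx)
{
	unsigned long page_sz = __PAGE_SIZE;	/* from enum page_size_enum */

	bpf_printk("kernel page size: %lu", page_sz);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif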
/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;
	/* We keep fp->aux from fp_old around in the new
	 * reallocated structure.
	 */
fp_old->aux = NULL;
fp_old->stats = NULL;
fp_old->active = NULL;
__bpf_prog_free(fp_old);
}
while (blocks--) {
sha1_transform(digest, todo, ws);
todo += SHA1_BLOCK_SIZE;
}
	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
result[i] = cpu_to_be32(digest[i]);
memcpy(fp->tag, result, sizeof(fp->tag));
		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
i = end_new;
insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						    end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;

		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
ret = bpf_adj_delta_to_imm(insn, pos, end_old,
end_new, i, probe_pass);
} else {
ret = bpf_adj_delta_to_off(insn, pos, end_old,
end_new, i, probe_pass);
		}
		if (ret)
			break;
}
	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
}
insn_adj_cnt = prog->len + insn_delta;
	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);
	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				     GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);
prog_adj->len = insn_adj_cnt;
	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
insn_rest = insn_adj_cnt - off - len;
memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, sizeof(*patch) * insn_rest);
memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
bpf_adj_linfo(prog_adj, off, insn_delta);
return prog_adj;
}
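/* Illustrative sketch (not part of the kernel source): the same move-tail /
 * inject pattern described in the comment above, applied to a plain int
 * array in userspace. Names are hypothetical; branch adjustment (step 3) is
 * omitted since plain ints carry no offsets to fix up.
 */
#if 0
#include <stdlib.h>
#include <string.h>

/* Replace the single element at @off in @arr (length *@len) with the
 * @patch_len elements in @patch; returns the reallocated array. */
static int *patch_one(int *arr, size_t *len, size_t off,
		      const int *patch, size_t patch_len)
{
	size_t new_len = *len + patch_len - 1;
	size_t rest = new_len - off - patch_len;
	int *adj = realloc(arr, new_len * sizeof(*arr));

	if (!adj)
		return NULL;
	/* 1) move the tail out of the way, starting after the target slot */
	memmove(adj + off + patch_len, adj + off + 1, rest * sizeof(*adj));
	/* 2) inject the patch over the target slot */
	memcpy(adj + off, patch, patch_len * sizeof(*adj));
	*len = new_len;
	return adj;
}
#endif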
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	int err;

	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
memmove(prog->insnsi + off, prog->insnsi + off + cnt, sizeof(struct bpf_insn) * (prog->len - off - cnt));
prog->len -= cnt;
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;
BUILD_BUG_ON(sizeof("bpf_prog_") + sizeof(prog->tag) * 2 + /* name has been null terminated. * We should need +1 for the '_' preceding * the name. However, the null character * is double counted between the name and the * sizeof("bpf_prog_") above, so we omit * the +1 here.
*/ sizeof(prog->aux->name) > KSYM_NAME_LEN);
	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
type = btf_type_by_id(prog->aux->btf,
prog->aux->func_info[prog->aux->func_idx].type_id);
func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
}
static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;
ksym = container_of(n, struct bpf_ksym, tnode);
	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return 1;
#ifdef CONFIG_FINEIBT
	/*
	 * When FineIBT, code in the __cfi_foo() symbols can get executed
	 * and hence unwinder needs help.
	 */
	if (cfi_mode != CFI_FINEIBT)
		return;
/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small bpf programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE	(SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE	PAGE_SIZE
#endif
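/* Illustrative sketch (not part of this file): how the chunk constants above
 * carve a pack into 64-byte slots. A hypothetical helper rounds a JITed
 * image size up to whole chunks, e.g. 100 bytes -> 2 chunks -> 128 bytes.
 */
#if 0
#include <stddef.h>

#define CHUNK_SHIFT	6			/* mirrors BPF_PROG_CHUNK_SHIFT */
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)	/* 64 bytes per chunk */

static size_t prog_chunks(size_t prog_size)
{
	/* round up to the next multiple of CHUNK_SIZE, then count chunks */
	return (prog_size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
#endif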
/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}
static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
				    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
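/* Illustrative sketch (not part of this file): the heuristic above sets the
 * default JIT memory limit to half of the executable region, rounded up to
 * a page. With a hypothetical 1 GiB MODULES area and 4 KiB pages this
 * yields a 512 MiB default for bpf_jit_limit (tunable later via the
 * net.core.bpf_jit_limit sysctl).
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_u64(uint64_t v, uint64_t align)
{
	return (v + align - 1) / align * align;
}

int main(void)
{
	uint64_t exec_limit = 1ULL << 30;	/* assume 1 GiB of JIT memory */
	uint64_t page_size = 4096;
	uint64_t jit_limit = round_up_u64(exec_limit >> 1, page_size);

	printf("default bpf_jit_limit: %llu bytes\n",
	       (unsigned long long)jit_limit);
	return 0;
}
#endif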
int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) >
	    READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
}
}
	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
}
/* Fill space with illegal/arch-dep instructions. */
bpf_fill_ill_insns(hdr, size);
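/* Illustrative sketch (not part of this file): effect of the rounding above.
 * With a hypothetical 4000-byte JITed image, a 32-byte header and a 4 KiB
 * page, reserving at least 128 spare bytes pushes the allocation to two
 * pages, leaving slack in which the image start can be placed at a random
 * offset. All sizes here are assumptions for the arithmetic only.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int page = 4096, hdr = 32, proglen = 4000;	/* assumed sizes */
	unsigned int size = (proglen + hdr + 128 + page - 1) / page * page;

	printf("allocation: %u bytes, slack for random start: %u bytes\n",
	       size, size - (proglen + hdr));
	return 0;
}
#endif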
/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, a RW buffer is also allocated at
 * the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
u8 **rw_image,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
u32 size, hole, start;
/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;
/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after;
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 *   For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when jit fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
u32 size = ro_header->size;
/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed)
{
s16 off = insn->off;
s32 imm = insn->imm;
	u8 *addr;
	int err;
	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
off >= 0 && off < prog->aux->real_func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
bpf_jit_supports_far_kfunc_call()) {
		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
		if (err)
			return err;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
*/
addr = (u8 *)__bpf_call_base + imm;
}
	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;
	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
*/
memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
}
return fp;
}
static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
*/
fp->aux = NULL;
fp->stats = NULL;
fp->active = NULL;
__bpf_prog_free(fp);
}
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
*/
fp->aux->prog = fp;
bpf_prog_clone_free(fp_other);
}
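/* Illustrative sketch (not part of the kernel source): the general shape of
 * constant blinding as performed by bpf_jit_blind_insn(). An instruction
 * carrying a user-controlled immediate is replaced by a sequence that
 * recreates the immediate at runtime in the AX register from a random
 * value, so the constant never appears verbatim in the JITed image.
 * Hypothetical helper, using the insn macros from include/linux/filter.h:
 */
#if 0
static int blind_add_imm(struct bpf_insn *out, u8 dst_reg, u32 imm, u32 rnd)
{
	/* dst += imm   becomes:
	 *   AX  = rnd ^ imm
	 *   AX ^= rnd          (AX now holds imm)
	 *   dst += AX
	 */
	out[0] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ imm);
	out[1] = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);
	out[2] = BPF_ALU64_REG(BPF_ADD, dst_reg, BPF_REG_AX);
	return 3;
}
#endif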
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!prog->blinding_requested || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);
insn_cnt = clone->len;
insn = clone->insnsi;
	for (i = 0; i < insn_cnt; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* ld_imm64 with an address of bpf subprog is not
			 * a user controlled constant. Don't randomize it,
			 * since it will conflict with jit_subprogs() logic.
*/
insn++;
			i++;
			continue;
}
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
insn[1].code == 0)
memcpy(aux, insn, sizeof(aux));
rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
						clone->aux->verifier_zext);
		if (!rewritten)
			continue;
		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
*/
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
}
clone = tmp;
insn_delta = rewritten - 1;
/* Walk new program and skip insns we just inserted. */
insn = clone->insnsi + i + insn_delta;
insn_cnt += insn_delta;
i += insn_delta;
}
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
*/
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
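/* Illustrative sketch (not part of the kernel source): how the relative
 * helper encoding round-trips through __bpf_call_base. The verifier stores
 * the helper's distance from __bpf_call_base in insn->imm; the JIT (see
 * bpf_jit_get_func_addr() above) adds it back. Hypothetical helper pointer:
 */
#if 0
static void encode_and_decode_helper(struct bpf_insn *insn, void *helper_fn)
{
	/* verifier side: store a 32-bit offset instead of a full pointer */
	insn->imm = (u8 *)helper_fn - (u8 *)__bpf_call_base;

	/* JIT side: recover the absolute address from the offset */
	void *addr = (u8 *)__bpf_call_base + insn->imm;

	(void)addr;	/* addr == helper_fn */
}
#endif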
		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
			goto out;
tail_call_cnt++;
		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;
		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
*/
		insn = prog->insnsi;
		goto select_insn;
out:
CONT;
}
JMP_JA:
insn += insn->off;
CONT;
JMP32_JA:
insn += insn->imm;
CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
insn += insn->off; \
CONT_JMP; \
} \
CONT; \
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
insn += insn->off; \
CONT_JMP; \
} \
CONT; \
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
insn += insn->off; \
CONT_JMP; \
} \
CONT; \
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
insn += insn->off; \
CONT_JMP; \
} \
CONT;
COND_JMP(u, JEQ, ==)
COND_JMP(u, JNE, !=)
COND_JMP(u, JGT, >)
COND_JMP(u, JLT, <)
COND_JMP(u, JGE, >=)
COND_JMP(u, JLE, <=)
COND_JMP(u, JSET, &)
COND_JMP(s, JSGT, >)
COND_JMP(s, JSLT, <)
COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* ST, STX and LDX*/
	ST_NOSPEC:
		/* Speculation barrier for mitigating Speculative Store Bypass,
		 * Bounds-Check Bypass and Type Confusion. In case of arm64, we
		 * rely on the firmware mitigation as controlled via the ssbd
		 * kernel parameter. Whenever the mitigation is enabled, it
		 * works for all of the kernel code with no need to provide any
		 * additional instructions here. In case of x86, we use 'lfence'
		 * insn for mitigation. We reuse preexisting logic from Spectre
		 * v1 mitigation that happens to produce the required code on
		 * x86 for v4 as well.
*/
barrier_nospec();
		CONT;
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;							\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
			      (const void *)(long) (SRC + insn->off));	\
		DST = *((SIZE *)&DST);					\
		CONT;
	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
*/
pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
insn->code, insn->imm);
		BUG_ON(1);
		return 0;
}
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
*/
	WARN_ON_ONCE(1);
	return 0;
}
	spin_lock(&map->owner_lock);
	/* There's no owner yet where we could check for compatibility. */
	if (!map->owner) {
		map->owner = bpf_map_owner_alloc(map);
		if (!map->owner)
			goto err;
map->owner->type = prog_type;
map->owner->jited = fp->jited;
map->owner->xdp_has_frags = aux->xdp_has_frags;
map->owner->expected_attach_type = fp->expected_attach_type;
map->owner->attach_func_proto = aux->attach_func_proto;
for_each_cgroup_storage_type(i) {
map->owner->storage_cookie[i] =
aux->cgroup_storage[i] ?
aux->cgroup_storage[i]->cookie : 0;
}
ret = true;
} else {
ret = map->owner->type == prog_type &&
map->owner->jited == fp->jited &&
		      map->owner->xdp_has_frags == aux->xdp_has_frags;
		if (ret &&
map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
map->owner->expected_attach_type != fp->expected_attach_type)
ret = false;
		for_each_cgroup_storage_type(i) {
			if (!ret)
				break;
cookie = aux->cgroup_storage[i] ?
aux->cgroup_storage[i]->cookie : 0;
ret = map->owner->storage_cookie[i] == cookie ||
!cookie;
		}
		if (ret &&
		    map->owner->attach_func_proto != aux->attach_func_proto) {
			switch (prog_type) {
			case BPF_PROG_TYPE_TRACING:
			case BPF_PROG_TYPE_LSM:
			case BPF_PROG_TYPE_EXT:
			case BPF_PROG_TYPE_STRUCT_OPS:
				ret = false;
				break;
			default:
				break;
}
}
}
err:
	spin_unlock(&map->owner_lock);
	return ret;
}
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
{
	/* XDP programs inserted into maps are not guaranteed to run on
	 * a particular netdev (and can run outside driver context entirely
	 * in the case of devmap and cpumap). Until device checks
	 * are implemented, prohibit adding dev-bound programs to program maps.
	 */
	if (bpf_prog_is_dev_bound(fp->aux))
		return false;

return __bpf_prog_map_compatible(map, fp);
}
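/* Illustrative userspace sketch (not part of this file): the compatibility
 * check above is what makes the second update below fail. Once a program of
 * one type owns a BPF_MAP_TYPE_PROG_ARRAY, inserting a program of another
 * type is rejected with -EINVAL. The file descriptors here are hypothetical.
 */
#if 0
#include <bpf/bpf.h>

static void prog_array_compat_demo(int prog_array_fd,
				   int xdp_prog_fd, int kprobe_prog_fd)
{
	__u32 key = 0;
	int err;

	/* the first insert decides the map's owner type (here: XDP) */
	bpf_map_update_elem(prog_array_fd, &key, &xdp_prog_fd, BPF_ANY);

	key = 1;
	/* a tracing program is not compatible with the XDP-owned array */
	err = bpf_map_update_elem(prog_array_fd, &key, &kprobe_prog_fd, BPF_ANY);
	/* err < 0 with errno == EINVAL: rejected by the check above */
	(void)err;
}
#endif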
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i, ret = 0;

	mutex_lock(&aux->used_maps_mutex);
	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];

		if (!map_type_contains_progs(map))
			continue;
if (!__bpf_prog_map_compatible(map, fp)) {
			ret = -EINVAL;
			goto out;
}
}
	/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
	 * But for non-JITed programs, we don't need bpf_func, so no bounds
	 * check needed.
	 */
	if (idx < ARRAY_SIZE(interpreters)) {
fp->bpf_func = interpreters[idx];
select_interpreter = true;
} else {
fp->bpf_func = __bpf_prog_ret0_warn;
	}
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
	return select_interpreter;
}
/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via bpf_prog_run() function.
 */