/* Datapath usually can give us guarantees on how much adjust head * can be done without the need for any checks. Optimize the simple * case where there is only one adjust head by a constant.
*/ if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off)) goto exit_set_location;
imm = reg2->var_off.value; /* Translator will skip all checks, we need to guarantee min pkt len */ if (imm > ETH_ZLEN - ETH_HLEN) goto exit_set_location; if (imm > (int)bpf->adjust_head.guaranteed_add ||
imm < -bpf->adjust_head.guaranteed_sub) goto exit_set_location;
if (nfp_prog->adjust_head_location) { /* Only one call per program allowed */ if (nfp_prog->adjust_head_location != meta->n) goto exit_set_location;
if (meta->arg2.reg.var_off.value != imm) goto exit_set_location;
}
/* We need to record each time update happens with non-zero words, * in case such word is used in atomic operations. * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
*/
if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
i, soff); returnfalse;
}
nfp_map->use_map[i / 4].non_zero_update = 1;
}
switch (func_id) { case BPF_FUNC_xdp_adjust_head: if (!bpf->adjust_head.off_max) {
pr_vlog(env, "adjust_head not supported by FW\n"); return -EOPNOTSUPP;
} if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n"); return -EOPNOTSUPP;
}
case BPF_FUNC_map_delete_elem: if (!nfp_bpf_map_call_ok("map_delete", env, meta,
bpf->helpers.map_delete, reg1) ||
!nfp_bpf_stack_arg_ok("map_delete", env, reg2,
meta->func_id ? &meta->arg2 : NULL)) return -EOPNOTSUPP; break;
case BPF_FUNC_get_prandom_u32: if (bpf->pseudo_random) break;
pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n"); return -EOPNOTSUPP;
if (!bpf->helpers.perf_event_output) {
pr_vlog(env, "event_output: not supported by FW\n"); return -EOPNOTSUPP;
}
/* Force current CPU to make sure we can report the event * wherever we get the control message from FW.
*/ if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
(reg3->var_off.value & BPF_F_INDEX_MASK) !=
BPF_F_CURRENT_CPU) { char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
tn_buf); return -EOPNOTSUPP;
}
/* Save space in meta, we don't care about arguments other * than 4th meta, shove it into arg1.
*/
reg1 = cur_regs(env) + BPF_REG_4;
/* Warn user that on offload NFP may return success even if map * is not going to accept the event, since the event output is * fully async and device won't know the state of the map. * There is also FW limitation on the event length. * * Lost events will not show up on the perf ring, driver * won't see them at all. Events may also get reordered.
*/
dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev, "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");
if (!meta->func_id) break;
if (reg1->type != meta->arg1.type) {
pr_vlog(env, "event_output: ptr type changed: %d %d\n",
meta->arg1.type, reg1->type); return -EINVAL;
} break;
default:
pr_vlog(env, "unsupported function id: %d\n", func_id); return -EOPNOTSUPP;
}
if (use >= ARRAY_SIZE(names) || !names[use]) return"unknown"; return names[use];
}
staticint
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env, struct nfp_bpf_map *nfp_map, unsignedint off, enum nfp_bpf_map_use use)
{ if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
nfp_map->use_map[off / 4].type != use) {
pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
nfp_bpf_map_use_name(use), off); return -EOPNOTSUPP;
}
if (nfp_map->use_map[off / 4].non_zero_update &&
use == NFP_MAP_USE_ATOMIC_CNT) {
pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
off); return -EOPNOTSUPP;
}
if (meta->insn.imm != BPF_ADD) {
pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm); return -EOPNOTSUPP;
}
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type); return -EOPNOTSUPP;
} if (sreg->type != SCALAR_VALUE) {
pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type); return -EOPNOTSUPP;
}
/* NFP supports u16 and u32 multiplication. * * For ALU64, if either operand is beyond u32's value range, we reject * it. One thing to note, if the source operand is BPF_K, then we need * to check "imm" field directly, and we'd reject it if it is negative. * Because for ALU64, "imm" (with s32 type) is expected to be sign * extended to s64 which NFP mul doesn't support. * * For ALU32, it is fine for "imm" be negative though, because the * result is 32-bits and there is no difference on the low halve of * the result for signed/unsigned mul, so we will get correct result.
*/ if (is_mbpf_mul(meta)) { if (meta->umax_dst > U32_MAX) {
pr_vlog(env, "multiplier is not within u32 value range\n"); return -EINVAL;
} if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
pr_vlog(env, "multiplicand is not within u32 value range\n"); return -EINVAL;
} if (mbpf_class(meta) == BPF_ALU64 &&
mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n"); return -EINVAL;
}
}
/* NFP doesn't have divide instructions, we support divide by constant * through reciprocal multiplication. Given NFP support multiplication * no bigger than u32, we'd require divisor and dividend no bigger than * that as well. * * Also eBPF doesn't support signed divide and has enforced this on C * language level by failing compilation. However LLVM assembler hasn't * enforced this, so it is possible for negative constant to leak in as * a BPF_K operand through assembly code, we reject such cases as well.
*/ if (is_mbpf_div(meta)) { if (meta->umax_dst > U32_MAX) {
pr_vlog(env, "dividend is not within u32 value range\n"); return -EINVAL;
} if (mbpf_src(meta) == BPF_X) { if (meta->umin_src != meta->umax_src) {
pr_vlog(env, "divisor is not constant\n"); return -EINVAL;
} if (meta->umax_src > U32_MAX) {
pr_vlog(env, "divisor is not within u32 value range\n"); return -EINVAL;
}
} if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
pr_vlog(env, "divide by negative constant is not supported\n"); return -EINVAL;
}
}
return 0;
}
/*
 * nfp_verify_insn() - per-instruction callback from the kernel BPF
 * verifier for programs being offloaded to the NFP.
 *
 * NOTE(review): only the opening (metadata cursor advance and the
 * nfp_bpf_supported_opcode() check) is self-consistent.  From the
 * "index + 1" comparison onward the code uses identifiers (index,
 * frame, frame_depths, ret_insn, ret_prog, idx, depth, max_depth,
 * info, i, nn, max_stack) that are never declared here, and the
 * function's closing brace is not visible in this chunk.  The text
 * looks like several helpers (subprogram stack-depth walking and
 * finalize-time checks) spliced into one body -- reconcile with the
 * upstream driver source; do not build as-is.
 */
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
/* Advance the cached per-instruction metadata cursor to insn_idx. */
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
nfp_prog->verifier_meta = meta;
/* Reject opcodes the offload has no translation for. */
if (!nfp_bpf_supported_opcode(meta->insn.code)) {
pr_vlog(env, "instruction %#02x not supported\n",
meta->insn.code); return -EINVAL;
}
/* NOTE(review): `index` is undeclared from here on -- spliced code. */
if (index + 1 != nfp_prog->subprog_cnt) {
pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
index + 1, nfp_prog->subprog_cnt); return -EFAULT;
}
/* Inspired from check_max_stack_depth() from kernel verifier. * Starting from main subprogram, walk all instructions and recursively * walk all callees that given subprogram can call. Since recursion is * prevented by the kernel verifier, this algorithm only needs a local * stack of MAX_CALL_FRAMES to remember callsites.
*/
process_subprog:
/* Account the (aligned) stack usage of the current subprogram. */
frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
depth += frame_depths[frame];
max_depth = max(max_depth, depth);
continue_subprog: for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
meta = nfp_meta_next(meta)) { if (!is_mbpf_pseudo_call(meta)) continue;
/* We found a call to a subprogram. Remember instruction to * return to and subprog id.
*/
ret_insn[frame] = nfp_meta_next(meta);
ret_prog[frame] = idx;
/* Find the callee and start processing it. */
meta = nfp_bpf_goto_meta(nfp_prog, meta,
meta->n + 1 + meta->insn.imm);
idx = meta->subprog_idx;
frame++; goto process_subprog;
} /* End of for() loop means the last instruction of the subprog was * reached. If we popped all stack frames, return; otherwise, go on * processing remaining instructions from the caller.
*/ if (frame == 0) return max_depth;
/* NOTE(review): nothing in the visible code ever decrements `frame`
 * or jumps back to continue_subprog -- the frame-pop logic the
 * comment above describes appears to be missing from this chunk.
 */
info = env->subprog_info; for (i = 0; i < nfp_prog->subprog_cnt; i++) {
nfp_prog->subprog[i].stack_depth = info[i].stack_depth;
if (i == 0) continue;
/* Account for size of return address. */
nfp_prog->subprog[i].stack_depth += REG_WIDTH; /* Account for size of saved registers, if necessary. */ if (nfp_prog->subprog[i].needs_reg_push)
nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
}
/* Compare total stack usage against the FW-advertised limit. */
nn = netdev_priv(env->prog->aux->offload->netdev);
max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog); if (nfp_prog->stack_size > max_stack) {
pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
nfp_prog->stack_size, max_stack); return -EOPNOTSUPP;
}
/*
 * NOTE(review): the German text below is a generic website disclaimer
 * ("information compiled to the best of our knowledge; no guarantee of
 * completeness, correctness or quality; the syntax highlighting and the
 * measurement are still experimental") and is clearly not part of the
 * driver source -- it looks like residue from a web scrape.  It is
 * preserved inside a comment so the file remains valid C; remove it
 * once confirmed extraneous.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */