	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union { /* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};
	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;
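/*
 * For context, a hedged sketch of how the page_pool registered above might
 * be created so that each page is prepared exactly once: the init_callback
 * member of struct page_pool_params lets the pool fill in the xdp_page_head
 * when a page is first allocated. Names mirror the surrounding excerpt, but
 * the exact parameter list is an assumption.
 */
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page, /* fills xdp_page_head */
		.init_arg = xdp,
	};

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);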
static bool frame_was_changed(const struct xdp_page_head *head)
{
	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
	 * i.e. has the highest chances to be overwritten. If those two are
	 * untouched, it's most likely safe to skip the context reset.
	 */
	return head->frame->data != head->orig_ctx.data ||
	       head->frame->flags != head->orig_ctx.flags;
}
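/*
 * A hedged sketch of the reset path this heuristic guards: if the program
 * left the frame untouched, the context reset can be skipped; otherwise the
 * working context is restored from the pristine copy kept in the page head.
 * The exact field list is an assumption based on the struct above.
 */
static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!frame_was_changed(head)))
		return;

	/* restore the mutable pointers from the original context */
	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
}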
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
				  (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];
		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}
		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}
out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}
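/*
 * Example (userspace, not part of this file): a hedged sketch of driving the
 * live-frame path above, where XDP_TX/XDP_REDIRECT frames are actually
 * transmitted. BPF_F_TEST_XDP_LIVE_FRAMES and batch_size map onto the batch
 * processing in this function; prog_fd and the frame contents are
 * assumptions for illustration.
 */
#include <bpf/libbpf.h>
#include <linux/bpf.h>

static int run_live(int prog_fd)
{
	char pkt[64] = { 0 };	/* an Ethernet frame the program can redirect */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.batch_size = 64,	/* frames processed per timer round */
		.repeat = 1024,
	);

	/* in live mode the kernel rejects data_out/ctx_out, so none are set */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}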
	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}
	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				/* cap the per-frag copy at the remaining budget */
				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
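/*
 * Example (userspace, not part of this file): a hedged sketch of the
 * truncation contract implemented above. If the caller's data_out buffer is
 * smaller than the resulting packet, the kernel copies what fits and the
 * run reports -ENOSPC while data_size_out carries the full size. prog_fd
 * and the buffer sizes are assumptions for illustration.
 */
#include <bpf/libbpf.h>
#include <errno.h>

static int run_truncated(int prog_fd)
{
	char pkt_in[256] = { 0 };
	char pkt_out[64];	/* deliberately smaller than the input */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt_in,
		.data_size_in = sizeof(pkt_in),
		.data_out = pkt_out,
		.data_size_out = sizeof(pkt_out),
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	if (err == -ENOSPC)	/* opts.data_size_out holds the untruncated length */
		return 0;
	return err;
}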
/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}
	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;
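/*
 * Example (userspace, not part of this file): a hedged sketch of pinning a
 * raw_tp test run to a specific CPU via the path above. With
 * BPF_F_TEST_RUN_ON_CPU set, the cpu field selects where
 * __bpf_prog_test_run_raw_tp() executes; prog_fd is an assumption.
 */
#include <bpf/libbpf.h>
#include <linux/bpf.h>

static int run_on_cpu2(int prog_fd)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.flags = BPF_F_TEST_RUN_ON_CPU,
		.cpu = 2,	/* must be online, else the kernel returns -ENXIO */
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}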
	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_CGROUP_SKB:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}
	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
		const int off = skb_network_offset(skb);
		int len = skb->len - off;
		__wsum csum;

		csum = skb_checksum(skb, off, len, 0);
		if (csum_fold(skb->csum) != csum_fold(csum)) {
			ret = -EBADMSG;
			goto out;
		}
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}
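/*
 * Example (userspace, not part of this file): a hedged sketch of opting into
 * the BPF_F_TEST_SKB_CHECKSUM_COMPLETE verification above, where the kernel
 * cross-checks skb->csum against a freshly computed checksum after the
 * program ran. prog_fd and the packet are assumptions for illustration.
 */
#include <bpf/libbpf.h>
#include <linux/bpf.h>

static int run_csum_checked(int prog_fd, const void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE,
	);

	/* -EBADMSG indicates the program left skb->csum inconsistent */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}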
	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}
	max_data_sz = PAGE_SIZE - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;
	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}
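/*
 * Example (userspace, not part of this file): a hedged sketch of a ctx_in
 * that passes the validation at the top of this function. data_meta must be
 * 0, data carries the metadata length (carved out of the headroom), and
 * data_end must equal the size of data_in. prog_fd and the sizes are
 * assumptions for illustration.
 */
#include <bpf/libbpf.h>
#include <linux/bpf.h>

static int run_with_meta(int prog_fd, const void *pkt, __u32 pkt_len)
{
	struct xdp_md ctx = {
		.data_meta = 0,		/* no user data before the metadata */
		.data = 8,		/* 8 bytes of metadata, 4-byte aligned */
		.data_end = pkt_len,	/* must match data_size_in */
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}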
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);