/* * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this * source tree. * * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
/* NOTE(review): orphaned fragment — the enclosing function's signature is
 * not visible in this chunk (presumably the netdev unregister path that
 * migrates offload objects); confirm against the full file before editing.
 */
/* Try to move the objects to another netdev of the device */ if (offdev) {
list_del(&ondev->offdev_netdevs);
/* Pick any remaining netdev bound to the same offload device, if one exists. */
altdev = list_first_entry_or_null(&offdev->netdevs, struct bpf_offload_netdev,
offdev_netdevs);
}
/* NOTE(review): fragment — this looks like the tail of
 * __bpf_prog_dev_bound_init(); its signature is not visible here, so only
 * comments are added.
 */
offload = kzalloc(sizeof(*offload), GFP_USER); if (!offload) return -ENOMEM;
offload->prog = prog;
offload->netdev = netdev;
/* Look up (or below, create) the per-netdev offload state. */
ondev = bpf_offload_find_netdev(offload->netdev); /* When program is offloaded require presence of "true" * bpf_offload_netdev, avoid the one created for !ondev case below.
 */ if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
err = -EINVAL; goto err_free;
} if (!ondev) { /* When only binding to the device, explicitly * create an entry in the hashtable.
 */
err = __bpf_offload_dev_netdev_register(NULL, offload->netdev); if (err) goto err_free;
ondev = bpf_offload_find_netdev(offload->netdev);
}
offload->offdev = ondev->offdev;
prog->aux->offload = offload;
/* Track this prog on the netdev's list so unregister can find it. */
list_add_tail(&offload->offloads, &ondev->progs);
return 0;
err_free:
kfree(offload); return err;
}
/* Validate BPF_PROG_LOAD attributes for a device-bound program and bind the
 * program to the netdev named by attr->prog_ifindex.
 * NOTE(review): this function appears truncated in this chunk — the tail
 * (the __bpf_prog_dev_bound_init() call, dev_put and closing brace) is not
 * visible; confirm against the full file.
 */
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{ struct net_device *netdev; int err;
/* Only TC and XDP program types may be device-bound. */
if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_type != BPF_PROG_TYPE_XDP) return -EINVAL;
if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS)) return -EINVAL;
/* Frags are allowed only if program is dev-bound-only, but not * if it is requesting bpf offload.
 */ if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
!(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)) return -EINVAL;
/* The dev-bound-only flag is an XDP concept; reject it for SCHED_CLS. */
if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) return -EINVAL;
/* Takes a reference on netdev; must be released on all later paths. */
netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex); if (!netdev) return -EINVAL;
err = bpf_dev_offload_check(netdev); if (err) goto out;
/* Invoke the offload device driver's verifier-prep hook for @prog.
 * On success the prog's device state is marked live; returns -ENODEV
 * if the prog is no longer bound to a device.
 */
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int err = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		err = offload->offdev->ops->prepare(prog);
		offload->dev_state = !err;
	}
	up_read(&bpf_devs_lock);

	return err;
}
/* Forward a single-instruction verification step to the device driver.
 * Returns -ENODEV if the prog was unbound from its device meanwhile.
 */
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx,
				 int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int err = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		err = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return err;
}
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{ struct bpf_prog_offload *offload; int ret = -ENODEV;
down_read(&bpf_devs_lock);
offload = env->prog->aux->offload; if (offload) { if (offload->offdev->ops->finalize)
ret = offload->offdev->ops->finalize(env); else
ret = 0;
}
up_read(&bpf_devs_lock);
return ret;
}
/* Ask the device driver to mirror the verifier's replacement of the
 * instruction at @off. Any failure (including a missing replace_insn op)
 * latches offload->opt_failed so further optimizations are not attempted
 * on this prog.
 *
 * Fix: the original had the fused token "conststruct" (missing space),
 * which does not compile.
 */
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		/* Remember any failure; opt_failed is sticky. */
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}
/* Mirror the verifier's removal of @cnt instructions at @off into the
 * device driver. A failure (or missing remove_insns op) latches
 * offload->opt_failed so later optimizations are skipped.
 */
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int err = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		const struct bpf_prog_offload_ops *ops = offload->offdev->ops;

		if (!offload->opt_failed && ops->remove_insns)
			err = ops->remove_insns(env, off, cnt);
		/* Sticky failure flag, same convention as replace_insn. */
		offload->opt_failed |= err;
	}
	up_read(&bpf_devs_lock);
}
/* Hand the verified prog to the device driver for translation to device
 * code. Returns -ENODEV if the prog is no longer bound to a device.
 *
 * Fix: the original had the fused token "staticint" (missing space),
 * which does not compile.
 */
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}
/* Poisoned host entry point for offloaded progs: an offloaded program
 * must only ever run on the device, so executing it on the host is a bug.
 *
 * Fix: the original had fused tokens "staticunsignedint", "constvoid" and
 * "conststruct" (missing spaces), which do not compile.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
/* Compile the prog for the device: poison the host entry point, then ask
 * the driver to translate the program.
 *
 * Fix: the original body was corrupted — after setting bpf_func it fell
 * into a spliced fragment of a map-free routine that referenced an
 * undeclared variable (offmap) and never returned a value from this
 * non-void function. Restored the compile path.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	/* An offloaded prog must never execute on the host CPU. */
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
u64 bpf_map_offload_map_mem_usage(conststruct bpf_map *map)
{ /* The memory dynamically allocated in netdev dev_ops is not counted */ returnsizeof(struct bpf_offloaded_map);
}
/* Read one element of an offloaded map through the device driver.
 * Returns -ENODEV once the backing netdev has gone away.
 */
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int err = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		err = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return err;
}
/* Write one element of an offloaded map through the device driver.
 * Only the basic update flags (up to BPF_EXIST) are supported.
 * Returns -ENODEV once the backing netdev has gone away.
 */
int bpf_map_offload_update_elem(struct bpf_map *map, void *key, void *value,
				u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int err = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		err = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return err;
}
/* Delete one element of an offloaded map through the device driver.
 * Returns -ENODEV once the backing netdev has gone away.
 */
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int err = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		err = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return err;
}
/* Iterate an offloaded map's keys through the device driver.
 * Returns -ENODEV once the backing netdev has gone away.
 *
 * Fix: the original was corrupted — after up_read() it was missing
 * "return ret;" and fell into a spliced fragment of an unrelated
 * kfunc-resolution routine that referenced undeclared identifiers
 * (prog, ops, func_id, p). Restored the function's proper tail.
 */
int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
/* NOTE(review): the following text is extraneous boilerplate (a German
 * website disclaimer) that leaked into this file during extraction and is
 * not part of the source. English translation kept for reference:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */