/* Interior of run_lwt_bpf().
 * NOTE(review): this is a fragment — the enclosing function header and the
 * matching bpf_net_ctx_clear()/local_bh_enable() + return are outside this
 * view; confirm against the full file.
 *
 * Disabling BH is needed to protect per-CPU bpf_redirect_info between
 * BPF prog and skb_do_redirect().
 */
local_bh_disable();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
bpf_compute_data_pointers(skb);
/* Run the attached program; save/restore skb->cb around the run. */
ret = bpf_prog_run_save_cb(lwt->prog, skb);
switch (ret) {
case BPF_OK:
case BPF_LWT_REROUTE:
	/* Nothing extra to do; caller acts on the verdict. */
	break;
case BPF_REDIRECT:
	if (unlikely(!can_redirect)) {
		/* Redirect not permitted at this hook; demote to BPF_OK. */
		pr_warn_once("Illegal redirect return code in prog %s\n",
			     lwt->name ? : "");
		ret = BPF_OK;
	} else {
		/* skb_do_redirect() consumes the skb on both success and
		 * failure paths.
		 */
		skb_reset_mac_header(skb);
		skb_do_redirect(skb);
		ret = BPF_REDIRECT;
	}
	break;
case BPF_DROP:
	kfree_skb(skb);
	ret = -EPERM;
	break;
default:
	/* Unknown verdict: drop the packet and report the bad value once. */
	pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
	kfree_skb(skb);
	ret = -EINVAL;
	break;
}
/* NOTE(review): fragment of the output-hook path — the enclosing function
 * header and the trailing call into dst->lwtstate->orig_output() are not
 * visible here; confirm against the full file.
 */
bpf = bpf_lwt_lwtunnel(dst->lwtstate);
if (bpf->out.prog) {
	/* The output hook may not redirect, hence NO_REDIRECT. */
	ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
	if (ret < 0)
		return ret;
}
/* Without a saved original output handler there is nowhere to forward
 * the packet to — drop it rather than dereference a NULL callback.
 */
if (unlikely(!dst->lwtstate->orig_output)) {
	pr_warn_once("orig_output not set on dst for prog %s\n",
		     bpf->out.name);
	kfree_skb(skb);
	return -EINVAL;
}
/* NOTE(review): fragment of bpf_lwt_xmit_reroute() — the function header
 * (declaring sk/net/oif/dst/err), the IPv4/IPv6 route lookup that sets
 * dst, and the trailing `err:` label targeted by the goto below are not
 * visible here; confirm against the full file.
 */
/* Prefer the socket's namespace/bound device when a full socket exists,
 * otherwise fall back to the current dst's device namespace.
 */
sk = sk_to_full_sk(skb->sk);
if (sk) {
	if (sk->sk_bound_dev_if)
		oif = sk->sk_bound_dev_if;
	net = sock_net(sk);
} else {
	net = dev_net(skb_dst(skb)->dev);
}
/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
 * was done for the previous dst, so we are doing it here again, in
 * case the new dst needs much more space. The call below is a noop
 * if there is enough header space in skb.
 */
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
	goto err;
/* Swap in the freshly looked-up route and hand the packet back to the
 * stack's output path.
 */
skb_dst_drop(skb);
skb_dst_set(skb, dst);
err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(err))
	return net_xmit_errno(err);
/* NOTE(review): tail of the xmit hook — the enclosing function header
 * (declaring dst/bpf) is not visible here; confirm against the full file.
 */
bpf = bpf_lwt_lwtunnel(dst->lwtstate);
if (bpf->xmit.prog) {
	int hh_len = dst->dev->hard_header_len;
	__be16 proto = skb->protocol;
	int ret;

	/* Xmit hook is allowed to redirect, hence CAN_REDIRECT. */
	ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
	switch (ret) {
	case BPF_OK:
		/* If the header changed, e.g. via bpf_lwt_push_encap,
		 * BPF_LWT_REROUTE below should have been used if the
		 * protocol was also changed.
		 */
		if (skb->protocol != proto) {
			kfree_skb(skb);
			return -EINVAL;
		}
		/* If the header was expanded, headroom might be too
		 * small for L2 header to come, expand as needed.
		 */
		ret = xmit_check_hhlen(skb, hh_len);
		if (unlikely(ret))
			return ret;
		return LWTUNNEL_XMIT_CONTINUE;
	case BPF_REDIRECT:
		/* Program already sent the packet elsewhere. */
		return LWTUNNEL_XMIT_DONE;
	case BPF_LWT_REROUTE:
		/* Re-route through the stack with a fresh lookup. */
		return bpf_lwt_xmit_reroute(skb);
	default:
		/* Negative errno from run_lwt_bpf(); propagate. */
		return ret;
	}
}
return LWTUNNEL_XMIT_CONTINUE;
}
/* Release the resources held by one LWT BPF program slot: drop the
 * reference on the attached BPF program (if any) and free its name.
 */
static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	/* NOTE(review): assumes prog->name is k*alloc'd by the setup path
	 * (it is string-compared elsewhere in this file) — confirm against
	 * the build path. kfree(NULL) is a no-op, so no guard is needed.
	 */
	kfree(prog->name);
}
/* Compare two LWT BPF program slots.
 * Returns 0 when they are considered equal, non-zero otherwise
 * (strcmp-style result when both names are present).
 */
static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* FIXME:
	 * The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	/* Exactly one side has a name: definitely not equal. */
	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}
/* Fix up GSO metadata after an IP encap header was pushed in front of a
 * GSO packet: pick the tunnel GSO type matching the encapsulating
 * protocol (outer IP version + next header).
 * Returns 0 on success or a negative errno (-ENOTSUPP for non-TCP GSO,
 * -EINVAL for a too-short encap, -EPROTONOSUPPORT for an unhandled
 * protocol).
 */
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
	int next_hdr_offset;
	void *next_hdr;
	__u8 protocol;

	/* SCTP and UDP_L4 gso need more nuanced handling than what
	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
	 * So at the moment only TCP GSO packets are let through.
	 */
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -ENOTSUPP;

	/* Locate the header following the freshly pushed outer IP header. */
	if (ipv4) {
		protocol = ip_hdr(skb)->protocol;
		next_hdr_offset = sizeof(struct iphdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	} else {
		protocol = ipv6_hdr(skb)->nexthdr;
		next_hdr_offset = sizeof(struct ipv6hdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	}

	switch (protocol) {
	case IPPROTO_GRE:
		next_hdr_offset += sizeof(struct gre_base_hdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

	case IPPROTO_UDP:
		next_hdr_offset += sizeof(struct udphdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct udphdr *)next_hdr)->check)
			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

	case IPPROTO_IP:
	case IPPROTO_IPV6:
		if (ipv4)
			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
		else
			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

	default:
		return -EPROTONOSUPPORT;
	}
}
/* Push an IPv4/IPv6 encapsulation header of @len bytes (from @hdr) in
 * front of the packet and fix skb metadata accordingly.
 * @ingress selects how much headroom must be guaranteed (mac_len on
 * ingress vs. the egress device's link-layer reserve).
 * Returns 0 on success or a negative errno.
 */
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
	struct iphdr *iph;
	bool ipv4;
	int err;

	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
		return -EINVAL;

	/* Validate the version/length of the header being pushed; this also
	 * initializes iph and ipv4, which the code below relies on.
	 */
	iph = (struct iphdr *)hdr;
	if (iph->version == 4) {
		ipv4 = true;
		if (unlikely(len < iph->ihl * 4))
			return -EINVAL;
	} else if (iph->version == 6) {
		ipv4 = false;
		if (unlikely(len < sizeof(struct ipv6hdr)))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/* Make sure there is enough writable headroom for the new header. */
	if (ingress)
		err = skb_cow_head(skb, len + skb->mac_len);
	else
		err = skb_cow_head(skb,
				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		return err;

	/* push the encap headers and fix pointers */
	skb_reset_inner_headers(skb);
	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
	skb_set_inner_protocol(skb, skb->protocol);
	skb->encapsulation = 1;
	skb_push(skb, len);
	if (ingress)
		skb_postpush_rcsum(skb, iph, len);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), hdr, len);
	bpf_compute_data_pointers(skb);
	skb_clear_hash(skb);

	if (ipv4) {
		skb->protocol = htons(ETH_P_IP);
		iph = ip_hdr(skb);

		/* Fill in the outer IPv4 checksum if the caller left it 0. */
		if (!iph->check)
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
	} else {
		skb->protocol = htons(ETH_P_IPV6);
	}

	/* GSO packets need their tunnel GSO type adjusted as well. */
	if (skb_is_gso(skb))
		return handle_gso_encap(skb, ipv4, len);

	return 0;
}
/* NOTE(review): the following text is extraneous to this source file —
 * German website boilerplate apparently pasted in by mistake. Translated:
 * "The information on this website has been compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */