if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
!bpf_type_has_unsafe_modifiers(info->reg_type) &&
info->btf_id == sock_id) /* promote it to tcp_sock */
info->btf_id = tcp_sock_id;
returntrue;
}
staticint bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log, conststruct bpf_reg_state *reg, int off, int size)
{ conststruct btf_type *t;
size_t end;
t = btf_type_by_id(reg->btf, reg->btf_id); if (t != tcp_sock_type) {
bpf_log(log, "only read is supported\n"); return -EACCES;
}
switch (off) { case offsetof(struct sock, sk_pacing_rate):
end = offsetofend(struct sock, sk_pacing_rate); break; case offsetof(struct sock, sk_pacing_status):
end = offsetofend(struct sock, sk_pacing_status); break; case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
end = offsetofend(struct inet_connection_sock, icsk_ca_priv); break; case offsetof(struct inet_connection_sock, icsk_ack.pending):
end = offsetofend(struct inet_connection_sock,
icsk_ack.pending); break; case offsetof(struct tcp_sock, snd_cwnd):
end = offsetofend(struct tcp_sock, snd_cwnd); break; case offsetof(struct tcp_sock, snd_cwnd_cnt):
end = offsetofend(struct tcp_sock, snd_cwnd_cnt); break; case offsetof(struct tcp_sock, snd_cwnd_stamp):
end = offsetofend(struct tcp_sock, snd_cwnd_stamp); break; case offsetof(struct tcp_sock, snd_ssthresh):
end = offsetofend(struct tcp_sock, snd_ssthresh); break; case offsetof(struct tcp_sock, ecn_flags):
end = offsetofend(struct tcp_sock, ecn_flags); break; case offsetof(struct tcp_sock, app_limited):
end = offsetofend(struct tcp_sock, app_limited); break; default:
bpf_log(log, "no write support to tcp_sock at off %d\n", off); return -EACCES;
}
if (off + size > end) {
bpf_log(log, "write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
off, size, end); return -EACCES;
}
midx = prog->expected_attach_type;
t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
}
staticconststruct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, conststruct bpf_prog *prog)
{ switch (func_id) { case BPF_FUNC_tcp_send_ack: return &bpf_tcp_send_ack_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; case BPF_FUNC_setsockopt: /* Does not allow release() to call setsockopt. * release() is called when the current bpf-tcp-cc * is retiring. It is not allowed to call * setsockopt() to make further changes which * may potentially allocate new resources.
*/ if (prog_ops_moff(prog) !=
offsetof(struct tcp_congestion_ops, release)) return &bpf_sk_setsockopt_proto; return NULL; case BPF_FUNC_getsockopt: /* Since get/setsockopt is usually expected to * be available together, disable getsockopt for * release also to avoid usage surprise. * The bpf-tcp-cc already has a more powerful way * to read tcp_sock from the PTR_TO_BTF_ID.
*/ if (prog_ops_moff(prog) !=
offsetof(struct tcp_congestion_ops, release)) return &bpf_sk_getsockopt_proto; return NULL; case BPF_FUNC_ktime_get_coarse_ns: return &bpf_ktime_get_coarse_ns_proto; default: return bpf_base_func_proto(func_id, prog);
}
}
staticint __init bpf_tcp_ca_kfunc_init(void)
{ int ret;
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.