/* Abort the stream parser: mark it stopped and propagate @err (a negative
 * errno) to the lower socket so sleeping readers/pollers are woken up.
 * Idempotent -- only the first call on a running strparser takes effect.
 */
void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
	if (strp->stopped)
		return;

	strp->stopped = 1;

	/* Report an error on the lower socket */
	WRITE_ONCE(strp->sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(strp->sk);
}
/* Create a new skb with the contents of input copied to its page frags */ staticstruct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{ struct strp_msg *rxm; struct sk_buff *skb;
skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
strp->stm.full_len); if (!skb) return NULL;
/* Steal the input skb, input msg is invalid after calling this function */ struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{ struct tls_strparser *strp = &ctx->strp;
#ifdef CONFIG_TLS_DEVICE
DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted); #else /* This function turns an input into an output, * that can only happen if we have offload.
*/
WARN_ON(1); #endif
if (strp->copy_mode) { struct sk_buff *skb;
/* Replace anchor with an empty skb, this is a little * dangerous but __tls_cur_msg() warns on empty skbs * so hopefully we'll catch abuses.
*/
skb = alloc_skb(0, strp->sk->sk_allocation); if (!skb) return NULL;
swap(strp->anchor, skb); return skb;
}
return tls_strp_msg_make_copy(strp);
}
/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
	struct tls_strparser *strp = &ctx->strp;
	struct sk_buff *skb;

	if (strp->copy_mode)
		return 0;

	skb = tls_strp_msg_make_copy(strp);
	if (!skb)
		return -ENOMEM;

	/* Swap the anchor for the private copy and release the TCP data
	 * it was referencing; from here on the msg is owned by the copy.
	 * NOTE(review): tail restored from upstream tls_strp.c -- verify.
	 */
	tls_strp_anchor_free(strp);
	strp->anchor = skb;

	tcp_read_done(strp->sk, strp->stm.full_len);
	strp->copy_mode = 1;

	return 0;
}
/* Make a clone (in the skb sense) of the input msg to keep a reference * to the underlying data. The reference-holding skbs get placed on * @dst.
 */ int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{ struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
/* Copy-mode anchor owns its page frags, so hold the data via a fresh
 * zero-size skb queued on @dst -- the anchor itself cannot be
 * skb_clone()d because unpause wipes it (see comment below).
 * NOTE(review): body appears truncated here -- the queueing of the new
 * skb on @dst and the non-copy-mode path are missing from this file;
 * the lines that follow belong to a different function. Restore from
 * upstream before relying on this function.
 */
if (strp->copy_mode) { struct sk_buff *skb;
WARN_ON_ONCE(!shinfo->nr_frags);
/* We can't skb_clone() the anchor, it gets wiped by unpause */
skb = alloc_skb(0, strp->sk->sk_allocation); if (!skb) return -ENOMEM;
/* NOTE(review): orphaned fragment of a frag-copy helper (presumably
 * tls_strp_copyin_frag) -- the enclosing function header and the
 * declarations of nfrag, frag, len, chunk, in_skb, offset and in_len
 * are not visible in this file; reconcile with upstream.
 */
if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
/* Ran out of pre-allocated frags -- message doesn't fit */
DEBUG_NET_WARN_ON_ONCE(1); return -EMSGSIZE;
}
frag = &skb_shinfo(skb)->frags[nfrag];
len = in_len; /* First make sure we got the header */ if (!strp->stm.full_len) { /* Assume one page is more than enough for headers */
chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
/* Append @chunk bytes from the input skb at the frag's current tail */
WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
skb_frag_address(frag) +
skb_frag_size(frag),
chunk));
/* NOTE(review): orphaned fragment of a copy-mode read path (presumably
 * tls_strp_read_copy) -- the enclosing function header and the
 * declarations of qshort, shinfo, need_spc, len and page are not
 * visible in this file; reconcile with upstream.
 */
/* If the rbuf is small or rcv window has collapsed to 0 we need * to read the data out. Otherwise the connection will stall. * Without pressure threshold of INT_MAX will never be ready.
 */ if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX))) return 0;
shinfo = skb_shinfo(strp->anchor);
/* If we don't know the length go max plus page for cipher overhead */
need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
/* Pre-allocate one page per PAGE_SIZE chunk of the needed space */
for (len = need_spc; len > 0; len -= PAGE_SIZE) {
page = alloc_page(strp->sk->sk_allocation); if (!page) {
tls_strp_flush_anchor_copy(strp); return -ENOMEM;
}
first = skb_shinfo(strp->anchor)->frag_list;
skb = first;
seq = TCP_SKB_CB(first)->seq;
/* Make sure there's no duplicate data in the queue, * and the decrypted status matches.
*/ while (skb->len < len) {
seq += skb->len;
len -= skb->len;
skb = skb->next;
if (TCP_SKB_CB(skb)->seq != seq) returnfalse; if (skb_cmp_decrypted(first, skb)) returnfalse;
}
/* Poke the parser: do nothing when it has been stopped or a complete
 * message is already pending; otherwise attempt a read from the lower
 * socket, deferring to the workqueue if that fails for lack of memory.
 */
void tls_strp_check_rcv(struct tls_strparser *strp)
{
	int err;

	if (unlikely(strp->stopped) || strp->msg_ready)
		return;

	err = tls_strp_read_sock(strp);
	if (err == -ENOMEM)
		queue_work(tls_strp_wq, &strp->work);
}
/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
	/* This check is needed to synchronize with do_tls_strp_work.
	 * do_tls_strp_work acquires a process lock (lock_sock) whereas
	 * the lock held here is bh_lock_sock. The two locks can be
	 * held by different threads at the same time, but bh_lock_sock
	 * allows a thread in BH context to safely check if the process
	 * lock is held. In this case, if the lock is held, queue work.
	 */
	if (sock_owned_by_user_nocheck(strp->sk)) {
		queue_work(tls_strp_wq, &strp->work);
		return;
	}

	/* Socket not owned by a process context -- parse inline.
	 * NOTE(review): this call was missing in the truncated original
	 * (the function ended after the if block, leaving the brace
	 * unbalanced); restored from upstream.
	 */
	tls_strp_check_rcv(strp);
}
strp->anchor = alloc_skb(0, GFP_KERNEL); if (!strp->anchor) return -ENOMEM;
INIT_WORK(&strp->work, tls_strp_work);
return 0;
}
/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
	WARN_ON(!strp->stopped);

	/* NOTE(review): teardown restored from upstream -- the original
	 * was truncated after the WARN_ON with an unbalanced brace.
	 */
	cancel_work_sync(&strp->work);
	tls_strp_anchor_free(strp);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.