// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016, 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
/*
 * Handle data received on the local endpoint.
 * - may be called in interrupt context
 *
 * [!] Note that as this is called from the encap_rcv hook, the socket is not
 * held locked by the caller and nothing prevents sk_user_data on the UDP from
 * being cleared in the middle of processing this function.
 *
 * Called with the RCU read lock held from the IP layer via UDP.
 *
 * NOTE(review): this function is truncated -- the handoff of the skb to the
 * I/O thread (presumably using rx_queue) and the closing return are missing,
 * apparently lost by a faulty extraction.  Restore from the upstream source.
 */
int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
	struct sk_buff_head *rx_queue;	/* used by the missing tail -- TODO confirm */
	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
	struct task_struct *io_thread;

	/* sk_user_data may be cleared concurrently by socket teardown; if the
	 * local endpoint is gone, just drop the packet.
	 */
	if (unlikely(!local)) {
		kfree_skb(skb);
		return 0;
	}

	/* No I/O thread to hand the packet to: drop it. */
	io_thread = READ_ONCE(local->io_thread);
	if (!io_thread) {
		kfree_skb(skb);
		return 0;
	}

	/* Stamp the arrival time if the lower layers didn't. */
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();
/* * Process packets received on the local endpoint
*/ staticbool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
{ struct rxrpc_connection *conn; struct sockaddr_rxrpc peer_srx; struct rxrpc_skb_priv *sp; struct rxrpc_peer *peer = NULL; struct sk_buff *skb = *_skb; bool ret = false;
skb_pull(skb, sizeof(struct udphdr));
sp = rxrpc_skb(skb);
/* dig out the RxRPC connection details */ if (!rxrpc_extract_header(sp, skb)) return just_discard;
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) { staticint lose; if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp); return just_discard;
}
}
trace_rxrpc_rx_packet(sp);
switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_VERSION: if (rxrpc_to_client(sp)) return just_discard; return rxrpc_input_version(local, skb);
case RXRPC_PACKET_TYPE_BUSY: if (rxrpc_to_server(sp)) return just_discard;
fallthrough; case RXRPC_PACKET_TYPE_ACK: case RXRPC_PACKET_TYPE_ACKALL: if (sp->hdr.callNumber == 0) return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call); break; case RXRPC_PACKET_TYPE_ABORT: if (!rxrpc_extract_abort(skb)) return just_discard; /* Just discard if malformed */ break;
case RXRPC_PACKET_TYPE_DATA: if (sp->hdr.callNumber == 0) return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call); if (sp->hdr.seq == 0) return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
/* Unshare the packet so that it can be modified for in-place * decryption.
*/ if (sp->hdr.securityIndex != 0) {
skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) {
rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
*_skb = NULL; return just_discard;
}
case RXRPC_PACKET_TYPE_CHALLENGE: if (rxrpc_to_server(sp)) return just_discard; break; case RXRPC_PACKET_TYPE_RESPONSE: if (rxrpc_to_client(sp)) return just_discard; break;
/* Packet types 9-11 should just be ignored. */ case RXRPC_PACKET_TYPE_PARAMS: case RXRPC_PACKET_TYPE_10: case RXRPC_PACKET_TYPE_11: return just_discard;
if (rxrpc_to_client(sp)) {
rcu_read_lock();
conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
rcu_read_unlock(); if (!conn) return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
rxrpc_put_connection(conn, rxrpc_conn_put_call_input); return ret;
}
/* We need to look up service connections by the full protocol * parameter set. We look up the peer first as an intermediate step * and then the connection from the peer's tree.
*/
rcu_read_lock();
	/*
	 * NOTE(review): the lines below do not belong to the preceding
	 * function -- they look like the body of a connection-level dispatch
	 * routine (rxrpc_input_packet_on_conn-style) whose header was lost by
	 * a faulty extraction; "channel" and "chan" are used but declared in
	 * the missing prologue.
	 */

	/* Track the highest serial number seen on this connection so far. */
	if (after(sp->hdr.serial, conn->hi_serial))
		conn->hi_serial = sp->hdr.serial;

	/* It's a connection-level packet if the call number is 0. */
	if (sp->hdr.callNumber == 0)
		return rxrpc_input_conn_packet(conn, skb);

	/* Deal with path MTU discovery probing. */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK &&
	    conn->pmtud_probe &&
	    after_eq(sp->ack.acked_serial, conn->pmtud_probe))
		rxrpc_input_probe_for_pmtud(conn, sp->ack.acked_serial, false);

	/* Call-bound packets are routed by connection channel. */
	channel = sp->hdr.cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];

	/* Ignore really old calls */
	if (sp->hdr.callNumber < chan->last_call)
		return just_discard;

	/* Packets for the channel's most recently completed call. */
	if (sp->hdr.callNumber == chan->last_call) {
		if (chan->call ||
		    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
			return just_discard;

		/* For the previous service call, if completed successfully, we
		 * discard all further packets.
		 */
		if (rxrpc_conn_is_service(conn) &&
		    chan->last_type == RXRPC_PACKET_TYPE_ACK)
			return just_discard;

		/* But otherwise we need to retransmit the final packet from
		 * data cached in the connection record.
		 */
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
			trace_rxrpc_rx_data(chan->call_debug_id,
					    sp->hdr.seq,
					    sp->hdr.serial,
					    sp->hdr.flags);
		rxrpc_conn_retransmit_call(conn, skb, channel);
		return just_discard;
	}
	/*
	 * NOTE(review): main dispatch loop of the local endpoint's I/O
	 * thread.  The enclosing function header, the loop's exit condition
	 * and its closing brace are missing (lost by a faulty extraction);
	 * "rx_queue", "conn_attend_q" and "now" are presumably declared in
	 * the missing prologue -- TODO confirm against the upstream source.
	 */
	for (;;) {
		rxrpc_inc_stat(local->rxnet, stat_io_loop);

		/* Inject a delay into packets if requested. */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
		now = ktime_get_real();
		while ((skb = skb_peek(&local->rx_delay_queue))) {
			/* Queue is time-ordered: stop at the first packet
			 * whose release time has not yet arrived.
			 */
			if (ktime_before(now, skb->tstamp))
				break;
			skb = skb_dequeue(&local->rx_delay_queue);
			skb_queue_tail(&local->rx_queue, skb);
		}
#endif

		/* Splice the shared receive queue onto a local list under the
		 * queue lock so packets can be processed without contention.
		 */
		if (!skb_queue_empty(&local->rx_queue)) {
			spin_lock_irq(&local->rx_queue.lock);
			skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
			spin_unlock_irq(&local->rx_queue.lock);
			trace_rxrpc_iothread_rx(local, skb_queue_len(&rx_queue));
		}

		/* Distribute packets and errors. */
		while ((skb = __skb_dequeue(&rx_queue))) {
			struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
			switch (skb->mark) {
			case RXRPC_SKB_MARK_PACKET:
				skb->priority = 0;
				/* A packet that can't be routed is rejected
				 * rather than silently dropped.
				 */
				if (!rxrpc_input_packet(local, &skb))
					rxrpc_reject_packet(local, skb);
				trace_rxrpc_rx_done(skb->mark, skb->priority);
				rxrpc_free_skb(skb, rxrpc_skb_put_input);
				break;
			case RXRPC_SKB_MARK_ERROR:
				rxrpc_input_error(local, skb);
				rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
				break;
			case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
				/* sp->poke_conn carries a connection ref that
				 * must be dropped after the event is handled.
				 */
				rxrpc_input_conn_event(sp->poke_conn, skb);
				rxrpc_put_connection(sp->poke_conn, rxrpc_conn_put_poke);
				rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
				break;
			default:
				WARN_ON_ONCE(1);
				rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
				break;
			}
		}

		/* Deal with connections that want immediate attention. */
		if (!list_empty_careful(&local->conn_attend_q)) {
			spin_lock_irq(&local->lock);
			list_splice_tail_init(&local->conn_attend_q, &conn_attend_q);
			spin_unlock_irq(&local->lock);
		}
/*
 * NOTE(review): the following German website-disclaimer text is extraneous
 * boilerplate appended to this file by a faulty extraction; it is not part
 * of the source.  English translation: "The information on this website has
 * been carefully compiled to the best of our knowledge.  However, neither
 * the completeness, nor the correctness, nor the quality of the information
 * provided is guaranteed.  Note: the coloured syntax highlighting and the
 * measurement are still experimental."
 */