// SPDX-License-Identifier: GPL-2.0-or-later
/* Incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
/* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them.
*/ staticint rxrpc_service_prealloc_one(struct rxrpc_sock *rx, struct rxrpc_backlog *b,
rxrpc_notify_rx_t notify_rx, unsignedlong user_call_ID, gfp_t gfp, unsignedint debug_id)
{ struct rxrpc_call *call, *xcall; struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); struct rb_node *parent, **pp; int max, tmp; unsignedint size = RXRPC_BACKLOG_MAX; unsignedint head, tail, call_head, call_tail;
max = rx->sk.sk_max_ack_backlog;
tmp = rx->sk.sk_ack_backlog; if (tmp >= max) {
_leave(" = -ENOBUFS [full %u]", max); return -ENOBUFS;
}
max -= tmp;
/* We don't need more conns and peers than we have calls, but on the * other hand, we shouldn't ever use more peers than conns or conns * than calls.
*/
call_head = b->call_backlog_head;
call_tail = READ_ONCE(b->call_backlog_tail);
tmp = CIRC_CNT(call_head, call_tail, size); if (tmp >= max) {
_leave(" = -ENOBUFS [enough %u]", tmp); return -ENOBUFS;
}
max = tmp + 1;
head = b->peer_backlog_head;
tail = READ_ONCE(b->peer_backlog_tail); if (CIRC_CNT(head, tail, size) < max) { struct rxrpc_peer *peer;
/* Now it gets complicated, because calls get registered with the * socket here, with a user ID preassigned by the user.
*/
call = rxrpc_alloc_call(rx, gfp, debug_id); if (!call) return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
/*
 * Allocate the preallocation buffers for incoming service calls. These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *backlog;

	/* Already allocated on a previous call: nothing to do. */
	if (rx->backlog)
		return 0;

	/* Zero-initialised so all head/tail indices start at 0. */
	backlog = kzalloc(sizeof(*backlog), gfp);
	if (!backlog)
		return -ENOMEM;

	rx->backlog = backlog;
	return 0;
}
/* * Discard the preallocation on a service.
*/ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{ struct rxrpc_backlog *b = rx->backlog; struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); unsignedint size = RXRPC_BACKLOG_MAX, head, tail;
if (!b) return;
rx->backlog = NULL;
/* Make sure that there aren't any incoming calls in progress before we * clear the preallocation buffers.
*/
spin_lock_irq(&rx->incoming_lock);
spin_unlock_irq(&rx->incoming_lock);
/* Now allocate and set up the connection */
conn = b->conn_backlog[conn_tail];
b->conn_backlog[conn_tail] = NULL;
smp_store_release(&b->conn_backlog_tail,
(conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
conn->peer = peer;
rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
rxrpc_new_incoming_connection(rx, conn, sec, skb);
} else {
rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
atomic_inc(&conn->active);
}
/* And now we can allocate and set up a new call */
call = b->call_backlog[call_tail];
b->call_backlog[call_tail] = NULL;
smp_store_release(&b->call_backlog_tail,
(call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
/* * Set up a new incoming call. Called from the I/O thread. * * If this is for a kernel service, when we allocate the call, it will have * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the * retainer ref obtained from the backlog buffer. Prealloc calls for userspace * services only have the ref from the backlog buffer. * * If we want to report an error, we mark the skb with the packet type and * abort code and return false.
*/ bool rxrpc_new_incoming_call(struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, struct sockaddr_rxrpc *peer_srx, struct sk_buff *skb)
{ conststruct rxrpc_security *sec = NULL; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_call *call = NULL; struct rxrpc_sock *rx;
_enter("");
/* Don't set up a call for anything other than a DATA packet. */ if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA) return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
read_lock_irq(&local->services_lock);
/* Weed out packets to services we're not offering. Packets that would * begin a call are explicitly rejected and the rest are just * discarded.
*/
rx = local->service; if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
sp->hdr.serviceId != rx->second_service)
) { if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
sp->hdr.seq == 1) goto unsupported_service; goto discard;
}
if (!conn) {
sec = rxrpc_get_incoming_security(rx, skb); if (!sec) goto unsupported_security;
}
/* * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls * @sock: The socket on which to preallocate * @notify_rx: Event notification function for the call * @user_call_ID: The tag to attach to the preallocated call * @gfp: The allocation conditions. * @debug_id: The tracing debug ID. * * Charge up the socket with preallocated calls, each with a user ID. The * ->user_attach_call() callback function should be provided to effect the * attachment from the user's side. The user is given a ref to hold on the * call. * * Note that the call may be come connected before this function returns.
*/ int rxrpc_kernel_charge_accept(struct socket *sock, rxrpc_notify_rx_t notify_rx, unsignedlong user_call_ID, gfp_t gfp, unsignedint debug_id)
{ struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct rxrpc_backlog *b = rx->backlog;
if (sock->sk->sk_state == RXRPC_CLOSE) return -ESHUTDOWN;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.