// SPDX-License-Identifier: GPL-2.0-or-later /* RxRPC individual remote procedure call handling * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com)
*/
/* NOTE(review): extraction fragment — the enclosing function signature(s)
 * are not visible in this chunk.  The first part allocates and initialises
 * a call object from the rxrpc_call_jar slab cache; the tail
 * (test_bit/up) releases a call-count semaphore and appears to belong to
 * a different function.  Code left byte-for-byte as found.
 */
call = kmem_cache_zalloc(rxrpc_call_jar, gfp); if (!call) return NULL;
/* The user mutex serialises userspace access to the call. */
mutex_init(&call->user_mutex);
/* Prevent lockdep reporting a deadlock false positive between the afs * filesystem and sys_sendmsg() via the mmap sem.
 */ if (rx->sk.sk_kern_sock)
lockdep_set_class(&call->user_mutex,
&rxrpc_call_user_mutex_lock_class_key);
/* Kernel-initiated calls are accounted against their own limiter. */
if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
limiter = &rxrpc_kernel_call_limiter;
up(limiter);
}
/* * Start the process of connecting a call. We obtain a peer and a connection * bundle, but the actual association of a call with a connection is offloaded * to the I/O thread to simplify locking.
 */ staticint rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
/* NOTE(review): "staticint" above is a garbled "static int" from
 * extraction, and the function body is truncated right after the local
 * declarations below — the remainder is not visible in this chunk.
 */
{ struct rxrpc_local *local = call->local; int ret = -ENOMEM;
/* NOTE(review): this definition is garbled by extraction — the call
 * allocation, the user-ID rb-tree insertion (the code that would jump to
 * error_dup_user_ID), and other sections are missing, and "unsignedint"
 * below should read "unsigned int".  Comments only; the code is left
 * byte-for-byte as found.
 */
/* * Set up a call for the given parameters. * - Called with the socket lock held, which it must release. * - If it returns a call, the call's lock will need releasing by the caller.
 */ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, struct rxrpc_conn_parameters *cp, struct rxrpc_call_params *p,
gfp_t gfp, unsignedint debug_id)
__releases(&rx->sk.sk_lock)
__acquires(&call->user_mutex)
{ struct rxrpc_call *call, *xcall; struct rxrpc_net *rxnet; struct semaphore *limiter; struct rb_node *parent, **pp; int ret;
_enter("%p,%lx", rx, p->user_call_ID);
/* A client call must have a peer to connect to. */
if (WARN_ON_ONCE(!cp->peer)) {
release_sock(&rx->sk); return ERR_PTR(-EIO);
}
/* From this point on, the call is protected by its own lock. */
release_sock(&rx->sk);
/* Set up or get a connection record and set the protocol parameters, * including channel number and call ID.
 */
ret = rxrpc_connect_call(call, gfp); if (ret < 0) goto error_attached_to_socket;
_leave(" = %p [new]", call); return call;
/* We unexpectedly found the user ID in the list after taking * the call_lock. This shouldn't happen unless the user races * with itself and tries to add the same user ID twice at the * same time in different threads.
 */
error_dup_user_ID:
write_unlock(&rx->call_lock);
release_sock(&rx->sk);
/* Fail the call locally and drop the user-ID reference. */
rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
rxrpc_call_see_userid_exists);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_userid_exists);
_leave(" = -EEXIST"); return ERR_PTR(-EEXIST);
/* We got an error, but the call is attached to the socket and is in * need of release. However, we might now race with recvmsg() when it * completion notifies the socket. Return 0 from sys_sendmsg() and * leave the error to recvmsg() to deal with.
 */
error_attached_to_socket:
trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
rxrpc_call_see_connect_failed);
/* Complete the call with the connect error; recvmsg() reports it. */
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
_leave(" = c=%08x [err]", call->debug_id); return call;
}
/* NOTE(review): truncated — the lock acquisition that would pair with the
 * spin_unlock(&conn->state_lock) at the end, and the function's closing
 * brace, are not visible in this extraction.  Comments only.
 */
/* * Set up an incoming call. call->conn points to the connection. * This is called with interrupts disabled and isn't allowed to fail.
 */ void rxrpc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_call *call, struct sk_buff *skb)
{ struct rxrpc_connection *conn = call->conn; struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
u32 chan;
/* Mirror the connection's security state onto the new call. */
switch (conn->state) { case RXRPC_CONN_SERVICE_UNSECURED: case RXRPC_CONN_SERVICE_CHALLENGING:
__set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags); break; case RXRPC_CONN_SERVICE: break;
case RXRPC_CONN_ABORTED:
/* Propagate the connection's abort state to the call. */
rxrpc_set_call_completion(call, conn->completion,
conn->abort_code, conn->error); break; default:
BUG();
}
/* The I/O thread takes its own reference on the call. */
rxrpc_get_call(call, rxrpc_call_get_io_thread);
/* Set the channel for this call. We don't get channel_lock as we're * only defending against the data_ready handler (which we're called * from) and the RESPONSE packet parser (which is only really * interested in call_counter and can cope with a disagreement with the * call pointer).
 */
chan = sp->hdr.cid & RXRPC_CHANNELMASK;
conn->channels[chan].call_counter = call->call_id;
conn->channels[chan].call_id = call->call_id;
conn->channels[chan].call = call;
spin_unlock(&conn->state_lock);
/* NOTE(review): garbled span — the rxrpc_see_call() header below runs
 * straight into what looks like the interior of the call release/put
 * path (RXRPC_CALL_RELEASED handling, user-ID rb-tree removal, final
 * ref drop); the stitching function bodies are missing, and several
 * locals (rx, putu, dead, rxnet, debug_id) have no visible declaration.
 * Comments only; the code is left byte-for-byte as found.
 */
/* * Note the re-emergence of a call.
 */ void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{ if (call) { int r = refcount_read(&call->ref);
/* A call may only be released once. */
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
rxrpc_put_call_slot(call);
/* Note that at this point, the call may still be on or may have been * added back on to the socket receive queue. recvmsg() must discard * released calls. The CALL_RELEASED flag should prevent further * notifications.
 */
spin_lock_irq(&rx->recvmsg_lock);
spin_unlock_irq(&rx->recvmsg_lock);
write_lock(&rx->call_lock);
/* Detach the call from the socket's user-ID tree, if it was inserted;
 * poison the node to catch stale use.
 */
if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
rb_erase(&call->sock_node, &rx->calls);
memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
putu = true;
}
/* Drop a reference; if it was the last one, clean the call up. */
dead = __refcount_dec_and_test(&call->ref, &r);
trace_rxrpc_call(debug_id, r - 1, 0, why); if (dead) {
ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
/* Unhook the call from the per-namespace list before teardown. */
if (!list_empty(&call->link)) {
spin_lock(&rxnet->call_lock);
list_del_init(&call->link);
spin_unlock(&rxnet->call_lock);
}
rxrpc_cleanup_call(call);
}
}
/*
 * Free up the call under RCU - final release of the call's memory.
 */
static void rxrpc_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = READ_ONCE(call->rxnet);

	/* rxnet must be loaded before the call memory is returned to the
	 * slab cache; "call" must not be dereferenced after this point.
	 */
	kmem_cache_free(rxrpc_call_jar, call);

	/* Drop the namespace call count and wake any waiter on it. */
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}
/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);

	/* NOTE(review): the extraction appears to have lost this function's
	 * actual teardown body; as found, it only re-dispatches, and the
	 * else-branch self-call would recurse indefinitely.  Confirm against
	 * the original source before relying on this logic.
	 */
	if (rcu_read_lock_held())
		/* Can't use the rxrpc workqueue as we need to cancel/flush
		 * something that may be running/waiting there.
		 */
		schedule_work(&call->destroyer);
	else
		rxrpc_destroy_call(&call->destroyer);
}
/* * Make sure that all calls are gone from a network namespace. To reach this * point, any open UDP sockets in that namespace must have been closed, so any * outstanding calls cannot be doing I/O.
 */ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{ struct rxrpc_call *call;
_enter("");
/* NOTE(review): truncated — the list iteration that would assign 'call',
 * the spin_unlock that pairs with the spin_lock below, and the function's
 * tail (and closing brace) are not visible in this extraction.
 */
if (!list_empty(&rxnet->calls)) {
spin_lock(&rxnet->call_lock);
/* Complain about any call still lingering in the namespace. */
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
call, refcount_read(&call->ref),
rxrpc_call_states[__rxrpc_call_state(call)],
call->flags, call->events);
/**
 * rxrpc_kernel_query_call_security - Query call's security parameters
 * @call: The call to query
 * @_service_id: Where to return the service ID
 * @_enctype: Where to return the "encoding type"
 *
 * Fill in *@_service_id and *@_enctype from the call record and hand back
 * the call's security class.
 *
 * Return: The security class protocol number.
 */
u8 rxrpc_kernel_query_call_security(struct rxrpc_call *call,
				    u16 *_service_id, u32 *_enctype)
{
	*_service_id = call->dest_srx.srx_service;
	*_enctype = call->security_enctype;

	return call->security_ix;
}
EXPORT_SYMBOL(rxrpc_kernel_query_call_security);
/* NOTE(review): the lines below are extraction residue — a German website
 * disclaimer from the tool that rendered this source ("Messung" =
 * measurement).  It states that the information was compiled to the best
 * of the author's knowledge with no guarantee of completeness, correctness
 * or quality, and that the syntax colouring and measurement are still
 * experimental.  Preserved verbatim, fenced as a comment so it cannot be
 * mistaken for code:
 *
 * Messung V0.5
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert. 0.26 Bemerkung:
 * (vorverarbeitet)
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */