// SPDX-License-Identifier: GPL-2.0-or-later /* Maintain an RxRPC server socket to do AFS communications through * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com)
*/
/* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
*/ int afs_open_socket(struct afs_net *net)
{ struct sockaddr_rxrpc srx; struct socket *socket; int ret;
_enter("");
/* Create a kernel AF_RXRPC socket in this mount's network namespace. */
ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket); if (ret < 0) goto error_1;
/* bind the callback manager's address to make this a server socket */
memset(&srx, 0, sizeof(srx));
srx.srx_family = AF_RXRPC;
srx.srx_service = CM_SERVICE;
srx.transport_type = SOCK_DGRAM;
srx.transport_len = sizeof(srx.transport.sin6);
srx.transport.sin6.sin6_family = AF_INET6;
srx.transport.sin6.sin6_port = htons(AFS_CM_PORT);
/* Require at least encrypted security for calls on this socket. */
ret = rxrpc_sock_set_min_security_level(socket->sk,
RXRPC_SECURITY_ENCRYPT); if (ret < 0) goto error_2;
ret = rxrpc_sock_set_manage_response(socket->sk, true); if (ret < 0) goto error_2;
/* Key-creation failure is logged but deliberately not treated as fatal
 * (ret is overwritten by the bind below).
 */
ret = afs_create_token_key(net, socket); if (ret < 0)
pr_err("Couldn't create RxGK CM key: %d\n", ret);
/* If the well-known CM port is already taken, retry on an ephemeral port. */
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret == -EADDRINUSE) {
srx.transport.sin6.sin6_port = 0;
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
} if (ret < 0) goto error_2;
/* Also accept the YFS variant of the cache-manager service ID. */
srx.srx_service = YFS_CM_SERVICE;
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret < 0) goto error_2;
/* Ideally, we'd turn on service upgrade here, but we can't because * OpenAFS is buggy and leaks the userStatus field from packet to * packet and between FS packets and CB packets - so if we try to do an * upgrade on an FS packet, OpenAFS will leak that into the CB packet * it sends back to us.
*/
/* NOTE(review): the source appears truncated here.  The remainder of
 * afs_open_socket() (including the error_1/error_2 labels it jumps to
 * and its return) is missing, and the code below looks like the tail of
 * a call-freeing helper: it references 'call' and 'o', neither of which
 * is declared in this function.  Reconstruct from the original file.
 */
o = atomic_read(&net->nr_outstanding_calls);
trace_afs_call(call->debug_id, afs_call_trace_free, 0, o,
__builtin_return_address(0));
kfree(call);
/* Wake anyone waiting for the outstanding-call count to reach zero. */
o = atomic_dec_return(&net->nr_outstanding_calls); if (o == 0)
wake_up_var(&net->nr_outstanding_calls);
}
/*
 * Dispose of a reference on a call.
 */
void afs_put_call(struct afs_call *call)
{
	struct afs_net *net = call->net;
	unsigned int debug_id = call->debug_id;	/* fixed: was 'unsignedint' */
	bool zero;
	int r, o;

	/* Capture the debug ID and counters before dropping the ref: once
	 * the refcount hits zero the call may be freed.
	 */
	zero = __refcount_dec_and_test(&call->ref, &r);
	o = atomic_read(&net->nr_outstanding_calls);
	trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
		       __builtin_return_address(0));
	if (zero)
		afs_free_call(call);
}
/*
 * Dispose of a reference on a call, deferring the cleanup to a workqueue
 * to avoid lock recursion.
 */
void afs_deferred_put_call(struct afs_call *call)
{
	struct afs_net *net = call->net;
	unsigned int debug_id = call->debug_id;	/* fixed: was 'unsignedint' */
	bool zero;
	int r, o;

	/* Capture the debug ID and counters before dropping the ref: once
	 * the refcount hits zero the call may be freed by the work item.
	 */
	zero = __refcount_dec_and_test(&call->ref, &r);
	o = atomic_read(&net->nr_outstanding_calls);
	trace_afs_call(debug_id, afs_call_trace_put, r - 1, o,
		       __builtin_return_address(0));

	/* Free on a workqueue rather than inline so we don't recurse on
	 * locks the caller may be holding.
	 */
	if (zero)
		schedule_work(&call->free_work);
}
/*
 * Queue the call for actual work.
 */
static void afs_queue_call_work(struct afs_call *call)	/* fixed: was 'staticvoid' */
{
	if (call->type->work) {
		/* The work item carries its own ref on the call; drop it
		 * again if the item was already queued.
		 */
		afs_get_call(call, afs_call_trace_work);
		if (!queue_work(afs_wq, &call->work))
			afs_put_call(call);
	}
}
/* * allocate a call with flat request and reply buffers
*/ struct afs_call *afs_alloc_flat_call(struct afs_net *net, conststruct afs_call_type *type,
size_t request_size, size_t reply_max)
{ struct afs_call *call;
call = afs_alloc_call(net, type, GFP_NOFS); if (!call) goto nomem_call;
/* Optional flat request buffer, sized by the caller. */
if (request_size) {
call->request_size = request_size;
call->request = kmalloc(request_size, GFP_NOFS); if (!call->request) goto nomem_free;
}
/* Optional flat reply buffer, sized by the caller. */
if (reply_max) {
call->reply_max = reply_max;
call->buffer = kmalloc(reply_max, GFP_NOFS); if (!call->buffer) goto nomem_free;
}
/* NOTE(review): truncated - the success return path and the
 * nomem_call/nomem_free error labels referenced above are missing from
 * this extract.  Also, 'conststruct' in the signature is missing a
 * space (extraction artifact) and will not compile as-is.
 */
/* * Initiate a call and synchronously queue up the parameters for dispatch. Any * error is stored into the call struct, which the caller must check for.
*/ void afs_make_call(struct afs_call *call, gfp_t gfp)
{ struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1];
size_t len;
s64 tx_total_len; int ret;
/* Work out the length we're going to transmit. This is awkward for * calls such as FS.StoreData where there's an extra injection of data * after the initial fixed part.
*/
tx_total_len = call->request_size; if (call->write_iter)
tx_total_len += iov_iter_count(call->write_iter);
/* If the call is going to be asynchronous, we need an extra ref for * the call to hold itself so the caller need not hang on to its ref.
*/ if (call->async) {
afs_get_call(call, afs_call_trace_get);
call->drop_ref = true;
}
/* NOTE(review): truncated - the code that creates 'rxcall' and
 * initialises 'msg'/'iov' (both used below) is missing from this
 * extract, as is the error_1 label implied by the structure; 'msg' is
 * read uninitialised as the text stands.
 */
ret = rxrpc_kernel_send_data(call->net->socket,
call->rxcall, &msg,
iov_iter_count(&msg.msg_iter),
afs_notify_end_request_tx);
/* Preserve whatever remains of the write iterator after the send. */
*call->write_iter = msg.msg_iter;
trace_afs_sent_data(call, &msg, ret); if (ret < 0) goto error_do_abort;
}
/* Note that at this point, we may have received the reply or an abort * - and an asynchronous call may already have completed. * * afs_wait_for_call_to_complete(call) * must be called to synchronously clean up.
*/ return;
error_do_abort: if (ret != -ECONNABORTED)
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret,
afs_abort_send_data_error); if (call->async) {
/* Async calls tidy themselves up; just note the abort and bail. */
afs_see_call(call, afs_call_trace_async_abort); return;
}
if (ret == -ECONNABORTED) {
/* The call was aborted remotely; drain to pick up the abort code. */
len = 0;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
rxrpc_kernel_recv_data(call->net->socket, rxcall,
&msg.msg_iter, &len, false,
&call->abort_code, &call->service_id);
call->responded = true;
}
call->error = ret;
trace_afs_call_done(call);
error_kill_call: if (call->async)
afs_see_call(call, afs_call_trace_async_kill); if (call->type->immediate_cancel)
call->type->immediate_cancel(call);
/* We need to dispose of the extra ref we grabbed for an async call. * The call, however, might be queued on afs_async_calls and we need to * make sure we don't get any more notifications that might requeue it.
*/ if (call->rxcall)
rxrpc_kernel_shutdown_call(call->net->socket, call->rxcall); if (call->async) { if (cancel_work_sync(&call->async_work))
afs_put_call(call);
afs_set_call_complete(call, ret, 0);
}
/* NOTE(review): the function's closing brace and any final trace/leave
 * statement appear to be missing - brace counting does not balance in
 * this extract.
 */
/*
 * Log remote abort codes that indicate that we have a protocol disagreement
 * with the server.
 */
static void afs_log_error(struct afs_call *call, s32 remote_abort)	/* fixed: was 'staticvoid' */
{
	static int max = 0;	/* fixed: was 'staticint' */
	const char *msg;	/* fixed: was 'constchar' */
	int m;

	/* Only abort codes that indicate marshalling/protocol trouble are
	 * reported; anything else returns silently.
	 */
	switch (remote_abort) {
	case RX_EOF:		 msg = "unexpected EOF";	break;
	case RXGEN_CC_MARSHAL:	 msg = "client marshalling";	break;
	case RXGEN_CC_UNMARSHAL: msg = "client unmarshalling";	break;
	case RXGEN_SS_MARSHAL:	 msg = "server marshalling";	break;
	case RXGEN_SS_UNMARSHAL: msg = "server unmarshalling";	break;
	case RXGEN_DECODE:	 msg = "opcode decode";		break;
	case RXGEN_SS_XDRFREE:	 msg = "server XDR cleanup";	break;
	case RXGEN_CC_XDRFREE:	 msg = "client XDR cleanup";	break;
	case -32:		 msg = "insufficient data";	break;
	default:
		return;
	}

	/* Rate-limit to the first three reports (unsynchronised, so racy,
	 * but the worst case is an extra log line).
	 */
	m = max;
	if (m < 3) {
		max = m + 1;
		pr_notice("kAFS: Peer reported %s failure on %s [%pISp]\n",
			  msg, call->type->name,
			  rxrpc_kernel_remote_addr(call->peer));
	}
}
/* * deliver messages to a call
*/ void afs_deliver_to_call(struct afs_call *call)
{ enum afs_call_state state;
size_t len;
u32 abort_code, remote_abort = 0; int ret;
_enter("%s", call->type->name);
/* Keep delivering for as long as the call is in a state that expects
 * more data or an ACK.
 */
while (state = READ_ONCE(call->state),
state == AFS_CALL_CL_AWAIT_REPLY ||
state == AFS_CALL_SV_AWAIT_OP_ID ||
state == AFS_CALL_SV_AWAIT_REQUEST ||
state == AFS_CALL_SV_AWAIT_ACK
) { if (state == AFS_CALL_SV_AWAIT_ACK) {
/* Waiting for the final ACK: receive with an empty iterator just to
 * collect completion/abort status.
 */
len = 0;
iov_iter_kvec(&call->def_iter, ITER_DEST, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
call->rxcall, &call->def_iter,
&len, false, &remote_abort,
&call->service_id);
trace_afs_receive_data(call, &call->def_iter, false, ret);
/* ret == 1 means the call completed normally; fold it to 0. */
if (ret == -EINPROGRESS || ret == -EAGAIN) return; if (ret < 0 || ret == 1) { if (ret == 1)
ret = 0; goto call_complete;
} return;
}
/* Let the call-type-specific handler consume the incoming data. */
ret = call->type->deliver(call);
state = READ_ONCE(call->state); if (ret == 0 && call->unmarshalling_error)
ret = -EBADMSG; switch (ret) { case 0:
call->responded = true;
afs_queue_call_work(call); if (state == AFS_CALL_CL_PROC_REPLY) { if (call->op)
set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
&call->op->server->flags); goto call_complete;
}
ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); goto done; case -EINPROGRESS: case -EAGAIN: goto out; case -ECONNABORTED:
ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
call->responded = true;
afs_log_error(call, call->abort_code); goto done; case -ENOTSUPP:
/* Peer asked for an operation we don't implement. */
call->responded = true;
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret,
afs_abort_op_not_supported); goto local_abort; case -EIO:
pr_err("kAFS: Call %u in bad state %u\n",
call->debug_id, state);
fallthrough; case -ENODATA: case -EBADMSG: case -EMSGSIZE: case -ENOMEM: case -EFAULT:
/* Pick the unmarshal abort code by direction: client-side while
 * awaiting a reply, server-side otherwise.
 */
abort_code = RXGEN_CC_UNMARSHAL; if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret,
afs_abort_unmarshal_error); goto local_abort; default:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret,
afs_abort_general_error); goto local_abort;
}
}
done: if (call->type->done)
call->type->done(call);
out:
_leave(""); return;
/* NOTE(review): truncated - the local_abort: and call_complete: labels
 * that the gotos above target are missing from this extract.
 */
/* * Wait synchronously for a call to complete.
*/ void afs_wait_for_call_to_complete(struct afs_call *call)
{ bool rxrpc_complete = false;
_enter("");
if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
DECLARE_WAITQUEUE(myself, current);
/* Park on the call's waitqueue and poll for completion, delivering
 * queued messages ourselves when the call needs attention.
 */
add_wait_queue(&call->waitq, &myself); for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
/* deliver any messages that are in the queue */ if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
call->need_attention) {
call->need_attention = false;
__set_current_state(TASK_RUNNING);
afs_deliver_to_call(call); continue;
}
if (afs_check_call_state(call, AFS_CALL_COMPLETE)) break;
if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) { /* rxrpc terminated the call. */
rxrpc_complete = true; break;
}
/* NOTE(review): truncated - the rest of the wait loop (schedule(),
 * remove_wait_queue(), and the handling of rxrpc_complete) is missing
 * from this extract.
 */
/* * Wake up an asynchronous call. The caller is holding the call notify * spinlock around this, so we can't call afs_put_call().
*/ staticvoid afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, unsignedlong call_user_ID)
{ struct afs_call *call = (struct afs_call *)call_user_ID; int r;
if (__refcount_inc_not_zero(&call->ref, &r)) {
trace_afs_call(call->debug_id, afs_call_trace_wake, r + 1,
atomic_read(&call->net->nr_outstanding_calls),
__builtin_return_address(0));
if (!queue_work(afs_async_calls, &call->async_work))
afs_deferred_put_call(call);
}
}
/* * Perform I/O processing on an asynchronous call. The work item carries a ref * to the call struct that we either need to release or to pass on.
*/ staticvoid afs_process_async_call(struct work_struct *work)
{ struct afs_call *call = container_of(work, struct afs_call, async_work);
/* NOTE(review): truncated - only the container_of() prologue of this
 * function survives in this extract.  Also, 'staticvoid' in the
 * signature is missing a space (extraction artifact).
 */
/* * Grab the operation ID from an incoming cache manager call. The socket * buffer is discarded on error or if we don't yet have sufficient data.
*/ staticint afs_deliver_cm_op_id(struct afs_call *call)
{ int ret;
_enter("{%zu}", iov_iter_count(call->iter));
/* the operation ID forms the first four bytes of the request data */
ret = afs_extract_data(call, true); if (ret < 0) return ret;
/* NOTE(review): truncated - the decode of the extracted operation ID
 * and the function's tail are missing from this extract.  'staticint'
 * in the signature is missing a space (extraction artifact).
 */
/* * Extract a piece of data from the received data socket buffers.
*/ int afs_extract_data(struct afs_call *call, bool want_more)
{ struct afs_net *net = call->net; struct iov_iter *iter = call->iter; enum afs_call_state state;
u32 remote_abort = 0; int ret;
/* NOTE(review): truncated - only the declarations of this function
 * survive in this extract; the receive/extract logic is missing.
 */
/*
 * NOTE(review): the text below is web-extraction residue appended to this
 * file, not part of the original source.  Translated from German:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge.  However, neither the completeness, nor the correctness,
 * nor the quality of the information provided is guaranteed.
 * Remark: the colour syntax rendering and the measurement are still
 * experimental."
 */