// SPDX-License-Identifier: GPL-2.0-or-later
/* In-kernel rxperf server for testing purposes.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
/* * Open an rxrpc socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
*/ staticint rxperf_open_socket(void)
{ struct sockaddr_rxrpc srx; struct socket *socket; int ret;
ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET6,
&socket); if (ret < 0) goto error_1;
socket->sk->sk_allocation = GFP_NOFS;
/* bind the callback manager's address to make this a server socket */
memset(&srx, 0, sizeof(srx));
srx.srx_family = AF_RXRPC;
srx.srx_service = RX_PERF_SERVICE;
srx.transport_type = SOCK_DGRAM;
srx.transport_len = sizeof(srx.transport.sin6);
srx.transport.sin6.sin6_family = AF_INET6;
srx.transport.sin6.sin6_port = htons(RXPERF_PORT);
ret = rxrpc_sock_set_min_security_level(socket->sk,
RXRPC_SECURITY_ENCRYPT); if (ret < 0) goto error_2;
ret = rxrpc_sock_set_security_keyring(socket->sk, rxperf_sec_keyring);
ret = kernel_bind(socket, (struct sockaddr *)&srx, sizeof(srx)); if (ret < 0) goto error_2;
error_2:
sock_release(socket);
error_1:
pr_err("Can't set up rxperf socket: %d\n", ret); return ret;
}
/*
 * close the rxrpc socket rxperf was using
 */
static void rxperf_close_socket(void)
{
	/* Stop accepting new service calls. */
	kernel_listen(rxperf_socket, 0);
	/* Shut down both directions so in-flight operations terminate. */
	kernel_sock_shutdown(rxperf_socket, SHUT_RDWR);
	/* Let any queued call-processing work drain before the socket goes. */
	flush_workqueue(rxperf_workqueue);
	sock_release(rxperf_socket);
}
/*
 * Log remote abort codes that indicate that we have a protocol disagreement
 * with the server.  Only the first few such failures are reported so that a
 * misbehaving peer cannot flood the log.
 */
static void rxperf_log_error(struct rxperf_call *call, s32 remote_abort)
{
	static int nr_logged = 0;	/* how many failures we've reported so far */
	const char *reason;
	int seen;

	switch (remote_abort) {
	case RX_EOF:
		reason = "unexpected EOF";
		break;
	case RXGEN_CC_MARSHAL:
		reason = "client marshalling";
		break;
	case RXGEN_CC_UNMARSHAL:
		reason = "client unmarshalling";
		break;
	case RXGEN_SS_MARSHAL:
		reason = "server marshalling";
		break;
	case RXGEN_SS_UNMARSHAL:
		reason = "server unmarshalling";
		break;
	case RXGEN_DECODE:
		reason = "opcode decode";
		break;
	case RXGEN_SS_XDRFREE:
		reason = "server XDR cleanup";
		break;
	case RXGEN_CC_XDRFREE:
		reason = "client XDR cleanup";
		break;
	case -32:
		reason = "insufficient data";
		break;
	default:
		/* Not a code that signals a protocol disagreement. */
		return;
	}

	seen = nr_logged;
	if (seen < 3) {
		nr_logged = seen + 1;
		pr_info("Peer reported %s failure on %s\n", reason, call->type);
	}
}
/*
 * deliver messages to a call
 *
 * Work item that drives a call through its receive states, translating
 * delivery/processing errors into the appropriate rxrpc abort, and tearing
 * the call down once it is complete.
 */
static void rxperf_deliver_to_call(struct work_struct *work)
{
	struct rxperf_call *call = container_of(work, struct rxperf_call, work);
	enum rxperf_call_state state;
	u32 abort_code, remote_abort = 0;
	int ret = 0;

	/* Nothing to do if the call already finished. */
	if (call->state == RXPERF_CALL_COMPLETE)
		return;

	/* Keep pumping while the call is in one of the states that awaits
	 * incoming data or the final ACK.
	 */
	while (state = call->state,
	       state == RXPERF_CALL_SV_AWAIT_PARAMS ||
	       state == RXPERF_CALL_SV_AWAIT_REQUEST ||
	       state == RXPERF_CALL_SV_AWAIT_ACK
	       ) {
		if (state == RXPERF_CALL_SV_AWAIT_ACK) {
			/* Waiting for the final ACK: if rxrpc says the call is
			 * dead, complete it; otherwise come back later.
			 */
			if (!rxrpc_kernel_check_life(rxperf_socket, call->rxcall))
				goto call_complete;
			return;
		}

		/* Pull in data for the current state, then act on it. */
		ret = call->deliver(call);
		if (ret == 0)
			ret = rxperf_process_call(call);

		switch (ret) {
		case 0:
			continue;
		case -EINPROGRESS:
		case -EAGAIN:
			/* More data needed; the work item will be requeued. */
			return;
		case -ECONNABORTED:
			/* Peer aborted: log the remote abort code. */
			rxperf_log_error(call, call->abort_code);
			goto call_complete;
		case -EOPNOTSUPP:
			abort_code = RXGEN_OPCODE;
			rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
						abort_code, ret,
						rxperf_abort_op_not_supported);
			goto call_complete;
		case -ENOTSUPP:
			abort_code = RX_USER_ABORT;
			rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
						abort_code, ret,
						rxperf_abort_op_not_supported);
			goto call_complete;
		case -EIO:
			pr_err("Call %u in bad state %u\n",
			       call->debug_id, call->state);
			fallthrough;
		case -ENODATA:
		case -EBADMSG:
		case -EMSGSIZE:
		case -ENOMEM:
		case -EFAULT:
			/* Data we couldn't parse: abort as an unmarshalling
			 * failure.
			 */
			rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
						RXGEN_SS_UNMARSHAL, ret,
						rxperf_abort_unmarshal_error);
			goto call_complete;
		default:
			/* Anything else is a general failure. */
			rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
						RX_CALL_DEAD, ret,
						rxperf_abort_general_error);
			goto call_complete;
		}
	}

call_complete:
	rxperf_set_call_complete(call, ret, remote_abort);
	/* The call may have been requeued */
	rxrpc_kernel_shutdown_call(rxperf_socket, call->rxcall);
	rxrpc_kernel_put_call(rxperf_socket, call->rxcall);
	/* Make sure no requeued instance of this work runs after the free. */
	cancel_work(&call->work);
	kfree(call);
}
/*
 * Extract a piece of data from the received data socket buffers.
 *
 * Returns 0 when the wanted data has been extracted (advancing the call
 * state as appropriate), -EAGAIN if more data must arrive first, or a
 * negative error after marking the call complete.
 */
static int rxperf_extract_data(struct rxperf_call *call, bool want_more)
{
	u32 remote_abort = 0;
	int ret;

	ret = rxrpc_kernel_recv_data(rxperf_socket, call->rxcall, &call->iter,
				     &call->iov_len, want_more, &remote_abort,
				     &call->service_id);
	pr_debug("Extract i=%zu l=%zu m=%u ret=%d\n",
		 iov_iter_count(&call->iter), call->iov_len, want_more, ret);
	if (ret == 0 || ret == -EAGAIN)
		return ret;

	if (ret == 1) {
		/* ret == 1: the last data packet has now been received. */
		switch (call->state) {
		case RXPERF_CALL_SV_AWAIT_REQUEST:
			rxperf_set_call_state(call, RXPERF_CALL_SV_REPLYING);
			break;
		case RXPERF_CALL_COMPLETE:
			pr_debug("premature completion %d", call->error);
			return call->error;
		default:
			break;
		}
		return 0;
	}

	/* ret < 0 (other than -EAGAIN): the call failed.  This path was
	 * missing entirely — control fell off the end of the function —
	 * so record the error and remote abort code before propagating.
	 */
	rxperf_set_call_complete(call, ret, remote_abort);
	return ret;
}
if (IS_ERR(kref)) {
pr_err("Can't allocate rxperf server key: %ld\n", PTR_ERR(kref)); return PTR_ERR(kref);
}
ret = key_link(keyring, key_ref_to_ptr(kref)); if (ret < 0)
pr_err("Can't link rxperf server key: %d\n", ret);
key_ref_put(kref); return ret;
} #endif
/*
 * Initialise the rxperf server.
 *
 * Sets up the work queue, allocates the security keyring and populates it
 * with the test keys, then opens the server socket.  Returns 0 on success
 * or a negative errno, with everything torn down again on failure.
 */
static int __init rxperf_init(void)
{
	struct key *keyring;
	int ret = -ENOMEM;

	pr_info("Server registering\n");

	rxperf_workqueue = alloc_workqueue("rxperf", 0, 0);
	if (!rxperf_workqueue)
		goto error_workqueue;

	keyring = keyring_alloc("rxperf_server",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
				KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
				KEY_POS_WRITE |
				KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
				KEY_USR_WRITE |
				KEY_OTH_VIEW | KEY_OTH_READ | KEY_OTH_SEARCH,
				KEY_ALLOC_NOT_IN_QUOTA,
				NULL, NULL);
	if (IS_ERR(keyring)) {
		pr_err("Can't allocate rxperf server keyring: %ld\n",
		       PTR_ERR(keyring));
		/* Previously the stale -ENOMEM was returned here; report the
		 * actual keyring_alloc() error instead.
		 */
		ret = PTR_ERR(keyring);
		goto error_keyring;
	}
	rxperf_sec_keyring = keyring;

	ret = rxperf_add_rxkad_key(keyring);
	if (ret < 0)
		goto error_key;
#ifdef CONFIG_RXGK
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96);
	if (ret < 0)
		goto error_key;
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96);
	if (ret < 0)
		goto error_key;
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128);
	if (ret < 0)
		goto error_key;
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
	if (ret < 0)
		goto error_key;
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC);
	if (ret < 0)
		goto error_key;
	ret = rxperf_add_yfs_rxgk_key(keyring, KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC);
	if (ret < 0)
		goto error_key;
#endif

	ret = rxperf_open_socket();
	if (ret < 0)
		goto error_socket;
	return 0;

error_socket:
error_key:
	key_put(rxperf_sec_keyring);
error_keyring:
	destroy_workqueue(rxperf_workqueue);
	rcu_barrier();
error_workqueue:
	pr_err("Failed to register: %d\n", ret);
	return ret;
}
/* Must be called after net/ to create socket */
late_initcall(rxperf_init);
/*
 * NOTE(review): the following German-language website disclaimer appears to
 * be extraction residue accidentally appended to the source file.  It is
 * commented out here so the file remains compilable; it should simply be
 * deleted.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */