// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 * - Forwarded on unmodified to the SPMD at EL3
 * - Rejected as "unsupported"
 * - Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */
/* * "ID value 0 must be returned at the Non-secure physical FF-A instance" * We share this ID with the host.
*/ #define HOST_FFA_ID 0
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;	/* descriptor scratch area */
	size_t len;	/* size of @buf in bytes */
};
/* * Note that we don't currently lock these buffers explicitly, instead * relying on the locking of the host FFA buffers as we only have one * client.
*/ staticstruct kvm_ffa_buffers hyp_buffers; staticstruct kvm_ffa_buffers host_buffers; static u32 hyp_ffa_version; staticbool has_version_negotiated; static hyp_spinlock_t version_lock;
if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS; goto out;
}
if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
ret = FFA_RET_INVALID_PARAMETERS; goto out;
}
hyp_spin_lock(&host_buffers.lock); if (host_buffers.tx) {
ret = FFA_RET_DENIED; goto out_unlock;
}
/* * Map our hypervisor buffers into the SPMD before mapping and * pinning the host buffers in our own address space.
*/
ret = ffa_map_hyp_buffers(npages); if (ret) goto out_unlock;
ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx)); if (ret) {
ret = FFA_RET_INVALID_PARAMETERS; goto err_unmap;
}
ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx)); if (ret) {
ret = FFA_RET_INVALID_PARAMETERS; goto err_unshare_tx;
}
tx_virt = hyp_phys_to_virt(tx);
ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1); if (ret) {
ret = FFA_RET_INVALID_PARAMETERS; goto err_unshare_rx;
}
rx_virt = hyp_phys_to_virt(rx);
ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1); if (ret) {
ret = FFA_RET_INVALID_PARAMETERS; goto err_unpin_tx;
}
ret = ffa_host_share_ranges(buf, nr_ranges); if (ret) { /* * We're effectively aborting the transaction, so we need * to restore the global state back to what it was prior to * transmission of the first fragment.
*/
ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
WARN_ON(res->a0 != FFA_SUCCESS); goto out_unlock;
}
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
out: if (ret)
ffa_to_smccc_res(res, ret);
/* * If for any reason this did not succeed, we're in trouble as we have * now lost the content of the previous fragments and we can't rollback * the host stage-2 changes. The pages previously marked as shared will * remain stuck in that state forever, hence preventing the host from * sharing/donating them again and may possibly lead to subsequent * failures, but this will not compromise confidentiality.
*/ return;
}
ep_mem_access = (void *)buf +
ffa_mem_desc_offset(buf, 0, hyp_ffa_version);
offset = ep_mem_access->composite_off; /* * We can trust the SPMD to get this right, but let's at least * check that we end up with something that doesn't look _completely_ * bogus.
*/ if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
ffa_rx_release(res); goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
ffa_rx_release(res); goto out_unlock;
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags); if (res->a0 != FFA_SUCCESS) goto out_unlock;
reg = (void *)buf + offset; /* If the SPMD was happy, then we should be too. */
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
reg->addr_range_cnt));
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
if (ret)
ffa_to_smccc_res(res, ret);
}
/* * Is a given FFA function supported, either by forwarding on directly * or by handling at EL2?
*/ staticbool ffa_call_supported(u64 func_id)
{ switch (func_id) { /* Unsupported memory management calls */ case FFA_FN64_MEM_RETRIEVE_REQ: case FFA_MEM_RETRIEVE_RESP: case FFA_MEM_RELINQUISH: case FFA_MEM_OP_PAUSE: case FFA_MEM_OP_RESUME: case FFA_MEM_FRAG_RX: case FFA_FN64_MEM_DONATE: /* Indirect message passing via RX/TX buffers */ case FFA_MSG_SEND: case FFA_MSG_POLL: case FFA_MSG_WAIT: /* 32-bit variants of 64-bit calls */ case FFA_MSG_SEND_DIRECT_RESP: case FFA_RXTX_MAP: case FFA_MEM_DONATE: case FFA_MEM_RETRIEVE_REQ: returnfalse;
}
returntrue;
}
staticbool do_ffa_features(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, id, ctxt, 1);
u64 prop = 0; int ret = 0;
if (!ffa_call_supported(id)) {
ret = FFA_RET_NOT_SUPPORTED; goto out_handled;
}
switch (id) { case FFA_MEM_SHARE: case FFA_FN64_MEM_SHARE: case FFA_MEM_LEND: case FFA_FN64_MEM_LEND:
ret = FFA_RET_SUCCESS;
prop = 0; /* No support for dynamic buffers */ goto out_handled; default: returnfalse;
}
if (FFA_MAJOR_VERSION(ffa_req_version) != 1) {
res->a0 = FFA_RET_NOT_SUPPORTED; return;
}
hyp_spin_lock(&version_lock); if (has_version_negotiated) {
res->a0 = hyp_ffa_version; goto unlock;
}
/* * If the client driver tries to downgrade the version, we need to ask * first if TEE supports it.
*/ if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version)) {
arm_smccc_1_1_smc(FFA_VERSION, ffa_req_version, 0,
0, 0, 0, 0, 0,
res); if (res->a0 == FFA_RET_NOT_SUPPORTED) goto unlock;
/* * There's no way we can tell what a non-standard SMC call might * be up to. Ideally, we would terminate these here and return * an error to the host, but sadly devices make use of custom * firmware calls for things like power management, debugging, * RNG access and crash reporting. * * Given that the architecture requires us to trust EL3 anyway, * we forward unrecognised calls on under the assumption that * the firmware doesn't expose a mechanism to access arbitrary * non-secure memory. Short of a per-device table of SMCs, this * is the best we can do.
*/ if (!is_ffa_call(func_id)) returnfalse;
/* * Firmware returns the maximum supported version of the FF-A * implementation. Check that the returned version is * backwards-compatible with the hyp according to the rules in DEN0077A * v1.1 REL0 13.2.1. * * Of course, things are never simple when dealing with firmware. v1.1 * broke ABI with v1.0 on several structures, which is itself * incompatible with the aforementioned versioning scheme. The * expectation is that v1.x implementations that do not support the v1.0 * ABI return NOT_SUPPORTED rather than a version number, according to * DEN0077A v1.1 REL0 18.6.4.
*/ if (FFA_MAJOR_VERSION(res.a0) != 1) return -EOPNOTSUPP;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.