// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / sizeof(__u32))

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
	       off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;
return size;
}
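/*
 * Illustrative userspace sketch (not part of this file): building a core
 * register ID for the KVM_GET_ONE_REG/KVM_SET_ONE_REG vCPU ioctls. The
 * offset encoded in the ID counts 32-bit words into struct kvm_regs, and
 * the ID's size field must match what core_reg_size_from_offset()
 * computes for that offset. Userspace needs its own equivalent of the
 * KVM_REG_ARM_CORE_REG() macro; "vcpu_fd" and "val" are assumed here.
 */
#if 0
	__u64 val;
	struct kvm_one_reg one_reg = {
		/* 64-bit general purpose register x2: */
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(regs.regs[2]),
		.addr = (__u64)(unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
		err(1, "KVM_GET_ONE_REG");
#endif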
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;	/* offsets count 32-bit words; GPRs are 64 bit */
		return &vcpu->arch.ctxt.regs.regs[off];
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;	/* each V-register spans four 32-bit words */
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
}
}
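/*
 * A minimal sketch of the offset scaling used above: core reg offsets
 * count 32-bit words, so consecutive 64-bit GPRs are 2 words apart and
 * consecutive 128-bit V-registers are 4 words apart, which is why
 * core_reg_addr() divides the relative offset by 2 and 4 respectively.
 * These compile-time checks are illustrative, not part of the file:
 */
#if 0
	BUILD_BUG_ON(KVM_REG_ARM_CORE_REG(regs.regs[1]) -
		     KVM_REG_ARM_CORE_REG(regs.regs[0]) != 2);
	BUILD_BUG_ON(KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]) -
		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) != 4);
#endif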
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;
	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}
	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;

		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!kvm_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
		case PSR_AA32_MODE_SYS:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL2h:
		case PSR_MODE_EL2t:
			if (!vcpu_has_nv(vcpu))
				return -EINVAL;
			fallthrough;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}
memcpy(addr, valp, KVM_REG_SIZE(reg->id));
	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}
		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_cpsr(vcpu) = (u32)*vcpu_cpsr(vcpu);
	}
out:
	return err;
}
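/*
 * Illustrative userspace sketch (not part of this file): setting PSTATE
 * through KVM_SET_ONE_REG. The mode bits are validated by set_core_reg()
 * above, so e.g. an AArch32 mode is rejected on a vCPU whose EL1 is
 * AArch64. "vcpu_fd" is assumed.
 */
#if 0
	__u64 pstate = PSR_MODE_EL1h;	/* AArch64 EL1, using SP_EL1 */
	struct kvm_one_reg one_reg = {
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(regs.pstate),
		.addr = (__u64)(unsigned long)&pstate,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg))
		err(1, "KVM_SET_ONE_REG");
#endif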
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -EINVAL;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;		/* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;
	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;
	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;
	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;
	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
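/*
 * Illustrative sketch (not part of this file): the KVM_REG_ARM64_SVE_VLS
 * pseudo-register is a bitmap with one bit per vector length, bit
 * (vq - SVE_VQ_MIN) of vqs[] standing for a vector length of vq * 128
 * bits. To request vector lengths of 128..512 bits (vq 1..4), assuming
 * the host supports all of them:
 */
#if 0
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = {};
	unsigned int vq;

	for (vq = SVE_VQ_MIN; vq <= 4; ++vq)
		vqs[vq_word(vq)] |= vq_mask(vq);	/* vqs[0] == 0xf */
#endif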
/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
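/*
 * A minimal sketch of how a caller consumes the region, modelled on the
 * Z/P register read path that operates on vcpu->arch.sve_state (the
 * surrounding accessor helpers are not shown in this excerpt): klen
 * bytes come from kernel memory at koffset, and upad bytes of trailing
 * padding in the user buffer are zeroed.
 */
#if 0
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	if (sve_reg_to_region(&region, vcpu, reg))
		return -ENOENT;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;
#endif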
static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
	case KVM_REG_ARM_PTIMER_CTL:
	case KVM_REG_ARM_PTIMER_CNT:
	case KVM_REG_ARM_PTIMER_CVAL:
		return true;
	}
	return false;
}
static u64 timer_reg_list[] = {
	KVM_REG_ARM_TIMER_CTL,
	KVM_REG_ARM_TIMER_CNT,
	KVM_REG_ARM_TIMER_CVAL,
	KVM_REG_ARM_PTIMER_CTL,
	KVM_REG_ARM_PTIMER_CNT,
	KVM_REG_ARM_PTIMER_CVAL,
};

#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	for (int i = 0; i < NUM_TIMER_REGS; i++) {
		if (put_user(timer_reg_list[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return 0;
}
static int copy_sve_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	/* Only the first slice ever exists, for now */
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;
	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}
return num_regs;
}
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vCPU pointer
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

res += num_core_regs(vcpu);
res += num_sve_regs(vcpu);
res += kvm_arm_num_sys_reg_descs(vcpu);
res += kvm_arm_get_fw_num_regs(vcpu);
res += NUM_TIMER_REGS;
return res;
}
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu: the vCPU pointer
 * @uindices: register list to copy
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;
	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
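/*
 * Illustrative userspace sketch (not part of this file): the usual
 * two-call KVM_GET_REG_LIST pattern that consumes the indices copied
 * above. The first call with n == 0 fails with E2BIG but reports the
 * required count. "vcpu_fd" is assumed.
 */
#if 0
	struct kvm_reg_list probe = { .n = 0 }, *list;

	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	/* fails with E2BIG */

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list))
		err(1, "KVM_GET_REG_LIST");
#endif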
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */
return 0;
}
static void commit_pending_events(struct kvm_vcpu *vcpu)
{
	if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
		return;

	/*
	 * Reset the MMIO emulation state to avoid stepping PC after emulating
	 * the exception entry.
	 */
vcpu->mmio_needed = false;
kvm_call_hyp(__kvm_adjust_pc, vcpu);
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;
	u64 esr = events->exception.serror_esr;
	int ret = 0;
	/*
	 * Immediately commit the pending SEA to the vCPU's architectural
	 * state which is necessary since we do not return a pending SEA
	 * to userspace via KVM_GET_VCPU_EVENTS.
	 */
	if (ext_dabt_pending) {
ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
commit_pending_events(vcpu);
}
	if (ret < 0)
		return ret;

	if (!serror_pending)
		return 0;

	if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && has_esr)
		return -EINVAL;

	if (has_esr && (esr & ~ESR_ELx_ISS_MASK))
		return -EINVAL;
	if (has_esr)
		ret = kvm_inject_serror_esr(vcpu, esr);
	else
		ret = kvm_inject_serror(vcpu);
	/*
	 * We could've decided that the SError is due for immediate software
	 * injection; commit the exception in case userspace decides it wants
	 * to inject more exceptions for some strange reason.
	 */
	commit_pending_events(vcpu);

	return (ret < 0) ? ret : 0;
}
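/*
 * Illustrative userspace sketch (not part of this file): injecting an
 * SError with a specific ESR via KVM_SET_VCPU_EVENTS. serror_has_esr is
 * only accepted on hosts with the RAS extension, and only the ISS bits
 * of the ESR may be set, matching the checks above. "vcpu_fd" is assumed.
 */
#if 0
	struct kvm_vcpu_events events = {};

	events.exception.serror_pending = 1;
	events.exception.serror_has_esr = 1;
	events.exception.serror_esr = 0x1;	/* ISS bits only */

	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events))
		err(1, "KVM_SET_VCPU_EVENTS");
#endif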
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
trace_kvm_set_guest_debug(vcpu, dbg->control);
	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
		return -EINVAL;

	vcpu->guest_debug = dbg->control;

	/* Hardware assisted Break and Watch points */
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
		vcpu->arch.external_debug_state = dbg->arch;
return 0;
}
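/*
 * Illustrative userspace sketch (not part of this file): enabling
 * software breakpoint debugging. Unknown control bits are rejected via
 * KVM_GUESTDBG_VALID_MASK above. "vcpu_fd" is assumed.
 */
#if 0
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
	};

	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg))
		err(1, "KVM_SET_GUEST_DEBUG");
#endif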
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		mutex_lock(&vcpu->kvm->arch.config_lock);
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		mutex_unlock(&vcpu->kvm->arch.config_lock);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
return ret;
}
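/*
 * Illustrative userspace sketch (not part of this file): vCPU device
 * attributes are probed and set with the KVM_HAS_DEVICE_ATTR and
 * KVM_SET_DEVICE_ATTR ioctls on the vCPU fd, which land in the
 * has_attr/set_attr handlers here. "vcpu_fd" and "irq" are assumed.
 */
#if 0
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&irq,
	};

	if (!ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) &&
	    ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		err(1, "KVM_SET_DEVICE_ATTR");
#endif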
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags)
{
	gpa_t guest_ipa = copy_tags->guest_ipa;
	size_t length = copy_tags->length;
	void __user *tags = copy_tags->addr;
	gpa_t gfn;
	bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
	int ret = 0;

	if (!kvm_has_mte(kvm))
		return -EINVAL;

	if (copy_tags->reserved[0] || copy_tags->reserved[1])
		return -EINVAL;

	if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
		return -EINVAL;

	if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
		return -EINVAL;

	gfn = gpa_to_gfn(guest_ipa);

	mutex_lock(&kvm->slots_lock);

	while (length > 0) {
		struct page *page = __gfn_to_page(kvm, gfn, write);
		struct folio *folio;
		void *maddr;
		unsigned long num_tags;

		if (!page) {
			ret = -EFAULT;
			goto out;
		}

		maddr = page_address(page);
		folio = page_folio(page);

		if (!write) {
			if ((folio_test_hugetlb(folio) &&
			     folio_test_hugetlb_mte_tagged(folio)) ||
			     page_mte_tagged(page))
				num_tags = mte_copy_tags_to_user(tags, maddr,
							MTE_GRANULES_PER_PAGE);
			else
				/* No tags in memory, so write zeros */
				num_tags = MTE_GRANULES_PER_PAGE -
					clear_user(tags, MTE_GRANULES_PER_PAGE);
kvm_release_page_clean(page);
		} else {
			/*
			 * Only locking to serialise with a concurrent
			 * __set_ptes() in the VMM but still overriding the
			 * tags, hence ignoring the return value.
			 */
			if (folio_test_hugetlb(folio))
				folio_try_hugetlb_mte_tagging(folio);
			else
				try_page_mte_tagging(page);
			num_tags = mte_copy_tags_from_user(maddr, tags,
							MTE_GRANULES_PER_PAGE);

			/* uaccess failed, don't leave stale tags */
			if (num_tags != MTE_GRANULES_PER_PAGE)
				mte_clear_page_tags(maddr);

			if (folio_test_hugetlb(folio))
				folio_set_hugetlb_mte_tagged(folio);
			else
				set_page_mte_tagged(page);
kvm_release_page_dirty(page);
}
if (num_tags != MTE_GRANULES_PER_PAGE) {
ret = -EFAULT; goto out;
}
gfn++;
tags += num_tags;
length -= PAGE_SIZE;
}
out:
	mutex_unlock(&kvm->slots_lock);

	/* If some data has been copied report the number of bytes copied */
	if (length != copy_tags->length)
		return copy_tags->length - length;
	return ret;
}
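/*
 * Illustrative userspace sketch (not part of this file): reading the MTE
 * tags of one page of guest memory with the KVM_ARM_MTE_COPY_TAGS vm
 * ioctl. guest_ipa and length must be page-aligned; one tag byte is
 * transferred per MTE granule. "vm_fd" and "gpa" are assumed.
 */
#if 0
	__u8 tag_buf[MTE_GRANULES_PER_PAGE];
	struct kvm_arm_copy_mte_tags copy = {
		.guest_ipa = gpa,
		.length    = PAGE_SIZE,
		.addr      = tag_buf,
		.flags     = KVM_ARM_TAGS_FROM_GUEST,
	};

	if (ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy) < 0)
		err(1, "KVM_ARM_MTE_COPY_TAGS");
#endif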