// SPDX-License-Identifier: GPL-2.0-only
/*
 * set_id_regs - Test for setting ID register from userspace.
 *
 * Copyright (c) 2023 Google LLC.
 *
 * Test that KVM supports setting ID registers from userspace and handles the
 * feature set correctly.
*/
/*
 * How a feature field's value may be adjusted while remaining "safe"
 * (i.e. still acceptable to KVM when written from userspace).
 */
enum ftr_type {
FTR_EXACT, /* Use a predefined safe value */
FTR_LOWER_SAFE, /* Smaller value is safe */
FTR_HIGHER_SAFE, /* Bigger value is safe */
FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
FTR_END, /* Mark the last ftr bits */
};
/* Signedness of a feature field, stored in reg_ftr_bits::sign. */
#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
/*
 * Description of one feature field within an ID register: where it lives
 * (shift/mask), how it is interpreted (sign/type), and its safe value.
 */
struct reg_ftr_bits {
	char *name;		/* Human-readable field name for test output */
	bool sign;		/* FTR_SIGNED or FTR_UNSIGNED */
	enum ftr_type type;	/* How the field may be safely adjusted */
	uint8_t shift;		/* Bit position of the field in the register */
	uint64_t mask;		/* Field mask (already shifted into place) */
	/*
	 * For FTR_EXACT, safe_val is used as the exact safe value.
	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
	 */
	int64_t safe_val;
};
/* Return a safe value to a given ftr_bits an ftr value */
uint64_t get_safe_value(conststruct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
if (ftr_bits->sign == FTR_UNSIGNED) { switch (ftr_bits->type) { case FTR_EXACT:
ftr = ftr_bits->safe_val; break; case FTR_LOWER_SAFE: if (ftr > ftr_bits->safe_val)
ftr--; break; case FTR_HIGHER_SAFE: if (ftr < ftr_max)
ftr++; break; case FTR_HIGHER_OR_ZERO_SAFE: if (ftr == ftr_max)
ftr = 0; elseif (ftr != 0)
ftr++; break; default: break;
}
} elseif (ftr != ftr_max) { switch (ftr_bits->type) { case FTR_EXACT:
ftr = ftr_bits->safe_val; break; case FTR_LOWER_SAFE: if (ftr > ftr_bits->safe_val)
ftr--; break; case FTR_HIGHER_SAFE: if (ftr < ftr_max - 1)
ftr++; break; case FTR_HIGHER_OR_ZERO_SAFE: if (ftr != 0 && ftr != ftr_max - 1)
ftr++; break; default: break;
}
}
return ftr;
}
/* Return an invalid value to a given ftr_bits an ftr value */
uint64_t get_invalid_value(conststruct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
if (ftr_bits->sign == FTR_UNSIGNED) { switch (ftr_bits->type) { case FTR_EXACT:
ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE:
ftr++; break; case FTR_HIGHER_SAFE:
ftr--; break; case FTR_HIGHER_OR_ZERO_SAFE: if (ftr == 0)
ftr = ftr_max; else
ftr--; break; default: break;
}
} elseif (ftr != ftr_max) { switch (ftr_bits->type) { case FTR_EXACT:
ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1); break; case FTR_LOWER_SAFE:
ftr++; break; case FTR_HIGHER_SAFE:
ftr--; break; case FTR_HIGHER_OR_ZERO_SAFE: if (ftr == 0)
ftr = ftr_max - 1; else
ftr--; break; default: break;
}
} else {
ftr = 0;
}
/*
 * NOTE(review): this span appears to be an extraction artifact that merges
 * SEVERAL distinct test functions into one body: the writable-mask /
 * per-feature-field loop of test_vm_ftr_id_regs(), an MPAM / MPAM_frac
 * userspace-write test, an MTE_frac preservation test, and a ucall dispatch
 * that belongs to a guest-run loop.  Token fusion ("staticvoid",
 * "conststruct") and the brace structure make it non-compilable as-is;
 * reconstruct against the upstream selftest before building.  Code below is
 * kept byte-identical; only comments were added.
 */
staticvoid test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; struct reg_mask_range range = {
.addr = (__u64)masks,
}; int ret;
/* KVM should return error when reserved field is not zero */
range.reserved[0] = 1;
ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
/* Get writable masks for feature ID registers */
memset(range.reserved, 0, sizeof(range.reserved));
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
/* Walk every tracked ID register and each of its feature fields. */
for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { conststruct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
uint32_t reg_id = test_regs[i].reg;
uint64_t reg = KVM_ARM64_SYS_REG(reg_id); int idx;
/* Get the index to masks array for the idreg */
idx = encoding_to_range_idx(reg_id);
for (int j = 0; ftr_bits[j].type != FTR_END; j++) { /* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */ if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
ksft_test_result_skip("%s on AARCH64 only system\n",
ftr_bits[j].name); continue;
}
/* Make sure the feature field is writable */
TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
/*
 * NOTE(review): from here the code switches to the MPAM test, which is a
 * separate function upstream — the loop above seems truncated.
 */
/* * If ID_AA64PFR0.MPAM is _not_ officially modifiable and is zero, * check that if it can be set to 1, (i.e. it is supported by the * hardware), that it can't be set to other values.
*/
/* Get writable masks for feature ID registers */
memset(range.reserved, 0, sizeof(range.reserved));
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
/* Writeable? Nothing to test! */
idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1); if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n"); return;
}
/* Get the id register value */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
/* Try to set MPAM=0. This should always be possible. */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val); if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n"); else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");
/* Try to set MPAM=1 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val); if (err)
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n"); else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");
/* Try to set MPAM=2 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val); if (err)
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n"); else
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");
/* And again for ID_AA64PFR1_EL1.MPAM_frac */
idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1); if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n"); return;
}
/* Get the id register value */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
/* Try to set MPAM_frac=0. This should always be possible. */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val); if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM_frac=0 was not accepted\n"); else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=0 worked\n");
/* Try to set MPAM_frac=1 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val); if (err)
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n"); else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=1 was writable\n");
/* Try to set MPAM_frac=2 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val); if (err)
ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n"); else
ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
}
/*
 * NOTE(review): MTE_frac section begins — upstream this is yet another
 * separate test function gated on the MTE capability.
 */
if (!have_cap_arm_mte) {
ksft_test_result_skip("MTE capability not supported, nothing to test\n"); return;
}
/* Get writable masks for feature ID registers */
memset(range.reserved, 0, sizeof(range.reserved));
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1); if ((masks[idx] & ID_AA64PFR1_EL1_MTE_frac_MASK) == ID_AA64PFR1_EL1_MTE_frac_MASK) {
ksft_test_result_skip("ID_AA64PFR1_EL1.MTE_frac is officially writable, nothing to test\n"); return;
}
/* * When MTE is supported but MTE_ASYMM is not (ID_AA64PFR1_EL1.MTE == 2) * ID_AA64PFR1_EL1.MTE_frac == 0xF indicates MTE_ASYNC is unsupported * and MTE_frac == 0 indicates it is supported. * * As MTE_frac was previously unconditionally read as 0, check * that the set to 0 succeeds but does not change MTE_frac * from unsupported (0xF) to supported (0). *
*/
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val); if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n"); return;
}
/* Try to set MTE_frac=0. */
val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MTE_frac_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val); if (err) {
ksft_test_result_fail("ID_AA64PFR1_EL1.MTE_frac=0 was not accepted\n"); return;
}
/* Read back: the write must not flip MTE_frac from NI (0xF) to 0. */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val); if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n"); else
ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}
/*
 * NOTE(review): ucall dispatch below belongs to a guest-run loop (uc and
 * done are not declared in this view) — another merged fragment.
 */
switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc); break; case UCALL_SYNC: /* Make sure the written values are seen by guest */
TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
uc.args[3]); break; case UCALL_DONE:
done = true; break; default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}
}
/*
 * NOTE(review): orphan fragment — the enclosing function (which declares
 * 'level' and 'clidr') starts outside this chunk; presumably part of a
 * CLIDR_EL1 manipulation test.  TODO: confirm against the full file.
 */
/* find the first empty level in the cache hierarchy */ for (level = 1; level < 7; level++) { if (!CLIDR_CTYPE(clidr, level)) break;
}
/* * If you have a mind-boggling 7 levels of cache, congratulations, you * get to fix this.
*/
TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");
/* stick in a unified cache level */
clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);
/*
 * NOTE(review): truncated — no closing brace is visible, "staticvoid" is a
 * token-fusion artifact, and val/el0/aarch64_only are not declared here
 * (they presumably belong to the caller upstream).  Only comments added.
 *
 * Verify that a vCPU reset (KVM_ARM_VCPU_INIT) does not clobber ID
 * register values previously written from userspace.
 */
staticvoid test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
{ /* * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an * architectural reset of the vCPU.
*/
aarch64_vcpu_setup(vcpu, NULL);
/* Every tracked ID register must still hold its pre-reset value. */
for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
/* Check for AARCH64 only system */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.