// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr)) return -ENOENT;
*guest_ext = kvm_isa_ext_arr[kvm_ext]; switch (*guest_ext) { case RISCV_ISA_EXT_SMNPM: /* * Pointer masking effective in (H)S-mode is provided by the * Smnpm extension, so that extension is reported to the guest, * even though the CSR bits for configuring VS-mode pointer * masking on the host side are part of the Ssnpm extension.
*/
host_ext = RISCV_ISA_EXT_SSNPM; break; default:
host_ext = *guest_ext; break;
}
if (!__riscv_isa_extension_available(NULL, host_ext)) return -ENOENT;
return 0;
}
staticbool kvm_riscv_vcpu_isa_enable_allowed(unsignedlong ext)
{ switch (ext) { case KVM_RISCV_ISA_EXT_H: returnfalse; case KVM_RISCV_ISA_EXT_SSCOFPMF: /* Sscofpmf depends on interrupt filtering defined in ssaia */ return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA); case KVM_RISCV_ISA_EXT_SVADU: /* * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero. * Guest OS can use Svadu only when host OS enable Svadu.
*/ return arch_has_hw_pte_young(); case KVM_RISCV_ISA_EXT_V: return riscv_v_vstate_ctrl_user_allowed(); default: break;
}
returntrue;
}
staticbool kvm_riscv_vcpu_isa_disable_allowed(unsignedlong ext)
{ switch (ext) { /* Extensions which don't have any mechanism to disable */ case KVM_RISCV_ISA_EXT_A: case KVM_RISCV_ISA_EXT_C: case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: case KVM_RISCV_ISA_EXT_SMNPM: /* There is not architectural config bit to disable sscofpmf completely */ case KVM_RISCV_ISA_EXT_SSCOFPMF: case KVM_RISCV_ISA_EXT_SSNPM: case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_RISCV_ISA_EXT_SVVPTC: case KVM_RISCV_ISA_EXT_ZAAMO: case KVM_RISCV_ISA_EXT_ZABHA: case KVM_RISCV_ISA_EXT_ZACAS: case KVM_RISCV_ISA_EXT_ZALRSC: case KVM_RISCV_ISA_EXT_ZAWRS: case KVM_RISCV_ISA_EXT_ZBA: case KVM_RISCV_ISA_EXT_ZBB: case KVM_RISCV_ISA_EXT_ZBC: case KVM_RISCV_ISA_EXT_ZBKB: case KVM_RISCV_ISA_EXT_ZBKC: case KVM_RISCV_ISA_EXT_ZBKX: case KVM_RISCV_ISA_EXT_ZBS: case KVM_RISCV_ISA_EXT_ZCA: case KVM_RISCV_ISA_EXT_ZCB: case KVM_RISCV_ISA_EXT_ZCD: case KVM_RISCV_ISA_EXT_ZCF: case KVM_RISCV_ISA_EXT_ZCMOP: case KVM_RISCV_ISA_EXT_ZFA: case KVM_RISCV_ISA_EXT_ZFH: case KVM_RISCV_ISA_EXT_ZFHMIN: case KVM_RISCV_ISA_EXT_ZICCRSE: case KVM_RISCV_ISA_EXT_ZICNTR: case KVM_RISCV_ISA_EXT_ZICOND: case KVM_RISCV_ISA_EXT_ZICSR: case KVM_RISCV_ISA_EXT_ZIFENCEI: case KVM_RISCV_ISA_EXT_ZIHINTNTL: case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: case KVM_RISCV_ISA_EXT_ZIHPM: case KVM_RISCV_ISA_EXT_ZIMOP: case KVM_RISCV_ISA_EXT_ZKND: case KVM_RISCV_ISA_EXT_ZKNE: case KVM_RISCV_ISA_EXT_ZKNH: case KVM_RISCV_ISA_EXT_ZKR: case KVM_RISCV_ISA_EXT_ZKSED: case KVM_RISCV_ISA_EXT_ZKSH: case KVM_RISCV_ISA_EXT_ZKT: case KVM_RISCV_ISA_EXT_ZTSO: case KVM_RISCV_ISA_EXT_ZVBB: case KVM_RISCV_ISA_EXT_ZVBC: case KVM_RISCV_ISA_EXT_ZVFH: case KVM_RISCV_ISA_EXT_ZVFHMIN: case KVM_RISCV_ISA_EXT_ZVKB: case KVM_RISCV_ISA_EXT_ZVKG: case KVM_RISCV_ISA_EXT_ZVKNED: case KVM_RISCV_ISA_EXT_ZVKNHA: case KVM_RISCV_ISA_EXT_ZVKNHB: case KVM_RISCV_ISA_EXT_ZVKSED: case KVM_RISCV_ISA_EXT_ZVKSH: case KVM_RISCV_ISA_EXT_ZVKT: returnfalse; /* Extensions which can be disabled using Smstateen */ 
case KVM_RISCV_ISA_EXT_SSAIA: return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN); case KVM_RISCV_ISA_EXT_SVADE: /* * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero. * Svade can't be disabled unless we support Svadu.
*/ return arch_has_hw_pte_young(); default: break;
}
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) { if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext)) continue; if (kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(guest_ext, vcpu->arch.isa);
}
}
if (KVM_REG_SIZE(reg->id) != sizeof(unsignedlong)) return -EINVAL;
if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT;
switch (reg_num) { case KVM_REG_RISCV_CONFIG_REG(isa): /* * This ONE REG interface is only defined for * single letter extensions.
*/ if (fls(reg_val) >= RISCV_ISA_EXT_BASE) return -EINVAL;
/* * Return early (i.e. do nothing) if reg_val is the same * value retrievable via kvm_riscv_vcpu_get_reg_config().
*/ if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK)) break;
if (!vcpu->arch.ran_atleast_once) { /* Ignore the enable/disable request for certain extensions */ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
isa_ext = kvm_riscv_vcpu_base2isa_ext(i); if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
reg_val &= ~BIT(i); continue;
} if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext)) if (reg_val & BIT(i))
reg_val &= ~BIT(i); if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext)) if (!(reg_val & BIT(i)))
reg_val |= BIT(i);
}
reg_val &= riscv_isa_extension_base(NULL); /* Do not modify anything beyond single letter extensions */
reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
(reg_val & KVM_RISCV_BASE_ISA_MASK);
vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else { return -EBUSY;
} break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) return -ENOENT; if (reg_val != riscv_cbom_block_size) return -EINVAL; break; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) return -ENOENT; if (reg_val != riscv_cboz_block_size) return -EINVAL; break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): if (reg_val == vcpu->arch.mvendorid) break; if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mvendorid = reg_val; else return -EBUSY; break; case KVM_REG_RISCV_CONFIG_REG(marchid): if (reg_val == vcpu->arch.marchid) break; if (!vcpu->arch.ran_atleast_once)
vcpu->arch.marchid = reg_val; else return -EBUSY; break; case KVM_REG_RISCV_CONFIG_REG(mimpid): if (reg_val == vcpu->arch.mimpid) break; if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mimpid = reg_val; else return -EBUSY; break; case KVM_REG_RISCV_CONFIG_REG(satp_mode): if (reg_val != (satp_mode >> SATP_MODE_SHIFT)) return -EINVAL; break; default: return -ENOENT;
}
ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext); if (ret) return ret;
if (reg_val == test_bit(guest_ext, vcpu->arch.isa)) return 0;
if (!vcpu->arch.ran_atleast_once) { /* * All multi-letter extension and a few single letter * extension can be disabled
*/ if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
set_bit(guest_ext, vcpu->arch.isa); elseif (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
clear_bit(guest_ext, vcpu->arch.isa); else return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else { return -EBUSY;
}
if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT;
switch (reg_subtype) { case KVM_REG_RISCV_ISA_SINGLE: return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val); case KVM_REG_RISCV_ISA_MULTI_EN: return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true); case KVM_REG_RISCV_ISA_MULTI_DIS: return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false); default: return -ENOENT;
}
return 0;
}
staticint copy_config_reg_indices(conststruct kvm_vcpu *vcpu,
u64 __user *uindices)
{ int n = 0;
for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsignedlong);
i++) {
u64 size;
u64 reg;
/* * Avoid reporting config reg if the corresponding extension * was not available.
*/ if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) continue; elseif (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) continue;
if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsignedlong); if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsignedlong);
/*
 * Copy the ONE_REG indices of all vector registers to userspace.
 *
 * @vcpu:     the vCPU whose vector register set is being enumerated.
 * @uindices: userspace buffer for the indices; may be NULL, in which
 *            case only the count is computed.
 *
 * Returns the number of vector registers, or -EFAULT on a failed
 * userspace write.
 */
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	/* Nothing to report when the guest has no vector state */
	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
		      KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers: the total is the sum of every per-group
 * count, matching the order used by kvm_riscv_vcpu_copy_reg_indices().
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}
/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 *
 * Each copy_*_reg_indices() helper returns the number of indices it
 * wrote (advancing @uindices by that amount) or a negative errno,
 * which is propagated to the caller.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, conststruct kvm_one_reg *reg)
{ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) { case KVM_REG_RISCV_CONFIG: return kvm_riscv_vcpu_set_reg_config(vcpu, reg); case KVM_REG_RISCV_CORE: return kvm_riscv_vcpu_set_reg_core(vcpu, reg); case KVM_REG_RISCV_CSR: return kvm_riscv_vcpu_set_reg_csr(vcpu, reg); case KVM_REG_RISCV_TIMER: return kvm_riscv_vcpu_set_reg_timer(vcpu, reg); case KVM_REG_RISCV_FP_F: return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F); case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D); case KVM_REG_RISCV_VECTOR: return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); case KVM_REG_RISCV_ISA_EXT: return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); case KVM_REG_RISCV_SBI_EXT: return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); case KVM_REG_RISCV_SBI_STATE: return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg); default: break;
}
return -ENOENT;
}
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, conststruct kvm_one_reg *reg)
{ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) { case KVM_REG_RISCV_CONFIG: return kvm_riscv_vcpu_get_reg_config(vcpu, reg); case KVM_REG_RISCV_CORE: return kvm_riscv_vcpu_get_reg_core(vcpu, reg); case KVM_REG_RISCV_CSR: return kvm_riscv_vcpu_get_reg_csr(vcpu, reg); case KVM_REG_RISCV_TIMER: return kvm_riscv_vcpu_get_reg_timer(vcpu, reg); case KVM_REG_RISCV_FP_F: return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F); case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D); case KVM_REG_RISCV_VECTOR: return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); case KVM_REG_RISCV_ISA_EXT: return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); case KVM_REG_RISCV_SBI_EXT: return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); case KVM_REG_RISCV_SBI_STATE: return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg); default: break;
}
return -ENOENT;
}
/*
 * NOTE(review): removed non-source text here — a German web-page footer
 * ("Messung V0.5", processing time, and a content disclaimer) that was
 * accidentally appended to this file during extraction and is not part
 * of the kernel source.
 */