static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];
	/*
	 * By LoongArch Reference Manual 2.2.10.5
	 * Return value is 0 for undefined CPUCFG index
	 *
	 * Disable preemption since hw gcsr is accessed
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
preempt_enable();
return EMULATE_DONE;
}
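
/*
 * kvm_emu_read_csr() - Emulate a guest csrrd.
 * @vcpu: Virtual CPU context.
 * @csrid: CSR id encoded in the trapped instruction.
 *
 * Returns the software-maintained value for CSRs flagged SW_GCSR,
 * otherwise 0.
 */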
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1
	 * For undefined CSR id, return value is 0
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}
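
/*
 * kvm_emu_write_csr() - Emulate a guest csrwr.
 * @vcpu: Virtual CPU context.
 * @csrid: CSR id encoded in the trapped instruction.
 * @val: New CSR value taken from the guest source register.
 *
 * Returns the previous software-maintained value, which the csrwr
 * emulation places back in the guest's destination register.
 */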
static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}
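
/*
 * kvm_emu_xchg_csr() - Emulate a guest csrxchg.
 * @vcpu: Virtual CPU context.
 * @csrid: CSR id encoded in the trapped instruction.
 * @csr_mask: Bit mask selecting which CSR bits are replaced.
 * @val: Source value; only the bits under @csr_mask are written.
 *
 * Worked example: with old = 0xff, csr_mask = 0x0f and val = 0x12, the
 * stored value is (0xff & ~0x0f) | (0x12 & 0x0f) = 0xf2 and the return
 * value is 0xff & 0x0f = 0x0f.
 */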
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}
return EMULATE_DONE;
}
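
/*
 * kvm_emu_iocsr() - Emulate a guest IOCSR access.
 *
 * The access is first offered to in-kernel devices on KVM_IOCSR_BUS;
 * only when no device claims it does the vCPU exit to user space with
 * the request described in run->iocsr_io.
 */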
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width and direction is encoded with a
	 * different opcode
	 */
rd = inst.reg2_format.rd;
rj = inst.reg2_format.rj;
opcode = inst.reg2_format.opcode;
addr = vcpu->arch.gprs[rj];
run->iocsr_io.phys_addr = addr;
run->iocsr_io.is_write = 0;
val = &vcpu->arch.gprs[rd];
	/* LoongArch is Little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}
if (run->iocsr_io.is_write) {
idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
}
trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
} else {
idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save register id for iocsr read completion */
			vcpu->arch.io_gpr = rd;
}
trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
}
return ret;
}
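
/*
 * kvm_complete_iocsr_read() - Finish an IOCSR read after return from
 * user space.
 *
 * The data is copied into the saved destination GPR with sign
 * extension, e.g. a 1-byte read of 0x80 yields 0xffffffffffffff80 in
 * the 64-bit register.
 */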
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}
return er;
}
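
/*
 * kvm_emu_idle() - Emulate the guest idle instruction.
 *
 * The vCPU blocks in kvm_vcpu_halt() until it becomes runnable again,
 * e.g. when an interrupt is pending for injection.
 */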
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
++vcpu->stat.idle_exits;
trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
if (!kvm_arch_vcpu_runnable(vcpu))
kvm_vcpu_halt(vcpu);
	return EMULATE_DONE;
}

static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the trapped instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * Trigger GSPR:
 * 1) Execute CPUCFG instruction;
 * 2) Execute CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}
return ret;
}
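
/*
 * kvm_emu_mmio_read() - Decode a trapped load and emulate it as MMIO.
 *
 * vcpu->mmio_needed encodes the completion mode: 2 means the loaded
 * value must be sign-extended, 1 means zero-extended (the ld.bu/hu/wu
 * and ldx.bu/hu/wu variants).
 */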
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
rd = inst.reg2i14_format.rd;
opcode = inst.reg2i14_format.opcode;
		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
rd = inst.reg2i12_format.rd;
opcode = inst.reg2i12_format.opcode;
		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
rd = inst.reg3_format.rd;
opcode = inst.reg3_format.opcode;
		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}
if (ret == EMULATE_DO_MMIO) {
trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);
		/*
		 * If an mmio device such as PCH-PIC is emulated in KVM,
		 * it need not return to user space to handle the mmio
		 * exception.
		 */
idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
run->mmio.len, &vcpu->arch.gprs[rd]);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
}
/* Set for kvm_complete_mmio_read() use */
vcpu->arch.io_gpr = rd;
run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	return ret;
}

int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
curr_pc = vcpu->arch.pc;
update_pc(&vcpu->arch);
op8 = (inst.word >> 24) & 0xff;
run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
rd = inst.reg2i14_format.rd;
opcode = inst.reg2i14_format.opcode;
		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
rd = inst.reg2i12_format.rd;
opcode = inst.reg2i12_format.opcode;
rd_val = vcpu->arch.gprs[rd];
		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
rd = inst.reg3_format.rd;
opcode = inst.reg3_format.opcode;
		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}
if (ret == EMULATE_DO_MMIO) {
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);
		/*
		 * If an mmio device such as PCH-PIC is emulated in KVM,
		 * it need not return to user space to handle the mmio
		 * exception.
		 */
idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

/**
 * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use fpu which hasn't been allowed
 * by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	struct kvm_run *run = vcpu->run;
	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}
	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lsx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lasx(vcpu))
kvm_queue_exception(vcpu, EXCCODE_INE, 0);
return RESUME_GUEST;
}
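
/*
 * kvm_handle_lbt_disabled() - Guest used LBT while disabled in root.
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use LBT when it is disabled in the root
 * context.
 */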
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu;
	struct kvm_vcpu *dest;
	unsigned long ipi_bitmap[2];

	/* A1/A2 carry a 128-bit vCPU bitmap, A3 the id of its lowest bit */
	ipi_bitmap[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	ipi_bitmap[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
		dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
		if (!dest)
			continue;

/* Send SWI0 to dest vcpu to emulate IPI interrupt */
kvm_queue_irq(dest, INT_SWI0);
kvm_vcpu_kick(dest);
}
}
/*
 * Hypercall emulation always returns to the guest; the caller should
 * check the return value.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
kvm_send_pv_ipi(vcpu);
ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
}
kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}
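
/*
 * kvm_handle_hypercall() - Dispatch a trapped hypercall.
 *
 * The hypercall code is the immediate of the trapped instruction
 * (vcpu->arch.badi); the function id arrives in A0, arguments in
 * A1-A5, and the return value goes back in A0. A guest-side call
 * looks roughly like this (illustrative sketch only, not code from
 * this file):
 *
 *	register long a0 asm("a0") = KVM_HCALL_FUNC_IPI;
 *	asm volatile("hvcl %1" : "+r" (a0)
 *		     : "i" (KVM_HCALL_SERVICE) : "memory");
 */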
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
	int ret;
	larch_inst inst;
	unsigned int code;
inst.word = vcpu->arch.badi;
code = inst.reg0i15_format.immediate;
ret = RESUME_GUEST;
	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_USER_SERVICE:
		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
			break;
}
vcpu->stat.hypercall_exits++;
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
		vcpu->run->hypercall.flags = 0;
		/*
		 * Set an invalid return value by default; the user-mode VMM
		 * is expected to modify it.
		 */
		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
		ret = RESUME_HOST;
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG only takes effect when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
}
		fallthrough;
	default:
		/* Treat it as a noop instruction, only set return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	return ret;
}