/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * KVM/MIPS: Support for hardware virtualization extensions * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Authors: Yann Le Du <ledu@kymasys.com>
*/
/* Pointers to last VCPU loaded on each physical CPU */ staticstruct kvm_vcpu *last_vcpu[NR_CPUS]; /* Pointers to last VCPU executed on each physical CPU */ staticstruct kvm_vcpu *last_exec_vcpu[NR_CPUS];
/* * Number of guest VTLB entries to use, so we can catch inconsistency between * CPUs.
*/ staticunsignedint kvm_vz_guest_vtlb_size;
staticinlinevoid kvm_vz_write_gc0_ebase(long v)
{ /* * First write with WG=1 to write upper bits, then write again in case * WG should be left at 0. * write_gc0_ebase_64() is no longer UNDEFINED since R6.
*/ if (sizeof(long) == 8 &&
(cpu_has_mips64r6 || cpu_has_ebase_wg)) {
write_gc0_ebase_64(v | MIPS_EBASE_WG);
write_gc0_ebase_64(v);
} else {
write_gc0_ebase(v | MIPS_EBASE_WG);
write_gc0_ebase(v);
}
}
/* * These Config bits may be writable by the guest: * Config: [K23, KU] (!TLB), K0 * Config1: (none) * Config2: [TU, SU] (impl) * Config3: ISAOnExc * Config4: FTLBPageSize * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
*/
/* Permit MSAEn changes if MSA supported and enabled */ if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
/* * Permit guest FPU mode changes if FPU is enabled and the relevant * feature exists according to FIR register.
*/ if (kvm_mips_guest_has_fpu(&vcpu->arch)) { if (cpu_has_ufr)
mask |= MIPS_CONF5_UFR; if (cpu_has_fre)
mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
}
switch (priority) { case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI); break;
case MIPS_EXC_INT_IO_1: case MIPS_EXC_INT_IO_2: case MIPS_EXC_INT_IPI_1: case MIPS_EXC_INT_IPI_2: if (cpu_has_guestctl2)
set_c0_guestctl2(irq); else
set_gc0_cause(irq); break;
switch (priority) { case MIPS_EXC_INT_TIMER: /* * Explicitly clear irq associated with Cause.IP[IPTI] * if GuestCtl2 virtual interrupt register not * supported or if not using GuestCtl2 Hardware Clear.
*/ if (cpu_has_guestctl2) { if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
} break;
case MIPS_EXC_INT_IO_1: case MIPS_EXC_INT_IO_2: case MIPS_EXC_INT_IPI_1: case MIPS_EXC_INT_IPI_2: /* Clear GuestCtl2.VIP irq if not using Hardware Clear */ if (cpu_has_guestctl2) { if (!(read_c0_guestctl2() & (irq << 14)))
clear_c0_guestctl2(irq);
} else {
clear_gc0_cause(irq);
} break;
/** * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer. * @vcpu: Virtual CPU. * * Returns: true if the VZ GTOffset & real guest CP0_Count should be used * instead of software emulation of guest timer. * false otherwise.
*/ staticbool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{ if (kvm_mips_count_disabled(vcpu)) returnfalse;
/* Chosen frequency must match real frequency */ if (mips_hpt_frequency != vcpu->arch.count_hz) returnfalse;
/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */ if (current_cpu_data.gtoffset_mask != 0xffffffff) returnfalse;
returntrue;
}
/** * _kvm_vz_restore_stimer() - Restore soft timer state. * @vcpu: Virtual CPU. * @compare: CP0_Compare register value, restored by caller. * @cause: CP0_Cause register to restore. * * Restore VZ state relating to the soft timer. The hard timer can be enabled * later.
*/ staticvoid _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
u32 cause)
{ /* * Avoid spurious counter interrupts by setting Guest CP0_Count to just * after Guest CP0_Compare.
*/
write_c0_gtoffset(compare - read_c0_count());
/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 count_at_freeze, count_now;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	kvm_mips_freeze_hrtimer(vcpu, &count_at_freeze);
	write_c0_gtoffset(count_at_freeze - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	count_now = read_gc0_count();
	if (count_now - count_at_freeze > compare - count_at_freeze - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
/** * kvm_vz_acquire_htimer() - Switch to hard timer state. * @vcpu: Virtual CPU. * * Restore hard timer state on top of existing soft timer state if possible. * * Since hard timer won't remain active over preemption, preemption should be * disabled by the caller.
*/ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
u32 gctl0;
gctl0 = read_c0_guestctl0(); if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) { /* enable guest access to hard timer */
write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	/* Save CP0_Compare first; 'compare' is also needed below */
	compare = read_gc0_compare();
	*out_compare = compare;

	/* Monotonic timestamp matching before_count, for the soft timer */
	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}
/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	u32 guestctl0, compare, cause;

	guestctl0 = read_c0_guestctl0();
	if (guestctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(guestctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		/* soft timer already in use; just snapshot the registers */
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}
/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 guestctl0, compare, cause;

	preempt_disable();
	guestctl0 = read_c0_guestctl0();
	if (guestctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(guestctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}
/** * is_eva_access() - Find whether an instruction is an EVA memory accessor. * @inst: 32-bit instruction encoding. * * Finds whether @inst encodes an EVA memory access instruction, which would * indicate that emulation of it should access the user mode address space * instead of the kernel mode address space. This matters for MUSUK segments * which are TLB mapped for user mode but unmapped for kernel mode. * * Returns: Whether @inst encodes an EVA accessor instruction.
*/ staticbool is_eva_access(union mips_instruction inst)
{ if (inst.spec3_format.opcode != spec3_op) returnfalse;
switch (inst.spec3_format.func) { case lwle_op: case lwre_op: case cachee_op: case sbe_op: case she_op: case sce_op: case swe_op: case swle_op: case swre_op: case prefe_op: case lbue_op: case lhue_op: case lbe_op: case lhe_op: case lle_op: case lwe_op: returntrue; default: returnfalse;
}
}
/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:	3-bit encoded access mode.
 * @eu:	Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	union mips_instruction inst;
	unsigned int status;
	u32 magic;
	u32 *opc;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	magic = 0x70080000 << am;
	if ((s32)magic < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		return !eu || !(read_gc0_status() & ST0_ERL);
	}

	magic <<= 8;
	if ((s32)magic >= 0)
		/* Unmapped regardless of mode */
		return false;

	/*
	 * MUSUK
	 * TLB mapped if not in kernel mode
	 */
	status = read_gc0_status();
	if (!(status & (ST0_EXL | ST0_ERL)) &&
	    (status & ST0_KSU))
		return true;

	/*
	 * EVA access instructions in kernel
	 * mode access user address space.
	 */
	opc = (u32 *)vcpu->arch.pc;
	if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	return !err && is_eva_access(inst);
}
/** * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA. * @vcpu: KVM VCPU state. * @gva: Guest virtual address to convert. * @gpa: Output guest physical address. * * Convert a guest virtual address (GVA) which is valid according to the guest * context, to a guest physical address (GPA). * * Returns: 0 on success. * -errno on failure.
*/ staticint kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsignedlong gva, unsignedlong *gpa)
{
u32 gva32 = gva; unsignedlong segctl;
if ((long)gva == (s32)gva32) { /* Handle canonical 32-bit virtual address */ if (cpu_guest_has_segments) { unsignedlong mask, pa;
switch (gva32 >> 29) { case 0: case 1: /* CFG5 (1GB) */
segctl = read_gc0_segctl2() >> 16;
mask = (unsignedlong)0xfc0000000ull; break; case 2: case 3: /* CFG4 (1GB) */
segctl = read_gc0_segctl2();
mask = (unsignedlong)0xfc0000000ull; break; case 4: /* CFG3 (512MB) */
segctl = read_gc0_segctl1() >> 16;
mask = (unsignedlong)0xfe0000000ull; break; case 5: /* CFG2 (512MB) */
segctl = read_gc0_segctl1();
mask = (unsignedlong)0xfe0000000ull; break; case 6: /* CFG1 (512MB) */
segctl = read_gc0_segctl0() >> 16;
mask = (unsignedlong)0xfe0000000ull; break; case 7: /* CFG0 (512MB) */
segctl = read_gc0_segctl0();
mask = (unsignedlong)0xfe0000000ull; break; default: /* * GCC 4.9 isn't smart enough to figure out that * segctl and mask are always initialised.
*/
unreachable();
}
/* Unmapped, find guest physical address */
pa = (segctl << 20) & mask;
pa |= gva32 & ~mask;
*gpa = pa; return 0;
} elseif ((s32)gva32 < (s32)0xc0000000) { /* legacy unmapped KSeg0 or KSeg1 */
*gpa = gva32 & 0x1fffffff; return 0;
} #ifdef CONFIG_64BIT
} elseif ((gva & 0xc000000000000000) == 0x8000000000000000) { /* XKPHYS */ if (cpu_guest_has_segments) { /* * Each of the 8 regions can be overridden by SegCtl2.XR * to use SegCtl1.XAM.
*/
segctl = read_gc0_segctl2(); if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
segctl = read_gc0_segctl1(); if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
0)) goto tlb_mapped;
}
} /* * Traditionally fully unmapped. * Bits 61:59 specify the CCA, which we can just mask off here. * Bits 58:PABITS should be zero, but we shouldn't have got here * if it wasn't.
*/
*gpa = gva & 0x07ffffffffffffff; return 0; #endif
}
/** * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA. * @vcpu: KVM VCPU state. * @badvaddr: Root BadVAddr. * @gpa: Output guest physical address. * * VZ implementations are permitted to report guest virtual addresses (GVA) in * BadVAddr on a root exception during guest execution, instead of the more * convenient guest physical addresses (GPA). When we get a GVA, this function * converts it to a GPA, taking into account guest segmentation and guest TLB * state. * * Returns: 0 on success. * -errno on failure.
*/ staticint kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsignedlong badvaddr, unsignedlong *gpa)
{ unsignedint gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
/* If BadVAddr is GPA, then all is well in the world */ if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
*gpa = badvaddr; return 0;
}
/* Otherwise we'd expect it to be GVA ... */ if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA, "Unexpected gexccode %#x\n", gexccode)) return -EINVAL;
/* ... and we need to perform the GVA->GPA translation in software */ return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
if (read_gc0_pagegrain() & PG_ELPA)
mask |= 0x00ffffff00000000ull; if (cpu_guest_has_mvh)
mask |= MIPS_MAAR_VH;
/* Set or clear VH */ if (op == mtc_op) { /* clear VH */
val &= ~MIPS_MAAR_VH;
} elseif (op == dmtc_op) { /* set VH to match VL */
val &= ~MIPS_MAAR_VH; if (val & MIPS_MAAR_VL)
val |= MIPS_MAAR_VH;
}
/* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er;
if (inst.co_format.co) { switch (inst.co_format.func) { case wait_op:
er = kvm_mips_emul_wait(vcpu); break; default:
er = EMULATE_FAIL;
}
} else {
rt = inst.c0r_format.rt;
rd = inst.c0r_format.rd;
sel = inst.c0r_format.sel;
switch (inst.c0r_format.rs) { case dmfc_op: case mfc_op: #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++; #endif if (rd == MIPS_CP0_COUNT &&
sel == 0) { /* Count */
val = kvm_mips_read_count(vcpu);
} elseif (rd == MIPS_CP0_COMPARE &&
sel == 0) { /* Compare */
val = read_gc0_compare();
} elseif (rd == MIPS_CP0_LLADDR &&
sel == 0) { /* LLAddr */ if (cpu_guest_has_rw_llb)
val = read_gc0_lladdr() &
MIPS_LLADDR_LLB; else
val = 0;
} elseif (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) { /* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
val = vcpu->arch.maar[
kvm_read_sw_gc0_maari(cop0)];
} elseif ((rd == MIPS_CP0_PRID &&
(sel == 0 || /* PRid */
sel == 2 || /* CDMMBase */
sel == 3)) || /* CMGCRBase */
(rd == MIPS_CP0_STATUS &&
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
(sel == 6 || /* Config6 */
sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) ||
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel]; #ifdef CONFIG_CPU_LOONGSON64
} elseif (rd == MIPS_CP0_DIAG &&
(sel == 0)) { /* Diag */
val = cop0->reg[rd][sel]; #endif
} else {
val = 0;
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL) { /* Sign extend */ if (inst.c0r_format.rs == mfc_op)
val = (int)val;
vcpu->arch.gprs[rt] = val;
}
default:
er = EMULATE_FAIL; break;
}
} /* Rollback PC only if emulation was unsuccessful */ if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
curr_pc, __func__, inst.word);
/* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er;
base = inst.i_format.rs;
op_inst = inst.i_format.rt; if (cpu_has_mips_r6)
offset = inst.spec3_format.simmediate; else
offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
/* Secondary or tirtiary cache ops ignored */ if (cache != Cache_I && cache != Cache_D) return EMULATE_DONE;
switch (op_inst) { case Index_Invalidate_I:
flush_icache_line_indexed(va); return EMULATE_DONE; case Index_Writeback_Inv_D:
flush_dcache_line_indexed(va); return EMULATE_DONE; case Hit_Invalidate_I: case Hit_Invalidate_D: case Hit_Writeback_Inv_D: if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) { /* We can just flush entire icache */
local_flush_icache_range(0, 0); return EMULATE_DONE;
}
/* So far, other platforms support guest hit cache ops */ break; default: break;
}
/* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er;
switch (vcpu->arch.gprs[rs]) { case LOONGSON_CFG0:
vcpu->arch.gprs[rd] = 0x14c000; break; case LOONGSON_CFG1:
hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
LOONGSON_CFG1_SFBP);
vcpu->arch.gprs[rd] = hostcfg; break; case LOONGSON_CFG2:
hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
vcpu->arch.gprs[rd] = hostcfg; break; case LOONGSON_CFG3:
vcpu->arch.gprs[rd] = hostcfg; break; default: /* Don't export any other advanced features to guest */
vcpu->arch.gprs[rd] = 0; break;
} break;
default:
kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
er = EMULATE_FAIL; break;
}
/* Rollback PC only if emulation was unsuccessful */ if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
vcpu->arch.pc = curr_pc;
}
return er;
} #endif
staticenum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{ enum emulation_result er = EMULATE_DONE; struct kvm_vcpu_arch *arch = &vcpu->arch; union mips_instruction inst; int rd, rt, sel; int err;
/* * Fetch the instruction.
*/ if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word); if (err) return EMULATE_FAIL;
switch (inst.r_format.opcode) { case cop0_op:
er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu); break; #ifndef CONFIG_CPU_MIPSR6 case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); break; #endif #ifdef CONFIG_CPU_LOONGSON64 case lwc2_op:
er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu); break; #endif case spec3_op: switch (inst.spec3_format.func) { #ifdef CONFIG_CPU_MIPSR6 case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); break; #endif case rdhwr_op: if (inst.r_format.rs || (inst.r_format.re >> 3)) goto unknown;
default:
kvm_err("GPSI exception not supported (%p/%#x)\n",
opc, inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL; break;
}
return er;
}
staticenum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{ enum emulation_result er = EMULATE_DONE; struct kvm_vcpu_arch *arch = &vcpu->arch; union mips_instruction inst; int err;
/* * Fetch the instruction.
*/ if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word); if (err) return EMULATE_FAIL;
/* complete MTC0 on behalf of guest and advance EPC */ if (inst.c0r_format.opcode == cop0_op &&
inst.c0r_format.rs == mtc_op &&
inst.c0r_format.z == 0) { int rt = inst.c0r_format.rt; int rd = inst.c0r_format.rd; int sel = inst.c0r_format.sel; unsignedint val = arch->gprs[rt]; unsignedint old_val, change;
if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { /* FR bit should read as zero if no FPU */ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
/* * Also don't allow FR to be set if host doesn't support * it.
*/ if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
val &= ~ST0_FR;
old_val = read_gc0_status();
change = val ^ old_val;
if (change & ST0_FR) { /* * FPU and Vector register state is made * UNPREDICTABLE by a change of FR, so don't * even bother saving it.
*/
kvm_drop_fpu(vcpu);
}
/* * If MSA state is already live, it is undefined how it * interacts with FR=0 FPU state, and we don't want to * hit reserved instruction exceptions trying to save * the MSA state later when CU=1 && FR=1, so play it * safe and save it first.
*/ if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/* * Propagate FRE changes immediately if the FPU * context is already loaded.
*/ if (change & MIPS_CONF5_FRE &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
preempt_enable();
val = old_val ^
(change & kvm_vz_config5_guest_wrmask(vcpu));
write_gc0_config5(val);
} else {
kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL)
er = update_pc(vcpu, cause);
} else {
kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
return er;
}
staticenum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{ /* * Presumably this is due to MC (guest mode change), so lets trace some * relevant info.
*/
trace_kvm_guest_mode_change(vcpu);
return EMULATE_DONE;
}
staticenum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
{ enum emulation_result er; union mips_instruction inst; unsignedlong curr_pc; int err;
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word); if (err) return EMULATE_FAIL;
/* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er;
er = kvm_mips_emul_hypcall(vcpu, inst); if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
staticint kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE;
u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT; int ret = RESUME_GUEST;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode); switch (gexccode) { case MIPS_GCTL0_GEXC_GPSI:
++vcpu->stat.vz_gpsi_exits;
er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu); break; case MIPS_GCTL0_GEXC_GSFC:
++vcpu->stat.vz_gsfc_exits;
er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu); break; case MIPS_GCTL0_GEXC_HC:
++vcpu->stat.vz_hc_exits;
er = kvm_trap_vz_handle_hc(cause, opc, vcpu); break; case MIPS_GCTL0_GEXC_GRR:
++vcpu->stat.vz_grr_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu); break; case MIPS_GCTL0_GEXC_GVA:
++vcpu->stat.vz_gva_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu); break; case MIPS_GCTL0_GEXC_GHFC:
++vcpu->stat.vz_ghfc_exits;
er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu); break; case MIPS_GCTL0_GEXC_GPA:
++vcpu->stat.vz_gpa_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu); break; default:
++vcpu->stat.vz_resvd_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu); break;
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} elseif (er == EMULATE_HYPERCALL) {
ret = kvm_mips_handle_hypcall(vcpu);
} else {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} return ret;
}
/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
 * by the root context.
 *
 * Return: value indicating whether to resume the host or the guest
 *         (RESUME_HOST or RESUME_GUEST)
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			/*
			 * NOTE(review): this early exit returns EMULATE_FAIL
			 * from an int (RESUME_*) returning function and calls
			 * preempt_enable() with no visible matching disable —
			 * preserved as-is; confirm against callers.
			 */
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_FAIL) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		BUG();
	}
	return ret;
}
/** * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root. * @vcpu: Virtual CPU context. * * Handle when the guest attempts to use MSA when it is disabled in the root * context. * * Return: value indicating whether to resume the host or the guest * (RESUME_HOST or RESUME_GUEST)
*/ staticint kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{ /* * If MSA not present or not exposed to guest or FR=0, the MSA operation * should have been treated as a reserved instruction! * Same if CU1=1, FR=0. * If MSA already in use, we shouldn't get this at all.
*/ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST;
}
kvm_own_msa(vcpu);
return RESUME_GUEST;
}
staticint kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{ struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr; union mips_instruction inst; enum emulation_result er = EMULATE_DONE; int err, ret = RESUME_GUEST;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) { /* A code fetch fault doesn't count as an MMIO */ if (kvm_is_ifetch_fault(&vcpu->arch)) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST;
}
/* Fetch the instruction */ if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word); if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_load(inst, cause, vcpu); if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} elseif (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} return ret;
}
staticint kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{ struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr; union mips_instruction inst; enum emulation_result er = EMULATE_DONE; int err; int ret = RESUME_GUEST;
/* Just try the access again if we couldn't do the translation */ if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr)) return RESUME_GUEST;
vcpu->arch.host_cp0_badvaddr = badvaddr;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) { /* Fetch the instruction */ if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word); if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_store(inst, cause, vcpu); if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} elseif (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} return ret;
}
ret = ARRAY_SIZE(kvm_vz_get_one_regs); if (cpu_guest_has_userlocal)
++ret; if (cpu_guest_has_badinstr)
++ret; if (cpu_guest_has_badinstrp)
++ret; if (cpu_guest_has_contextconfig)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
if (copy_to_user(indices, kvm_vz_get_one_regs, sizeof(kvm_vz_get_one_regs))) return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal) {
index = KVM_REG_MIPS_CP0_USERLOCAL; if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT;
++indices;
} if (cpu_guest_has_badinstr) {
index = KVM_REG_MIPS_CP0_BADINSTR; if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT;
++indices;
} if (cpu_guest_has_badinstrp) {
index = KVM_REG_MIPS_CP0_BADINSTRP; if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT;
++indices;
} if (cpu_guest_has_contextconfig) { if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig, sizeof(kvm_vz_get_one_regs_contextconfig))) return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
} if (cpu_guest_has_segments) { if (copy_to_user(indices, kvm_vz_get_one_regs_segments, sizeof(kvm_vz_get_one_regs_segments))) return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
} if (cpu_guest_has_htw || cpu_guest_has_ldpte) { if (copy_to_user(indices, kvm_vz_get_one_regs_htw, sizeof(kvm_vz_get_one_regs_htw))) return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
} if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) { for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
index = KVM_REG_MIPS_CP0_MAAR(i); if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT;
++indices;
}
index = KVM_REG_MIPS_CP0_MAARI; if (copy_to_user(indices, &index, sizeof(index))) return -EFAULT;
++indices;
} for (i = 0; i < 6; ++i) { if (!cpu_guest_has_kscr(i + 2)) continue;
if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i], sizeof(kvm_vz_get_one_regs_kscratch[i]))) return -EFAULT;
++indices;
}
return 0;
}
staticinline s64 entrylo_kvm_to_user(unsignedlong v)
{
s64 mask, ret = v;
if (BITS_PER_LONG == 32) { /* * KVM API exposes 64-bit version of the register, so move the * RI/XI bits up into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= ((s64)v & mask) << 32;
} return ret;
}
/*
 * entrylo_user_to_kvm() - Convert a user ABI EntryLo value to internal form.
 * @v:	64-bit EntryLo value from the KVM API.
 *
 * The KVM API exposes a 64-bit version of the register, so on 32-bit kernels
 * the RI/XI bits are moved down from their 64-bit positions into the top of
 * the 32-bit value.
 */
static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long ret = v;

	if (BITS_PER_LONG == 32) {
		const unsigned long rixi = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;

		ret = (ret & ~rixi) | ((v >> 32) & rixi);
	}
	return ret;
}
switch (reg->id) { case KVM_REG_MIPS_CP0_INDEX:
write_gc0_index(v); break; case KVM_REG_MIPS_CP0_ENTRYLO0:
write_gc0_entrylo0(entrylo_user_to_kvm(v)); break; case KVM_REG_MIPS_CP0_ENTRYLO1:
write_gc0_entrylo1(entrylo_user_to_kvm(v)); break; case KVM_REG_MIPS_CP0_CONTEXT:
write_gc0_context(v); break; case KVM_REG_MIPS_CP0_CONTEXTCONFIG: if (!cpu_guest_has_contextconfig) return -EINVAL;
write_gc0_contextconfig(v); break; case KVM_REG_MIPS_CP0_USERLOCAL: if (!cpu_guest_has_userlocal) return -EINVAL;
write_gc0_userlocal(v); break; #ifdef CONFIG_64BIT case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: if (!cpu_guest_has_contextconfig) return -EINVAL;
write_gc0_xcontextconfig(v); break; #endif case KVM_REG_MIPS_CP0_PAGEMASK:
write_gc0_pagemask(v); break; case KVM_REG_MIPS_CP0_PAGEGRAIN:
write_gc0_pagegrain(v); break; case KVM_REG_MIPS_CP0_SEGCTL0: if (!cpu_guest_has_segments) return -EINVAL;
write_gc0_segctl0(v); break; case KVM_REG_MIPS_CP0_SEGCTL1: if (!cpu_guest_has_segments) return -EINVAL;
write_gc0_segctl1(v); break; case KVM_REG_MIPS_CP0_SEGCTL2: if (!cpu_guest_has_segments) return -EINVAL;
write_gc0_segctl2(v); break; case KVM_REG_MIPS_CP0_PWBASE: if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL;
write_gc0_pwbase(v); break; case KVM_REG_MIPS_CP0_PWFIELD: if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL;
write_gc0_pwfield(v); break; case KVM_REG_MIPS_CP0_PWSIZE: if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL;
write_gc0_pwsize(v); break; case KVM_REG_MIPS_CP0_WIRED:
change_gc0_wired(MIPSR6_WIRED_WIRED, v); break; case KVM_REG_MIPS_CP0_PWCTL: if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) return -EINVAL;
write_gc0_pwctl(v); break; case KVM_REG_MIPS_CP0_HWRENA:
write_gc0_hwrena(v); break; case KVM_REG_MIPS_CP0_BADVADDR:
write_gc0_badvaddr(v); break; case KVM_REG_MIPS_CP0_BADINSTR: if (!cpu_guest_has_badinstr) return -EINVAL;
write_gc0_badinstr(v); break; case KVM_REG_MIPS_CP0_BADINSTRP: if (!cpu_guest_has_badinstrp) return -EINVAL;
write_gc0_badinstrp(v); break; case KVM_REG_MIPS_CP0_COUNT:
kvm_mips_write_count(vcpu, v); break; case KVM_REG_MIPS_CP0_ENTRYHI:
write_gc0_entryhi(v); break; case KVM_REG_MIPS_CP0_COMPARE:
kvm_mips_write_compare(vcpu, v, false); break; case KVM_REG_MIPS_CP0_STATUS:
write_gc0_status(v); break; case KVM_REG_MIPS_CP0_INTCTL:
write_gc0_intctl(v); break; case KVM_REG_MIPS_CP0_CAUSE: /* * If the timer is stopped or started (DC bit) it must look * atomic with changes to the timer interrupt pending bit (TI). * A timer interrupt should not happen in between.
*/ if ((read_gc0_cause() ^ v) & CAUSEF_DC) { if (v & CAUSEF_DC) { /* disable timer first */
kvm_mips_count_disable_cause(vcpu);
change_gc0_cause((u32)~CAUSEF_DC, v);
} else { /* enable timer last */
change_gc0_cause((u32)~CAUSEF_DC, v);
kvm_mips_count_enable_cause(vcpu);
}
} else {
write_gc0_cause(v);
} break; case KVM_REG_MIPS_CP0_EPC:
write_gc0_epc(v); break; case KVM_REG_MIPS_CP0_PRID: switch (boot_cpu_type()) { case CPU_CAVIUM_OCTEON3: /* Octeon III has a guest.PRid, but its read-only */ break; default:
kvm_write_c0_guest_prid(cop0, v); break;
} break; case KVM_REG_MIPS_CP0_EBASE:
kvm_vz_write_gc0_ebase(v); break; case KVM_REG_MIPS_CP0_CONFIG:
cur = read_gc0_config();
change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config(v);
} break; case KVM_REG_MIPS_CP0_CONFIG1: if (!cpu_guest_has_conf1) break;
cur = read_gc0_config1();
change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config1(v);
} break; case KVM_REG_MIPS_CP0_CONFIG2: if (!cpu_guest_has_conf2) break;
cur = read_gc0_config2();
change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config2(v);
} break; case KVM_REG_MIPS_CP0_CONFIG3: if (!cpu_guest_has_conf3) break;
cur = read_gc0_config3();
change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config3(v);
} break; case KVM_REG_MIPS_CP0_CONFIG4: if (!cpu_guest_has_conf4) break;
cur = read_gc0_config4();
change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config4(v);
} break; case KVM_REG_MIPS_CP0_CONFIG5: if (!cpu_guest_has_conf5) break;
cur = read_gc0_config5();
change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu); if (change) {
v = cur ^ change;
write_gc0_config5(v);
} break; case KVM_REG_MIPS_CP0_CONFIG6:
cur = kvm_read_sw_gc0_config6(cop0);
change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu); if (change) {
v = cur ^ change;
kvm_write_sw_gc0_config6(cop0, (int)v);
} break; case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) return -EINVAL;
idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); if (idx >= ARRAY_SIZE(vcpu->arch.maar)) return -EINVAL;
vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); break; case KVM_REG_MIPS_CP0_MAARI: if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) return -EINVAL;
kvm_write_maari(vcpu, v); break; #ifdef CONFIG_64BIT case KVM_REG_MIPS_CP0_XCONTEXT:
write_gc0_xcontext(v); break; #endif case KVM_REG_MIPS_CP0_ERROREPC:
write_gc0_errorepc(v); break; case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; if (!cpu_guest_has_kscr(idx)) return -EINVAL; switch (idx) { case 2:
write_gc0_kscratch1(v); break; case 3:
write_gc0_kscratch2(v); break; case 4:
write_gc0_kscratch3(v); break; case 5:
write_gc0_kscratch4(v); break; case 6:
write_gc0_kscratch5(v); break; case 7:
write_gc0_kscratch6(v); break;
} break; case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v); break; case KVM_REG_MIPS_COUNT_RESUME:
ret = kvm_mips_set_count_resume(vcpu, v); break; case KVM_REG_MIPS_COUNT_HZ:
ret = kvm_mips_set_count_hz(vcpu, v); break; default: return -EINVAL;
} return ret;
}
if (!(++guestid & GUESTID_MASK)) { if (cpu_has_vtag_icache)
flush_icache_all();
if (!guestid) /* fix version if needed */
guestid = GUESTID_FIRST_VERSION;
++guestid; /* guestid 0 reserved for root */
/* start new guestid cycle */
kvm_vz_local_flush_roottlb_all_guests();
kvm_vz_local_flush_guesttlb_all();
}
guestid_cache(cpu) = guestid;
}
/* Returns 1 if the guest TLB may be clobbered */ staticint kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{ int ret = 0; int i;
if (!kvm_request_pending(vcpu)) return 0;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { if (cpu_has_guestid) { /* Drop all GuestIDs for this VCPU */
for_each_possible_cpu(i)
vcpu->arch.vzguestid[i] = 0; /* This will clobber guest TLB contents too */
ret = 1;
} /* * For Root ASID Dealias (RAD) we don't do anything here, but we * still need the request to ensure we recheck asid_flush_mask. * We can still return 0 as only the root TLB will be affected * by a root ASID flush.
*/
}
/* Expand the wired TLB array if necessary */
wired &= MIPSR6_WIRED_WIRED; if (wired > vcpu->arch.wired_tlb_limit) {
tlbs = krealloc(vcpu->arch.wired_tlb, wired * sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); if (WARN_ON(!tlbs)) { /* Save whatever we can */
wired = vcpu->arch.wired_tlb_limit;
} else {
vcpu->arch.wired_tlb = tlbs;
vcpu->arch.wired_tlb_limit = wired;
}
}
if (wired) /* Save wired entries from the guest TLB */
kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); /* Invalidate any dropped entries since last time */ for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
vcpu->arch.wired_tlb[i].tlb_mask = 0;
}
vcpu->arch.wired_tlb_used = wired;
}
/*
 * kvm_vz_vcpu_load_wired() - Reload saved wired entries into the guest TLB.
 * @vcpu:	VCPU whose previously saved wired TLB entries are restored.
 *
 * Does nothing if no wired entry array has been allocated for this VCPU.
 */
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* No wired entry array allocated yet: nothing to restore */
	if (!vcpu->arch.wired_tlb)
		return;

	/* Load the saved wired entries back into the guest TLB */
	kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
			     vcpu->arch.wired_tlb_used);
}
/* * Are we entering guest context on a different CPU to last time? * If so, the VCPU's guest TLB state on this CPU may be stale.
*/
migrated = (vcpu->arch.last_exec_cpu != cpu);
vcpu->arch.last_exec_cpu = cpu;
/* * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and * remains set until another vcpu is loaded in. As a rule GuestRID * remains zeroed when in root context unless the kernel is busy * manipulating guest tlb entries.
*/ if (cpu_has_guestid) { /* * Check if our GuestID is of an older version and thus invalid. * * We also discard the stored GuestID if we've executed on * another CPU, as the guest mappings may have changed without * hypervisor knowledge.
*/ if (migrated ||
(vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
GUESTID_VERSION_MASK) {
kvm_vz_get_new_guestid(cpu, vcpu);
vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
trace_kvm_guestid_change(vcpu,
vcpu->arch.vzguestid[cpu]);
}
/* Restore GuestID */
change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
} else { /* * The Guest TLB only stores a single guest's TLB state, so * flush it if another VCPU has executed on this CPU. * * We also flush if we've executed on another CPU, as the guest * mappings may have changed without hypervisor knowledge.
*/ if (migrated || last_exec_vcpu[cpu] != vcpu)
kvm_vz_local_flush_guesttlb_all();
last_exec_vcpu[cpu] = vcpu;
/* * Root ASID dealiases guest GPA mappings in the root TLB. * Allocate new root ASID if needed.
*/ if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
get_new_mmu_context(gpa_mm); else
check_mmu_context(gpa_mm);
}
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.15 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.