/*
 * Load the guest vcpu FP state into the FPU if it is not already there.
 *
 * Setting MSR_FP in current->thread.regs->msr tells the host that we are
 * holding the FPU on the guest's behalf, so the host can save the guest
 * vcpu FP state if another thread needs the FPU.  This simulates an FP
 * unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		/* Point the host at our FP area and mark the FPU as in use */
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}
/*
 * Save guest vcpu FP state back into the thread (via fp_save_area) and
 * release our claim on the FPU.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/*
	 * We always treat the FP bit as enabled from the host
	 * perspective, so only the shadow MSR needs adjusting: copy
	 * the guest's MSR_FP bit into it.
	 */
	vcpu->arch.shadow_msr = (vcpu->arch.shadow_msr & ~MSR_FP) |
				(vcpu->arch.shared->msr & MSR_FP);
#endif
}
/*
 * Simulate an AltiVec unavailable fault to load guest state from the
 * vcpu into the AltiVec unit.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (!cpu_has_feature(CPU_FTR_ALTIVEC))
		return;

	if (!(current->thread.regs->msr & MSR_VEC)) {
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		/* Point the host at our VR area and mark AltiVec as in use */
		current->thread.vr_save_area = &vcpu->arch.vr;
		current->thread.regs->msr |= MSR_VEC;
	}
#endif
}
/*
 * Save guest vcpu AltiVec state back into the thread (via vr_save_area)
 * and release the AltiVec unit.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (!cpu_has_feature(CPU_FTR_ALTIVEC))
		return;

	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
	current->thread.vr_save_area = NULL;
#endif
}
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize the guest's desire for debug interrupts into the shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = (vcpu->arch.shadow_msr & ~MSR_DE) |
				(vcpu->arch.shared->msr & MSR_DE);
#endif

	/* Force-enable debug interrupts when user space wants to debug */
	if (!vcpu->guest_debug)
		return;

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * Since there is no shadow MSR, sync MSR_DE into the guest
	 * visible MSR.
	 */
	vcpu->arch.shared->msr |= MSR_DE;
#else
	vcpu->arch.shadow_msr |= MSR_DE;
	vcpu->arch.shared->msr &= ~MSR_DE;
#endif
}
/* * Helper function for "full" MSR writes. No need to call this if only * EE/CE/ME/DE/RI are changing.
*/ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
u32 old_msr = vcpu->arch.shared->msr;
if (!keep_irq)
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
#ifdef CONFIG_KVM_BOOKE_HV /* * If an interrupt is pending but masked, raise a guest doorbell * so that we are notified when the guest enables the relevant * MSR bit.
*/ if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); #endif
return allowed;
}
/* * Return the number of jiffies until the next timeout. If the timeout is * longer than the TIMER_NEXT_MAX_DELTA, then return TIMER_NEXT_MAX_DELTA * because the larger value can break the timer APIs.
*/ staticunsignedlong watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
u64 tb, wdt_tb, wdt_ticks = 0;
u64 nr_jiffies = 0;
u32 period = TCR_GET_WP(vcpu->arch.tcr);
wdt_tb = 1ULL << (63 - period);
tb = get_tb(); /* * The watchdog timeout will hapeen when TB bit corresponding * to watchdog will toggle from 0 to 1.
*/ if (tb & wdt_tb)
wdt_ticks = wdt_tb;
wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
/* Convert timebase ticks to jiffies */
nr_jiffies = wdt_ticks;
if (do_div(nr_jiffies, tb_ticks_per_jiffy))
nr_jiffies++;
/* * If TSR_ENW and TSR_WIS are not set then no need to exit to * userspace, so clear the KVM_REQ_WATCHDOG request.
*/ if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
nr_jiffies = watchdog_next_timeout(vcpu); /* * If the number of jiffies of watchdog timer >= TIMER_NEXT_MAX_DELTA * then do not run the watchdog timer as this can break timer APIs.
*/ if (nr_jiffies < TIMER_NEXT_MAX_DELTA)
mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); else
timer_delete(&vcpu->arch.wdt_timer);
spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
/* Time out event */ if (tsr & TSR_ENW) { if (tsr & TSR_WIS)
final = 1; else
new_tsr = tsr | TSR_WIS;
} else {
new_tsr = tsr | TSR_ENW;
}
} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
if (new_tsr & TSR_WIS) {
smp_wmb();
kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
kvm_vcpu_kick(vcpu);
}
/* * If this is final watchdog expiry and some action is required * then exit to userspace.
*/ if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
vcpu->arch.watchdog_enabled) {
smp_wmb();
kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
kvm_vcpu_kick(vcpu);
}
/* * Stop running the watchdog timer after final expiration to * prevent the host from being flooded with timers if the * guest sets a short period. * Timers will resume when TSR/TCR is updated next time.
*/ if (!final)
arm_next_watchdog(vcpu);
}
/* Tell the guest about our interrupt status */
vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	/* Exception delivery may have raised a request; start over */
	if (kvm_request_pending(vcpu))
		return 1;

	/* Guest not in wait state: enter it directly */
	if (!(vcpu->arch.shared->msr & MSR_WE))
		return 0;

	/* Guest executed mtmsr with MSR[WE]: halt until woken */
	local_irq_enable();
	kvm_vcpu_halt(vcpu);
	hard_irq_disable();

	kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
	return 1;
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{ int r = 1; /* Indicate we want to get back into the guest */
if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
update_timer_ints(vcpu); #ifdefined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvmppc_core_flush_tlb(vcpu); #endif
if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
r = 0;
}
if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
vcpu->run->epr.epr = 0;
vcpu->arch.epr_needed = true;
vcpu->run->exit_reason = KVM_EXIT_EPR;
r = 0;
}
return r;
}
int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{ int ret, s; struct debug_reg debug;
if (!vcpu->arch.sane) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; return -EINVAL;
}
s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) {
ret = s; goto out;
} /* interrupts now hard-disabled */
#ifdef CONFIG_PPC_FPU /* Save userspace FPU state in stack */
enable_kernel_fp();
/* * Since we can't trap on MSR_FP in GS-mode, we consider the guest * as always using the FPU.
*/
kvmppc_load_guest_fp(vcpu); #endif
#ifdef CONFIG_ALTIVEC /* Save userspace AltiVec state in stack */ if (cpu_has_feature(CPU_FTR_ALTIVEC))
enable_kernel_altivec(); /* * Since we can't trap on MSR_VEC in GS-mode, we consider the guest * as always using the AltiVec.
*/
kvmppc_load_guest_altivec(vcpu); #endif
er = kvmppc_emulate_instruction(vcpu); switch (er) { case EMULATE_DONE: /* don't overwrite subtypes, just account kvm_stats */
kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); /* Future optimization: only reload non-volatiles if
* they were actually modified by emulation. */ return RESUME_GUEST_NV;
case EMULATE_AGAIN: return RESUME_GUEST;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08lx)\n",
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); /* For debugging, encode the failing instruction and
* report it to userspace. */
vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST;
/* Inject a program interrupt if trap debug is not allowed */ if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
kvmppc_core_queue_program(vcpu, ESR_PTR);
/* * For interrupts needed to be handled by host interrupt handlers, * corresponding host handler are called from here in similar way * (but not exact) as they are called from low level handler * (such as from arch/powerpc/kernel/head_fsl_booke.S).
*/ staticvoid kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, unsignedint exit_nr)
{ struct pt_regs regs;
switch (exit_nr) { case BOOKE_INTERRUPT_EXTERNAL:
kvmppc_fill_pt_regs(®s);
do_IRQ(®s); break; case BOOKE_INTERRUPT_DECREMENTER:
kvmppc_fill_pt_regs(®s);
timer_interrupt(®s); break; #ifdefined(CONFIG_PPC_DOORBELL) case BOOKE_INTERRUPT_DOORBELL:
kvmppc_fill_pt_regs(®s);
doorbell_exception(®s); break; #endif case BOOKE_INTERRUPT_MACHINE_CHECK: /* FIXME */ break; case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
kvmppc_fill_pt_regs(®s);
performance_monitor_exception(®s); break; case BOOKE_INTERRUPT_WATCHDOG:
kvmppc_fill_pt_regs(®s); #ifdef CONFIG_BOOKE_WDT
WatchdogException(®s); #else
unknown_exception(®s); #endif break; case BOOKE_INTERRUPT_CRITICAL:
kvmppc_fill_pt_regs(®s);
unknown_exception(®s); break; case BOOKE_INTERRUPT_DEBUG: /* Save DBSR before preemption is enabled */
vcpu->arch.dbsr = mfspr(SPRN_DBSR);
kvmppc_clear_dbsr(); break;
}
}
case EMULATE_FAIL:
pr_debug("%s: load instruction from guest address %lx failed\n",
__func__, vcpu->arch.regs.nip); /* For debugging, encode the failing instruction and
* report it to userspace. */
vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
vcpu->run->hw.hardware_exit_reason |= last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL); return RESUME_HOST;
default:
BUG();
}
}
/* * kvmppc_handle_exit * * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsignedint exit_nr)
{ struct kvm_run *run = vcpu->run; int r = RESUME_HOST; int s; int idx;
u32 last_inst = KVM_INST_FETCH_FAILED;
ppc_inst_t pinst; enum emulation_result emulated = EMULATE_DONE;
/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
kvmppc_fix_ee_after_exit();
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
/* restart interrupts if they were meant for the host */
kvmppc_restart_interrupt(vcpu, exit_nr);
/* * get last instruction before being preempted * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
*/ switch (exit_nr) { case BOOKE_INTERRUPT_DATA_STORAGE: case BOOKE_INTERRUPT_DTLB_MISS: case BOOKE_INTERRUPT_HV_PRIV:
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
last_inst = ppc_inst_val(pinst); break; case BOOKE_INTERRUPT_PROGRAM: /* SW breakpoints arrive as illegal instructions on HV */ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
last_inst = ppc_inst_val(pinst);
} break; default: break;
}
trace_kvm_exit(exit_nr, vcpu);
context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) {
local_irq_enable(); /* * Service IRQs here before vtime_account_guest_exit() so any * ticks that occurred while running the guest are accounted to * the guest. If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling * interrupts here, which has the problem that it accounts * interrupt processing overhead to the host.
*/
local_irq_disable();
}
vtime_account_guest_exit();
if (emulated != EMULATE_DONE) {
r = kvmppc_resume_inst_load(vcpu, emulated, last_inst); goto out;
}
switch (exit_nr) { case BOOKE_INTERRUPT_MACHINE_CHECK:
printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
kvmppc_dump_vcpu(vcpu); /* For debugging, send invalid exit reason to user space */
run->hw.hardware_exit_reason = ~1ULL << 32;
run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
r = RESUME_HOST; break;
case BOOKE_INTERRUPT_EXTERNAL:
kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_DECREMENTER:
kvmppc_account_exit(vcpu, DEC_EXITS);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_WATCHDOG:
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_DOORBELL:
kvmppc_account_exit(vcpu, DBELL_EXITS);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
kvmppc_account_exit(vcpu, GDBELL_EXITS);
/* * We are here because there is a pending guest interrupt * which could not be delivered as MSR_CE or MSR_ME was not * set. Once we break from here we will retry delivery.
*/
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_GUEST_DBELL:
kvmppc_account_exit(vcpu, GDBELL_EXITS);
/* * We are here because there is a pending guest interrupt * which could not be delivered as MSR_EE was not set. Once * we break from here we will retry delivery.
*/
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_HV_PRIV:
r = emulation_exit(vcpu); break;
case BOOKE_INTERRUPT_PROGRAM: if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
(last_inst == KVMPPC_INST_SW_BREAKPOINT)) { /* * We are here because of an SW breakpoint instr, * so lets return to host to handle.
*/
r = kvmppc_handle_debug(vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS); break;
}
if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { /* * Program traps generated by user-level software must * be handled by the guest kernel. * * In GS mode, hypervisor privileged instructions trap * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are * actual program interrupts, handled by the guest.
*/
kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
r = RESUME_GUEST;
kvmppc_account_exit(vcpu, USR_PR_INST); break;
}
r = emulation_exit(vcpu); break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
kvmppc_account_exit(vcpu, FP_UNAVAIL);
r = RESUME_GUEST; break;
#ifdef CONFIG_SPE case BOOKE_INTERRUPT_SPE_UNAVAIL: { if (vcpu->arch.shared->msr & MSR_SPE)
kvmppc_vcpu_enable_spe(vcpu); else
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_SPE_UNAVAIL);
r = RESUME_GUEST; break;
}
case BOOKE_INTERRUPT_SPE_FP_DATA:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_SPE_FP_ROUND:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST; break; #elifdefined(CONFIG_SPE_POSSIBLE) case BOOKE_INTERRUPT_SPE_UNAVAIL: /* * Guest wants SPE, but host kernel doesn't support it. Send * an "unimplemented operation" program check to the guest.
*/
kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
r = RESUME_GUEST; break;
/* * These really should never happen without CONFIG_SPE, * as we should never enable the real MSR[SPE] in the guest.
*/ case BOOKE_INTERRUPT_SPE_FP_DATA: case BOOKE_INTERRUPT_SPE_FP_ROUND:
printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
__func__, exit_nr, vcpu->arch.regs.nip);
run->hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST; break; #endif/* CONFIG_SPE_POSSIBLE */
/* * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC, * see kvmppc_e500mc_check_processor_compat().
*/ #ifdef CONFIG_ALTIVEC case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
r = RESUME_GUEST; break; #endif
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
vcpu->arch.fault_esr);
kvmppc_account_exit(vcpu, DSI_EXITS);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_INST_STORAGE:
kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
kvmppc_account_exit(vcpu, ISI_EXITS);
r = RESUME_GUEST; break;
case BOOKE_INTERRUPT_ALIGNMENT:
kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
vcpu->arch.fault_esr);
r = RESUME_GUEST; break;
#ifdef CONFIG_KVM_BOOKE_HV case BOOKE_INTERRUPT_HV_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR)) {
kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
} else { /* * hcall from guest userspace -- send privileged * instruction program check.
*/
kvmppc_core_queue_program(vcpu, ESR_PPR);
}
r = RESUME_GUEST; break; #else case BOOKE_INTERRUPT_SYSCALL: if (!(vcpu->arch.shared->msr & MSR_PR) &&
(((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { /* KVM PV hypercalls */
kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
r = RESUME_GUEST;
} else { /* Guest syscalls */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
}
kvmppc_account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST; break; #endif
case BOOKE_INTERRUPT_DTLB_MISS: { unsignedlong eaddr = vcpu->arch.fault_dear; int gtlb_index;
gpa_t gpaddr;
gfn_t gfn;
#ifdef CONFIG_KVM_E500V2 if (!(vcpu->arch.shared->msr & MSR_PR) &&
(eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
kvmppc_map_magic(vcpu);
kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
break;
} #endif
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */
kvmppc_core_queue_dtlb_miss(vcpu,
vcpu->arch.fault_dear,
vcpu->arch.fault_esr);
kvmppc_mmu_dtlb_miss(vcpu);
kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
r = RESUME_GUEST; break;
}
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { /* The guest TLB had a mapping, but the shadow TLB * didn't, and it is RAM. This could be because: * a) the entry is mapping the host kernel, or * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without
* invoking the guest. */
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
} else { /* Guest has mapped and accessed a page which is not
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr;
r = kvmppc_emulate_mmio(vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
srcu_read_unlock(&vcpu->kvm->srcu, idx); break;
}
case BOOKE_INTERRUPT_ITLB_MISS: { unsignedlong eaddr = vcpu->arch.regs.nip;
gpa_t gpaddr;
gfn_t gfn; int gtlb_index;
r = RESUME_GUEST;
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); if (gtlb_index < 0) { /* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
kvmppc_mmu_itlb_miss(vcpu);
kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); break;
}
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { /* The guest TLB had a mapping, but the shadow TLB * didn't. This could be because: * a) the entry is mapping the host kernel, or * b) the guest used a large mapping which we're faking * Either way, we need to satisfy the fault without
* invoking the guest. */
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
} else { /* Guest mapped and leaped at non-RAM! */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
}
srcu_read_unlock(&vcpu->kvm->srcu, idx); break;
}
case BOOKE_INTERRUPT_DEBUG: {
r = kvmppc_handle_debug(vcpu); if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS); break;
}
out: /* * To avoid clobbering exit_reason, only check for signals if we * aren't already exiting to userspace for some other reason.
*/ if (!(r & RESUME_HOST)) {
s = kvmppc_prepare_to_enter(vcpu); if (s <= 0)
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); else { /* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu);
kvmppc_load_guest_altivec(vcpu);
}
}
/* XXX read permissions from the guest TLB */
pte->may_read = true;
pte->may_write = true;
pte->may_execute = true;
return 0;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{ struct debug_reg *dbg_reg; int n, b = 0, w = 0; int ret = 0;
#ifdef CONFIG_KVM_BOOKE_HV /* * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
*/
dbg_reg->dbcr1 = 0;
dbg_reg->dbcr2 = 0; #else /* * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR * is set.
*/
dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
DBCR1_IAC4US;
dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; #endif
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) goto out;
ret = -EINVAL; for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
uint64_t addr = dbg->arch.bp[n].addr;
uint32_t type = dbg->arch.bp[n].type;
if (type == KVMPPC_DEBUG_NONE) continue;
if (type & ~(KVMPPC_DEBUG_WATCH_READ |
KVMPPC_DEBUG_WATCH_WRITE |
KVMPPC_DEBUG_BREAKPOINT)) goto out;
/* Eye-catching numbers so we know if the guest takes an interrupt
* before it's programmed its own IVPR/IVORs. */
vcpu->arch.ivpr = 0x55550000; for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
vcpu->arch.ivor[i] = 0x7700 | i * 4;
kvmppc_init_timing_stats(vcpu);
r = kvmppc_core_vcpu_setup(vcpu); if (r)
vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
kvmppc_sanity_check(vcpu); return r;
}
int __init kvmppc_booke_init(void)
{ #ifndef CONFIG_KVM_BOOKE_HV unsignedlong ivor[16]; unsignedlong *handler = kvmppc_booke_handler_addr; unsignedlong max_ivor = 0; unsignedlong handler_len; int i;
/* We install our own exception handlers by hijacking IVPR. IVPR must
* be 16-bit aligned, so we need a 64KB allocation. */
kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
VCPU_SIZE_ORDER); if (!kvmppc_booke_handlers) return -ENOMEM;
/* XXX make sure our handlers are smaller than Linux's */
/* Copy our interrupt handlers to match host IVORs. That way we don't
* have to swap the IVORs on every guest/host transition. */
ivor[0] = mfspr(SPRN_IVOR0);
ivor[1] = mfspr(SPRN_IVOR1);
ivor[2] = mfspr(SPRN_IVOR2);
ivor[3] = mfspr(SPRN_IVOR3);
ivor[4] = mfspr(SPRN_IVOR4);
ivor[5] = mfspr(SPRN_IVOR5);
ivor[6] = mfspr(SPRN_IVOR6);
ivor[7] = mfspr(SPRN_IVOR7);
ivor[8] = mfspr(SPRN_IVOR8);
ivor[9] = mfspr(SPRN_IVOR9);
ivor[10] = mfspr(SPRN_IVOR10);
ivor[11] = mfspr(SPRN_IVOR11);
ivor[12] = mfspr(SPRN_IVOR12);
ivor[13] = mfspr(SPRN_IVOR13);
ivor[14] = mfspr(SPRN_IVOR14);
ivor[15] = mfspr(SPRN_IVOR15);
for (i = 0; i < 16; i++) { if (ivor[i] > max_ivor)
max_ivor = i;
/*
 * NOTE(review): the trailing text below this point in the extracted file was
 * a German website disclaimer, not source code — extraction residue.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the syntax colouring and the measurement are still experimental."
 */