/*
 * NOTE(review): fragment — this is the tail of a function whose header
 * lies before the start of this chunk (it reads like the end of the
 * trecheckpoint emulation path in arch/powerpc/kvm/book3s_emulate.c —
 * TODO confirm against upstream). Code below is byte-identical to the
 * original; only comments were added.
 */
preempt_disable(); /* * need flush FP/VEC/VSX to vcpu save area before * copy.
 */
/* Flush FP/VEC/VSX and TAR from live registers into the vcpu save area. */
kvmppc_giveup_ext(vcpu, MSR_VSX);
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
/* Copy the now-saved state into the TM checkpoint area, then the TM SPRs. */
kvmppc_copyto_vcpu_tm(vcpu);
kvmppc_save_tm_sprs(vcpu);
/* * as a result of trecheckpoint. set TS to suspended.
 */
/* Clear MSR[TS] and set it to suspended (MSR_TS_S) in the guest MSR. */
guest_msr &= ~(MSR_TS_MASK);
guest_msr |= MSR_TS_S;
kvmppc_set_msr(vcpu, guest_msr);
/* Reload the checkpointed TM state for the PR guest before resuming. */
kvmppc_restore_tm_pr(vcpu);
preempt_enable();
}
/*
 * NOTE(review): truncated — the body of kvmppc_emulate_tabort() is cut
 * off after the declarations below; the next line of this chunk begins a
 * different function entirely. "unsignedlong" is an extraction artifact
 * (a space was lost from "unsigned long"); restore the full function
 * from upstream rather than hand-editing this fragment. Code is left
 * byte-identical; only comments were added.
 */
/* emulate tabort. at guest privilege state */ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{ /* currently we only emulate tabort. but no emulation of other * tabort variants since there is no kernel usage of them at * present.
 */ unsignedlong guest_msr = kvmppc_get_msr(vcpu);
uint64_t org_texasr;
/*
 * kvmppc_core_emulate_op_pr() - emulate a privileged/trapped instruction
 * for a Book3S PR guest.
 *
 * @vcpu:    virtual CPU whose instruction trapped
 * @inst:    the raw instruction word ("unsignedint" is an extraction
 *           artifact for "unsigned int")
 * @advance: out-flag; cleared to 0 when the handler has already set the
 *           guest PC itself (e.g. rfid) so the caller must not advance it
 *
 * Returns EMULATE_DONE, EMULATE_AGAIN or EMULATE_FAIL.
 *
 * NOTE(review): this block is a damaged extraction. Lines are fused,
 * several tokens are garbled, and a fragment of a *different* function
 * (an mtspr emulator) is spliced into the middle — see the NOTE below at
 * the OP_31_XOP_FAKE_SC1 case. Code is byte-identical to the original;
 * only comments were added.
 */
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, unsignedint inst, int *advance)
{ int emulated = EMULATE_DONE; int rt = get_rt(inst); int rs = get_rs(inst); int ra = get_ra(inst); int rb = get_rb(inst);
/* 0x44000002 is the "sc 1" hypercall encoding used below in byte-swapped
 * form to detect broken little-endian guests. */
u32 inst_sc = 0x44000002;
switch (get_op(inst)) { case 0:
/* Primary opcode 0: normally an illegal instruction (EMULATE_FAIL),
 * except for the byte-reversed "sc 1" from early broken LE guests,
 * which is answered with EV_UNIMPLEMENTED instead. */
emulated = EMULATE_FAIL; if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
(inst == swab32(inst_sc))) { /* * This is the byte reversed syscall instruction of our * hypercall handler. Early versions of LE Linux didn't * swap the instructions correctly and ended up in * illegal instructions. * Just always fail hypercalls on these broken systems.
 */
kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
emulated = EMULATE_DONE;
/* Opcode 19 (rfid/rfi): return from interrupt — load PC from SRR0 and
 * MSR from SRR1, with a TM fix-up under CONFIG_PPC_TRANSACTIONAL_MEM.
 * "unsignedlong" below is the same lost-space extraction artifact. */
} break; case 19: switch (get_xop(inst)) { case OP_19_XOP_RFID: case OP_19_XOP_RFI: { unsignedlong srr1 = kvmppc_get_srr1(vcpu); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM unsignedlong cur_msr = kvmppc_get_msr(vcpu);
/* * add rules to fit in ISA specification regarding TM * state transition in TM disable/Suspended state, * and target TM state is TM inactive(00) state. (the * change should be suppressed).
 */ if (((cur_msr & MSR_TM) == 0) &&
((srr1 & MSR_TM) == 0) &&
MSR_TM_SUSPENDED(cur_msr) &&
!MSR_TM_ACTIVE(srr1))
/* Keep TS = suspended: the ISA forbids this particular transition, so
 * the TS change requested by SRR1 is suppressed. */
srr1 |= MSR_TS_S; #endif
kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
kvmppc_set_msr(vcpu, srr1);
/* PC was set explicitly above — tell the caller not to advance it. */
*advance = 0; break;
}
default:
emulated = EMULATE_FAIL; break;
/* Opcode 31: X-form privileged ops, dispatched on the extended opcode. */
} break; case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR:
kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); break; case OP_31_XOP_MTMSRD:
{
/* mtmsrd: with the L bit (inst & 0x10000) only MSR[RI] and MSR[EE]
 * are updated (fast path); otherwise the whole MSR is replaced. */
ulong rs_val = kvmppc_get_gpr(vcpu, rs); if (inst & 0x10000) {
ulong new_msr = kvmppc_get_msr(vcpu);
new_msr &= ~(MSR_RI | MSR_EE);
new_msr |= rs_val & (MSR_RI | MSR_EE);
kvmppc_set_msr_fast(vcpu, new_msr);
} else
kvmppc_set_msr(vcpu, rs_val); break;
} case OP_31_XOP_MTMSR:
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_MFSR:
/* mfsr: segment register number is an immediate field in the inst. */
{ int srnum;
srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32); if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, rt, sr);
} break;
/* mfsrin: segment register number comes from bits 31:28 of GPR[rb]. */
} case OP_31_XOP_MFSRIN:
{ int srnum;
srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf; if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
kvmppc_set_gpr(vcpu, rt, sr);
} break;
} case OP_31_XOP_MTSR:
vcpu->arch.mmu.mtsrin(vcpu,
(inst >> 16) & 0xf,
kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
/* tlbie/tlbiel: bit 0x00200000 selects large-page invalidation. */
kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_TLBIE: case OP_31_XOP_TLBIEL:
{ bool large = (inst & 0x00200000) ? true : false;
ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu->arch.mmu.tlbie(vcpu, addr, large); break;
} #ifdef CONFIG_PPC_BOOK3S_64 case OP_31_XOP_FAKE_SC1:
/*
 * NOTE(review): SPLICE DAMAGE from here to the stray "#endif #endif"
 * below. The FAKE_SC1 case opens ("cmd" and "i" are declared but never
 * used here), then the text jumps into what is clearly the body of an
 * mtspr emulator: "guest_msr" and "sprn" are not declared in this
 * function, SPRN_* case labels appear inside a get_xop() switch, the
 * "unprivileged:" label below has no matching goto here, and there are
 * two #endif for one visible #ifdef. This cannot compile as-is; the
 * missing middle must be restored from the upstream file. Left
 * byte-identical below.
 */
{ /* SC 1 papr hypercalls */
ulong cmd = kvmppc_get_gpr(vcpu, 3); int i;
/* only emulate for privilege guest, since problem state * guest can run with TM enabled and we don't expect to * trap at here for that case.
 */
WARN_ON(guest_msr & MSR_PR);
if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
(sprn == SPRN_TFHAR))) { /* it is illegal to mtspr() TM regs in * other than non-transactional state, with * the exception of TFHAR in suspend state.
 */
kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
emulated = EMULATE_AGAIN; break;
}
break; #endif #endif case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case SPRN_DSCR: case SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_MMCRS: case SPRN_MMCRA: case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: case SPRN_UMMCR2: case SPRN_UAMOR: case SPRN_IAMR: case SPRN_AMR: #endif break;
unprivileged: default:
/* Default/unknown SPR write (spliced from the mtspr emulator): SPRs
 * with bit 4 set are privileged — raise a privileged-instruction
 * program check if the guest is in problem state; otherwise treat the
 * access as an illegal instruction. */
pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn); if (sprn & 0x10) { if (kvmppc_get_msr(vcpu) & MSR_PR) {
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
}
} else { if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
emulated = EMULATE_AGAIN;
}
} break;
}
return emulated;
}
/*
 * kvmppc_core_emulate_mfspr_pr() - emulate mfspr for a Book3S PR guest.
 *
 * @vcpu:    virtual CPU performing the read
 * @sprn:    special-purpose register number being read
 * @spr_val: out-parameter receiving the emulated SPR value
 *
 * Returns EMULATE_DONE on success or EMULATE_AGAIN when a program check
 * was queued instead.
 *
 * NOTE(review): truncated — the closing "return emulated; }" is missing
 * from this extraction (the next line begins a new function). Code is
 * byte-identical to the original; only comments were added.
 */
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{ int emulated = EMULATE_DONE;
switch (sprn) { case SPRN_IBAT0U ... SPRN_IBAT3L: case SPRN_IBAT4U ... SPRN_IBAT7L: case SPRN_DBAT0U ... SPRN_DBAT3L: case SPRN_DBAT4U ... SPRN_DBAT7L:
{ struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
/* NOTE(review): "bat" is looked up but never read — the assignment to
 * *spr_val from the BAT register appears to have been dropped by the
 * extraction; confirm against upstream. */
break;
/* SDR1 is hypervisor-privileged: refuse unless PRIV_HYPER is allowed. */
} case SPRN_SDR1: if (!spr_allowed(vcpu, PRIV_HYPER)) goto unprivileged;
*spr_val = to_book3s(vcpu)->sdr1; break; case SPRN_DSISR:
*spr_val = kvmppc_get_dsisr(vcpu); break; case SPRN_DAR:
*spr_val = kvmppc_get_dar(vcpu); break; case SPRN_HIOR:
*spr_val = to_book3s(vcpu)->hior; break; case SPRN_HID0:
*spr_val = to_book3s(vcpu)->hid[0]; break; case SPRN_HID1:
*spr_val = to_book3s(vcpu)->hid[1]; break; case SPRN_HID2_750FX: case SPRN_HID2_GEKKO:
*spr_val = to_book3s(vcpu)->hid[2]; break; case SPRN_HID4: case SPRN_HID4_GEKKO:
*spr_val = to_book3s(vcpu)->hid[4]; break; case SPRN_HID5:
*spr_val = to_book3s(vcpu)->hid[5]; break; case SPRN_CFAR: case SPRN_DSCR:
*spr_val = 0; break; case SPRN_PURR: /* * On exit we would have updated purr
 */
*spr_val = vcpu->arch.purr; break; case SPRN_SPURR: /* * On exit we would have updated spurr
 */
*spr_val = vcpu->arch.spurr; break; case SPRN_VTB:
*spr_val = to_book3s(vcpu)->vtb; break; case SPRN_IC:
*spr_val = vcpu->arch.ic; break; case SPRN_GQR0: case SPRN_GQR1: case SPRN_GQR2: case SPRN_GQR3: case SPRN_GQR4: case SPRN_GQR5: case SPRN_GQR6: case SPRN_GQR7:
*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; break; #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_FSCR:
*spr_val = vcpu->arch.fscr; break; case SPRN_BESCR:
*spr_val = vcpu->arch.bescr; break; case SPRN_EBBHR:
*spr_val = vcpu->arch.ebbhr; break; case SPRN_EBBRR:
/* TM SPRs are read from the real hardware registers between
 * tm_enable()/tm_disable(). "elseif" below is an extraction artifact
 * for "else if". */
*spr_val = vcpu->arch.ebbrr; break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case SPRN_TFHAR: case SPRN_TEXASR: case SPRN_TFIAR: if (!cpu_has_feature(CPU_FTR_TM)) break;
tm_enable(); if (sprn == SPRN_TFHAR)
*spr_val = mfspr(SPRN_TFHAR); elseif (sprn == SPRN_TEXASR)
*spr_val = mfspr(SPRN_TEXASR); elseif (sprn == SPRN_TFIAR)
*spr_val = mfspr(SPRN_TFIAR);
/* Unimplemented but harmless SPRs: read as zero. */
tm_disable(); break; #endif #endif case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: case SPRN_L2CR: case SPRN_MMCR0_GEKKO: case SPRN_MMCR1_GEKKO: case SPRN_PMC1_GEKKO: case SPRN_PMC2_GEKKO: case SPRN_PMC3_GEKKO: case SPRN_PMC4_GEKKO: case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: #ifdef CONFIG_PPC_BOOK3S_64 case SPRN_MMCRS: case SPRN_MMCRA: case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: case SPRN_UMMCR2: case SPRN_TIR: case SPRN_UAMOR: case SPRN_IAMR: case SPRN_AMR: #endif
*spr_val = 0; break; default:
unprivileged:
/* Unknown SPR read: privileged SPRs (bit 4 set) from problem state get
 * SRR1_PROGPRIV; otherwise an illegal-instruction program check. */
pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn); if (sprn & 0x10) { if (kvmppc_get_msr(vcpu) & MSR_PR) {
kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
emulated = EMULATE_AGAIN;
}
} else { if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
sprn == 4 || sprn == 5 || sprn == 6) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
emulated = EMULATE_AGAIN;
}
}
/*
 * kvmppc_alignment_dar() - compute the effective address (DAR) for an
 * alignment-interrupting instruction.
 *
 * @vcpu: virtual CPU that took the alignment interrupt
 * @inst: the faulting instruction ("unsignedint" is an extraction
 *        artifact for "unsigned int")
 *
 * On Book3S-64 the hardware DAR is already valid and is returned
 * directly; on 32-bit the address is reconstructed from the
 * instruction's RA/RB/displacement fields.
 *
 * NOTE(review): truncated — the 32-bit "return dar;", the matching
 * "#endif" and the function's closing brace are missing from this
 * extraction. Code is byte-identical to the original; only comments
 * were added.
 */
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsignedint inst)
{ #ifdef CONFIG_PPC_BOOK3S_64 /* * Linux's fix_alignment() assumes that DAR is valid, so can we
 */ return vcpu->arch.fault_dar; #else
ulong dar = 0;
ulong ra = get_ra(inst);
ulong rb = get_rb(inst);
/* D-form FP load/store: EA = (RA|0) + sign-extended 16-bit displacement. */
switch (get_op(inst)) { case OP_LFS: case OP_LFD: case OP_STFD: case OP_STFS: if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
/* X-form (opcode 31): EA = (RA|0) + (RB). */
dar += (s32)((s16)inst); break; case 31: if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += kvmppc_get_gpr(vcpu, rb); break; default:
printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); break;
}
/*
 * NOTE(review): extraction residue, not source code. The trailing text
 * was a German website disclaimer, translated here and fenced in a
 * comment so it can no longer be parsed as C: "The information on this
 * web page has been carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * information provided is guaranteed. Note: the coloured syntax
 * highlighting and the measurement are still experimental."
 */