/* Always fail to lock the cache */
vcpu_e500->l1csr0 |= L1CSR0_CUL; return EMULATE_DONE;
}
staticint kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsignedint inst, int rt)
{ /* Expose one thread per vcpu */ if (get_tmrn(inst) == TMRN_TMCFG0) {
kvmppc_set_gpr(vcpu, rt,
1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT)); return EMULATE_DONE;
}
return EMULATE_FAIL;
}
int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, unsignedint inst, int *advance)
{ int emulated = EMULATE_DONE; int ra = get_ra(inst); int rb = get_rb(inst); int rt = get_rt(inst);
gva_t ea;
switch (get_op(inst)) { case 31: switch (get_xop(inst)) {
case XOP_DCBTLS:
emulated = kvmppc_e500_emul_dcbtls(vcpu); break;
#ifdef CONFIG_KVM_E500MC case XOP_MSGSND:
emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); break;
case XOP_MSGCLR:
emulated = kvmppc_e500_emul_msgclr(vcpu, rb); break; #endif
case XOP_TLBRE:
emulated = kvmppc_e500_emul_tlbre(vcpu); break;
case XOP_TLBWE:
emulated = kvmppc_e500_emul_tlbwe(vcpu); break;
case XOP_TLBSX:
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
emulated = kvmppc_e500_emul_tlbsx(vcpu, ea); break;
case XOP_TLBILX: { int type = rt & 0x3;
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea); break;
}
case XOP_TLBIVAX:
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); break;
case XOP_MFTMR:
emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt); break;
case XOP_EHPRIV:
emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance); break;
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
if (emulated == EMULATE_FAIL)
emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);
return emulated;
}
/*
 * Emulate a guest mtspr for e500-specific SPRs.
 *
 * Writes are applied to the shadow vcpu state (PID/MAS registers, cache
 * control, IVORs, ...).  SPRs not handled here are forwarded to the
 * generic BookE mtspr emulation.
 *
 * Returns EMULATE_DONE on success, EMULATE_FAIL for invalid writes
 * (e.g. nonzero PID1/PID2), or whatever the BookE fallback returns.
 */
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		/* Only PID0 is virtualized; PID1 must stay zero */
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		/* Only PID0 is virtualized; PID2 must stay zero */
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low half of the combined mas7_3 value */
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high half of the combined mas7_3 value */
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		/* Never let the guest flash-invalidate or lock-clear the dcache */
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		/* Never let the guest flash-invalidate or lock-clear the icache */
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
/*
 * Emulate a guest mfspr for e500-specific SPRs.
 *
 * Reads are served from the shadow vcpu state.  SPRs not handled here
 * are forwarded to the generic BookE mfspr emulation.
 *
 * Returns EMULATE_DONE with *spr_val filled in, EMULATE_FAIL for SPRs
 * that require an MMU feature the vcpu lacks, or whatever the BookE
 * fallback returns.
 */
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low half of the combined mas7_3 value */
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high half of the combined mas7_3 value */
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	/*
	 * NOTE(review): the original extraction lost the function's tail and
	 * replaced it with unrelated boilerplate text; the return below is
	 * restored to mirror the mtspr sibling — confirm against upstream.
	 */
	return emulated;
}