// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
 *
 * Authors:
 *    Jordan Niethe <jniethe5@gmail.com>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors as a NESTEDv2 guest.
 *
*/
kvmppc_gsbm_fill(&gsbm);
kvmppc_gsbm_for_each(&gsbm, iden)
{ switch (iden) { case KVMPPC_GSID_HOST_STATE_SIZE: case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE: case KVMPPC_GSID_PARTITION_TABLE: case KVMPPC_GSID_PROCESS_TABLE: case KVMPPC_GSID_RUN_INPUT: case KVMPPC_GSID_RUN_OUTPUT: /* Host wide counters */ case KVMPPC_GSID_L0_GUEST_HEAP: case KVMPPC_GSID_L0_GUEST_HEAP_MAX: case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE: case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX: case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM: break; default:
size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
}
} return size;
}
if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
(kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE)) continue;
switch (iden) { case KVMPPC_GSID_DSCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr); break; case KVMPPC_GSID_MMCRA:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra); break; case KVMPPC_GSID_HFSCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr); break; case KVMPPC_GSID_PURR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr); break; case KVMPPC_GSID_SPURR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr); break; case KVMPPC_GSID_AMR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr); break; case KVMPPC_GSID_UAMOR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor); break; case KVMPPC_GSID_SIAR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar); break; case KVMPPC_GSID_SDAR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar); break; case KVMPPC_GSID_IAMR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr); break; case KVMPPC_GSID_DAWR0:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0); break; case KVMPPC_GSID_DAWR1:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1); break; case KVMPPC_GSID_DAWRX0:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0); break; case KVMPPC_GSID_DAWRX1:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1); break; case KVMPPC_GSID_DEXCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr); break; case KVMPPC_GSID_HASHKEYR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr); break; case KVMPPC_GSID_HASHPKEYR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr); break; case KVMPPC_GSID_CIABR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr); break; case KVMPPC_GSID_WORT:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort); break; case KVMPPC_GSID_PPR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr); break; case KVMPPC_GSID_PSPB:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb); break; case KVMPPC_GSID_TAR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar); break; case KVMPPC_GSID_FSCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr); break; case KVMPPC_GSID_EBBHR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr); break; case KVMPPC_GSID_EBBRR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr); break; case KVMPPC_GSID_BESCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr); break; case KVMPPC_GSID_IC:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic); break; case KVMPPC_GSID_CTRL:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl); break; case KVMPPC_GSID_PIDR:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid); break; case KVMPPC_GSID_AMOR: {
u64 amor = ~0;
rc = kvmppc_gse_put_u64(gsb, iden, amor); break;
} case KVMPPC_GSID_VRSAVE:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave); break; case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
i = iden - KVMPPC_GSID_MMCR(0);
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]); break; case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
i = iden - KVMPPC_GSID_SIER(0);
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]); break; case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
i = iden - KVMPPC_GSID_PMC(0);
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]); break; case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
i = iden - KVMPPC_GSID_GPR(0);
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.regs.gpr[i]); break; case KVMPPC_GSID_CR:
rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr); break; case KVMPPC_GSID_XER:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer); break; case KVMPPC_GSID_CTR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr); break; case KVMPPC_GSID_LR:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.regs.link); break; case KVMPPC_GSID_NIA:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip); break; case KVMPPC_GSID_SRR0:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.srr0); break; case KVMPPC_GSID_SRR1:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.srr1); break; case KVMPPC_GSID_SPRG0:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.sprg0); break; case KVMPPC_GSID_SPRG1:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.sprg1); break; case KVMPPC_GSID_SPRG2:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.sprg2); break; case KVMPPC_GSID_SPRG3:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.sprg3); break; case KVMPPC_GSID_DAR:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.dar); break; case KVMPPC_GSID_DSISR:
rc = kvmppc_gse_put_u32(gsb, iden,
vcpu->arch.shregs.dsisr); break; case KVMPPC_GSID_MSR:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.shregs.msr); break; case KVMPPC_GSID_VTB:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->vtb); break; case KVMPPC_GSID_DPDES:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->dpdes); break; case KVMPPC_GSID_LPCR:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->lpcr); break; case KVMPPC_GSID_TB_OFFSET:
rc = kvmppc_gse_put_u64(gsb, iden,
vcpu->arch.vcore->tb_offset); break; case KVMPPC_GSID_FPSCR:
rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr); break; case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
i = iden - KVMPPC_GSID_VSRS(0);
memcpy(&v, &vcpu->arch.fp.fpr[i], sizeof(vcpu->arch.fp.fpr[i]));
rc = kvmppc_gse_put_vector128(gsb, iden, &v); break; #ifdef CONFIG_VSX case KVMPPC_GSID_VSCR:
rc = kvmppc_gse_put_u32(gsb, iden,
vcpu->arch.vr.vscr.u[3]); break; case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
i = iden - KVMPPC_GSID_VSRS(32);
rc = kvmppc_gse_put_vector128(gsb, iden,
&vcpu->arch.vr.vr[i]); break; #endif case KVMPPC_GSID_DEC_EXPIRY_TB: {
u64 dw;
dw = vcpu->arch.dec_expires -
vcpu->arch.vcore->tb_offset;
rc = kvmppc_gse_put_u64(gsb, iden, dw); break;
} case KVMPPC_GSID_LOGICAL_PVR: /* * Though 'arch_compat == 0' would mean the default * compatibility, arch_compat, being a Guest Wide * Element, cannot be filled with a value of 0 in GSB * as this would result into a kernel trap. * Hence, when `arch_compat == 0`, arch_compat should * default to L1's PVR.
*/ if (!vcpu->arch.vcore->arch_compat) { if (cpu_has_feature(CPU_FTR_P11_PVR))
arch_compat = PVR_ARCH_31_P11; elseif (cpu_has_feature(CPU_FTR_ARCH_31))
arch_compat = PVR_ARCH_31; elseif (cpu_has_feature(CPU_FTR_ARCH_300))
arch_compat = PVR_ARCH_300;
} else {
arch_compat = vcpu->arch.vcore->arch_compat;
}
rc = kvmppc_gse_put_u32(gsb, iden, arch_compat); break;
}
/**
 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Mark a guest state ID as having been changed by the L1 host and thus
 * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
*/ int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{ struct kvmhv_nestedv2_io *io; struct kvmppc_gs_bitmap *valids; struct kvmppc_gs_msg *gsm;
/**
 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Reload the value for the guest state ID from the L0 host into the L1 host.
 * This is cached so that going out to the L0 host only happens if necessary.
*/ int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{ struct kvmhv_nestedv2_io *io; struct kvmppc_gs_bitmap *valids; struct kvmppc_gs_buff *gsb; struct kvmppc_gs_msg gsm; int rc;
/**
 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
 * @vcpu: vcpu
 * @time_limit: hdec expiry tb
 *
 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
 * wide values need to be sent with H_GUEST_SET first.
 *
 * The hdec tb offset is always sent to L0 host.
*/ int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
{ struct kvmhv_nestedv2_io *io; struct kvmppc_gs_buff *gsb; struct kvmppc_gs_msg *gsm; int rc;
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
{ for (int i = 0; i < 32; i++)
kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
/**
 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
*/ int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io)
{ long rc;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.