/*
 * The preemption should be disabled here because it races with
 * kvm_sched_out/kvm_sched_in (called from preempt notifiers) which
 * also calls vcpu_load/put.
 */
get_cpu();
loaded = (vcpu->cpu != -1); if (loaded)
kvm_arch_vcpu_put(vcpu);
/* Reset the guest CSRs for hotplug usecase */ if (loaded)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
put_cpu();
}
/*
 * kvm_arch_vcpu_precreate() - arch hook invoked before a VCPU is created.
 * @kvm: the VM the VCPU will belong to
 * @id: the requested VCPU id
 *
 * No architecture-specific pre-creation checks are needed on RISC-V,
 * so always report success. (Fixes the garbled "unsignedint" token.)
 */
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
/*
 * kvm_arch_vcpu_create() - arch-specific VCPU initialization.
 * NOTE(review): this function is truncated in this chunk — the rest of
 * its body (including any use of "rc" and the return) is not visible here.
 */
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{ int rc;
/* Protects vcpu->arch.mp_state updates (see the set_mpstate ioctl below) */
spin_lock_init(&vcpu->arch.mp_state_lock);
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
/* Pages handed out by the MMU page cache must be zeroed on allocation */
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
/* Start from an empty ISA-extension bitmap before probing features */
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
kvm_riscv_vcpu_setup_isa(vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{ /** * vcpu with id 0 is the designated boot cpu. * Keep all vcpus with non-zero id in power-off state so that * they can be brought up using SBI HSM extension.
*/ if (vcpu->vcpu_idx != 0)
kvm_riscv_vcpu_power_off(vcpu);
}
/*
 * NOTE(review): fragment — the header of the enclosing interrupt-sync
 * helper is not visible in this chunk; "csr", "v", "hvip" and "vcpu"
 * are presumably set up in the missing prologue. TODO: confirm against
 * the full file.
 */
/* Read current HVIP and VSIE CSRs */
csr->vsie = ncsr_read(CSR_VSIE);
/* Sync-up HVIP.VSSIP bit changes done by the guest */
hvip = ncsr_read(CSR_HVIP); if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) { if (hvip & (1UL << IRQ_VS_SOFT)) { if (!test_and_set_bit(IRQ_VS_SOFT,
v->irqs_pending_mask))
set_bit(IRQ_VS_SOFT, v->irqs_pending);
} else { if (!test_and_set_bit(IRQ_VS_SOFT,
v->irqs_pending_mask))
clear_bit(IRQ_VS_SOFT, v->irqs_pending);
}
}
/* Sync up the HVIP.LCOFIP bit changes (only clear) by the guest */ if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) { if (!(hvip & (1UL << IRQ_PMU_OVF)) &&
!test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask))
clear_bit(IRQ_PMU_OVF, v->irqs_pending);
}
/* Sync-up AIA high interrupts */
kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsignedint irq)
{ /* * We only allow VS-mode software, timer, and external * interrupts when irq is one of the local interrupts * defined by RISC-V privilege specification.
*/ if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT &&
irq != IRQ_PMU_OVF) return -EINVAL;
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsignedint irq)
{ /* * We only allow VS-mode software, timer, counter overflow and external * interrupts when irq is one of the local interrupts * defined by RISC-V privilege specification.
*/ if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT &&
irq != IRQ_PMU_OVF) return -EINVAL;
/*
 * kvm_arch_vcpu_ioctl_set_mpstate() - set a VCPU's multiprocessing state.
 * NOTE(review): truncated in this chunk — the matching
 * spin_unlock(&vcpu->arch.mp_state_lock) and "return ret;" are not
 * visible here.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state)
{ int ret = 0;
/* Serialize mp_state changes against concurrent power-off/reset paths */
spin_lock(&vcpu->arch.mp_state_lock);
switch (mp_state->mp_state) { case KVM_MP_STATE_RUNNABLE:
WRITE_ONCE(vcpu->arch.mp_state, *mp_state); break; case KVM_MP_STATE_STOPPED:
__kvm_riscv_vcpu_power_off(vcpu); break; case KVM_MP_STATE_INIT_RECEIVED: if (vcpu->kvm->arch.mp_state_reset)
kvm_riscv_reset_vcpu(vcpu, false); else
ret = -EINVAL; break; default:
ret = -EINVAL;
}
/**
 * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu: the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *
 * (Fixes the garbled "staticint" token.)
 */
static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			/* Drop SRCU while blocked so the VM can make progress */
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
					   (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
					   TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
				/*
				 * Awaken to handle a signal, request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu, true);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_mmu_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_riscv_tlb_flush_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);

		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			kvm_riscv_vcpu_record_steal_time(vcpu);

		/* Exit to userspace when the dirty ring needs harvesting */
		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}
/* NOTE(review): the function's final "return 1;" and closing brace are not visible in this chunk. */
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 *
 * (Fixes the garbled "staticvoid" token.)
 * NOTE(review): almost all of this function's body is missing from this
 * chunk; only the locals and one comment are visible.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					      struct kvm_cpu_trap *trap)
{
	void *nsh;
	struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;

	/*
	 * We save trap CSRs (such as SEPC, SCAUSE, STVAL, HTVAL, and
	 * HTINST) here because we do local_irq_enable() after this
	 * function in kvm_arch_vcpu_ioctl_run() which can result in
	 * an interrupt immediately after local_irq_enable() and can
	 * potentially change trap CSRs.
	 */
/*
 * kvm_arch_vcpu_ioctl_run() - handle the KVM_RUN ioctl for a VCPU.
 * NOTE(review): this function is truncated in this chunk — the actual
 * guest entry/exit, exit handling, and the function epilogue are not
 * visible below.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{ int ret; struct kvm_cpu_trap trap; struct kvm_run *run = vcpu->run;
/* One-time configuration before this VCPU's very first run */
if (!vcpu->arch.ran_atleast_once)
kvm_riscv_vcpu_setup_config(vcpu);
/* Mark this VCPU ran at least once */
vcpu->arch.ran_atleast_once = true;
kvm_vcpu_srcu_read_lock(vcpu);
/* Complete any in-flight user-space emulation before re-entering the guest */
switch (run->exit_reason) { case KVM_EXIT_MMIO: /* Process MMIO value returned from user-space */
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); break; case KVM_EXIT_RISCV_SBI: /* Process SBI value returned from user-space */
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); break; case KVM_EXIT_RISCV_CSR: /* Process CSR value returned from user-space */
ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run); break; default:
ret = 0; break;
} if (ret) {
kvm_vcpu_srcu_read_unlock(vcpu); return ret;
}
if (!vcpu->wants_to_run) {
kvm_vcpu_srcu_read_unlock(vcpu); return -EINTR;
}
vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN; while (ret > 0) { /* Check conditions before entering the guest */
ret = xfer_to_guest_mode_handle_work(vcpu); if (ret) continue;
ret = 1;
kvm_riscv_gstage_vmid_update(vcpu);
ret = kvm_riscv_check_vcpu_requests(vcpu); if (ret <= 0) continue;
preempt_disable();
/* Update AIA HW state before entering guest */
ret = kvm_riscv_vcpu_aia_update(vcpu); if (ret <= 0) {
preempt_enable(); continue;
}
local_irq_disable();
/* * Ensure we set mode to IN_GUEST_MODE after we disable * interrupts and before the final VCPU requests check. * See the comment in kvm_vcpu_exiting_guest_mode() and * Documentation/virt/kvm/vcpu-requests.rst
*/
vcpu->mode = IN_GUEST_MODE;
/* * Sanitize VMID mappings cached (TLB) on current CPU * * Note: This should be done after G-stage VMID has been * updated using kvm_riscv_gstage_vmid_ver_changed()
*/
kvm_riscv_gstage_vmid_sanitize(vcpu);
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
/* * We must ensure that any pending interrupts are taken before * we exit guest timing so that timer ticks are accounted as * guest time. Transiently unmask interrupts so that any * pending interrupts are taken. * * There's no barrier which ensures that pending interrupts are * recognised, so we just hope that the CPU takes any pending * interrupts between the enable and disable.
*/
local_irq_enable();
local_irq_disable();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.