/* * check if there is pending interrupt from * non-APIC source without intack.
*/ int kvm_cpu_has_extint(struct kvm_vcpu *v)
{ /* * FIXME: interrupt.injected represents an interrupt whose * side-effects have already been applied (e.g. bit from IRR * already moved to ISR). Therefore, it is incorrect to rely * on interrupt.injected to know if there is a pending * interrupt in the user-mode LAPIC. * This leads to nVMX/nSVM not be able to distinguish * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on * pending interrupt or should re-inject an injected * interrupt.
*/ if (!lapic_in_kernel(v)) return v->arch.interrupt.injected;
if (kvm_xen_has_interrupt(v)) return 1;
if (!kvm_apic_accept_pic_intr(v)) return 0;
#ifdef CONFIG_KVM_IOAPIC if (pic_in_kernel(v->kvm)) return v->kvm->arch.vpic->output; #endif
/* * check if there is injectable interrupt: * when virtual interrupt delivery enabled, * interrupt from apic will handled by hardware, * we don't need to check it here.
*/ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{ if (kvm_cpu_has_extint(v)) return 1;
if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v)) return 0;
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status)
{ struct kvm_lapic_irq irq; int r;
int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, conststruct kvm_irq_routing_entry *ue)
{ /* We can't check irqchip_in_kernel() here as some callers are * currently initializing the irqchip. Other callers should therefore * check kvm_arch_can_set_irq_routing() before calling this function.
*/ switch (ue->type) { #ifdef CONFIG_KVM_IOAPIC case KVM_IRQ_ROUTING_IRQCHIP: if (irqchip_split(kvm)) return -EINVAL;
e->irqchip.pin = ue->u.irqchip.pin; switch (ue->u.irqchip.irqchip) { case KVM_IRQCHIP_PIC_SLAVE:
e->irqchip.pin += PIC_NUM_PINS / 2;
fallthrough; case KVM_IRQCHIP_PIC_MASTER: if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2) return -EINVAL;
e->set = kvm_pic_set_irq; break; case KVM_IRQCHIP_IOAPIC: if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS) return -EINVAL;
e->set = kvm_ioapic_set_irq; break; default: return -EINVAL;
}
e->irqchip.irqchip = ue->u.irqchip.irqchip; break; #endif case KVM_IRQ_ROUTING_MSI:
e->set = kvm_set_msi;
e->msi.address_lo = ue->u.msi.address_lo;
e->msi.address_hi = ue->u.msi.address_hi;
e->msi.data = ue->u.msi.data;
if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; break; #ifdef CONFIG_KVM_HYPERV case KVM_IRQ_ROUTING_HV_SINT:
e->set = kvm_hv_synic_set_irq;
e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
e->hv_sint.sint = ue->u.hv_sint.sint; break; #endif #ifdef CONFIG_KVM_XEN case KVM_IRQ_ROUTING_XEN_EVTCHN: return kvm_xen_setup_evtchn(kvm, e, ue); #endif default: return -EINVAL;
}
return 0;
}
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu)
{ int r = 0; unsignedlong i; struct kvm_vcpu *vcpu;
if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu)) returntrue;
kvm_for_each_vcpu(i, vcpu, kvm) { if (!kvm_apic_present(vcpu)) continue;
if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
irq->dest_id, irq->dest_mode)) continue;
if (++r == 2) returnfalse;
*dest_vcpu = vcpu;
}
return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
/*
 * Decide whether EOIs for @vector on @vcpu must be intercepted for I/O APIC
 * emulation, and record the decision in @ioapic_handled_vectors.
 */
void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
			 u8 vector, unsigned long *ioapic_handled_vectors)
{
	/*
	 * Intercept EOI if the vCPU is the target of the new IRQ routing, or
	 * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU
	 * may receive a level-triggered IRQ in the future, or already received
	 * level-triggered IRQ. The EOI needs to be intercepted and forwarded
	 * to I/O APIC emulation so that the IRQ can be de-asserted.
	 */
	if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id,
				dest_mode)) {
		__set_bit(vector, ioapic_handled_vectors);
	} else if (kvm_apic_pending_eoi(vcpu, vector)) {
		__set_bit(vector, ioapic_handled_vectors);

		/*
		 * Track the highest pending EOI for which the vCPU is NOT the
		 * target in the new routing. Only the EOI for the IRQ that is
		 * in-flight (for the old routing) needs to be intercepted, any
		 * future IRQs that arrive on this vCPU will be coincidental to
		 * the level-triggered routing and don't need to be intercepted.
		 */
		if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi)
			vcpu->arch.highest_stale_pending_ioapic_eoi = vector;
	}
}
if (WARN_ON_ONCE(!irqchip_in_kernel(kvm) || !kvm_arch_has_irq_bypass())) return -EINVAL;
if (entry && entry->type == KVM_IRQ_ROUTING_MSI) {
kvm_msi_to_lapic_irq(kvm, entry, &irq);
/* * Force remapped mode if hardware doesn't support posting the * virtual interrupt to a vCPU. Only IRQs are postable (NMIs, * SMIs, etc. are not), and neither AMD nor Intel IOMMUs support * posting multicast/broadcast IRQs. If the interrupt can't be * posted, the device MSI needs to be routed to the host so that * the guest's desired interrupt can be synthesized by KVM. * * This means that KVM can only post lowest-priority interrupts * if they have a single CPU as the destination, e.g. only if * the guest has affined the interrupt to a single vCPU.
*/ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
!kvm_irq_is_postable(&irq))
vcpu = NULL;
}
if (!irqfd->irq_bypass_vcpu && !vcpu) return 0;
r = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi,
vcpu, irq.vector); if (r) {
WARN_ON_ONCE(irqfd->irq_bypass_vcpu && !vcpu);
irqfd->irq_bypass_vcpu = NULL; return r;
}
if (!kvm->arch.nr_possible_bypass_irqs++)
kvm_x86_call(pi_start_bypass)(kvm);
if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry); if (ret)
kvm->arch.nr_possible_bypass_irqs--;
}
spin_unlock_irq(&kvm->irqfds.lock);
/* * If the producer of an IRQ that is currently being posted to a vCPU * is unregistered, change the associated IRTE back to remapped mode as * the IRQ has been released (or repurposed) by the device driver, i.e. * KVM must relinquish control of the IRTE.
*/
spin_lock_irq(&kvm->irqfds.lock);
if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
ret = kvm_pi_update_irte(irqfd, NULL); if (ret)
pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n",
irqfd->consumer.eventfd, ret);
}
irqfd->producer = NULL;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.