// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Michael Ellerman, IBM Corporation. * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
*/
/* * LOCKING * ======= * * Each ICS has a spin lock protecting the information about the IRQ * sources and avoiding simultaneous deliveries of the same interrupt. * * ICP operations are done via a single compare & swap transaction * (most ICP state fits in the union kvmppc_icp_state)
*/
/* * TODO * ==== * * - To speed up resends, keep a bitmap of "resend" set bits in the * ICS * * - Speed up server# -> ICP lookup (array ? hash table ?) * * - Make ICS lockless as well, or at least a per-interrupt lock or hashed * locks array to improve scalability
*/
/* * Return value ideally indicates how the interrupt was handled, but no * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS), * so just return 0.
*/ staticint ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{ struct ics_irq_state *state; struct kvmppc_ics *ics;
u16 src;
u32 pq_old, pq_new;
ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) {
XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq); return -EINVAL;
}
state = &ics->irq_state[src]; if (!state->exists) return -EINVAL;
if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
level = 1; elseif (level == KVM_INTERRUPT_UNSET)
level = 0; /* * Take other values the same as 1, consistent with original code. * maybe WARN here?
*/
if (!state->lsi && level == 0) /* noop for MSI */ return 0;
do {
pq_old = state->pq_state; if (state->lsi) { if (level) { if (pq_old & PQ_PRESENTED) /* Setting already set LSI ... */ return 0;
XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
icp->server_num,
old.cppr, old.mfrr, old.pending_pri, old.xisr,
old.need_resend, old.out_ee);
XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n", new.cppr, new.mfrr, new.pending_pri, new.xisr, new.need_resend, new.out_ee); /* * Check for output state update * * Note that this is racy since another processor could be updating * the state already. This is why we never clear the interrupt output * here, we only ever set it. The clear only happens prior to doing * an update and only by the processor itself. Currently we do it * in Accept (H_XIRR) and Up_Cppr (H_XPPR). * * We also do not try to figure out whether the EE state has changed, * we unconditionally set it if the new state calls for it. The reason * for that is that we opportunistically remove the pending interrupt * flag when raising CPPR, so we need to set it back here if an * interrupt is still pending.
*/ if (new.out_ee) {
kvmppc_book3s_queue_irqprio(icp->vcpu,
BOOK3S_INTERRUPT_EXTERNAL); if (!change_self)
kvmppc_fast_vcpu_kick(icp->vcpu);
}
bail: return success;
}
/* Order this load with the test for need_resend in the caller */
smp_rmb();
for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) { struct kvmppc_ics *ics = xics->ics[icsid];
if (!test_and_clear_bit(icsid, icp->resend_map)) continue; if (!ics) continue;
ics_check_resend(xics, ics, icp);
}
}
XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
icp->server_num);
do {
old_state = new_state = READ_ONCE(icp->state);
*reject = 0;
/* See if we can deliver */
success = new_state.cppr > priority &&
new_state.mfrr > priority &&
new_state.pending_pri > priority;
/* * If we can, check for a rejection and perform the * delivery
*/ if (success) {
*reject = new_state.xisr;
new_state.xisr = irq;
new_state.pending_pri = priority;
} else { /* * If we failed to deliver we set need_resend * so a subsequent CPPR state change causes us * to try a new delivery.
*/
new_state.need_resend = true;
}
} while (!icp_try_update(icp, old_state, new_state, false));
/* * This is used both for initial delivery of an interrupt and * for subsequent rejection. * * Rejection can be racy vs. resends. We have evaluated the * rejection in an atomic ICP transaction which is now complete, * so potentially the ICP can already accept the interrupt again. * * So we need to retry the delivery. Essentially the reject path * boils down to a failed delivery. Always. * * Now the interrupt could also have moved to a different target, * thus we may need to re-do the ICP lookup as well
*/
again: /* Get the ICS state and lock it */
ics = kvmppc_xics_find_ics(xics, new_irq, &src); if (!ics) {
XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq); return;
}
state = &ics->irq_state[src];
/* Get a lock on the ICS */
local_irq_save(flags);
arch_spin_lock(&ics->lock);
/* Get our server */ if (!icp || state->server != icp->server_num) {
icp = kvmppc_xics_find_server(xics->kvm, state->server); if (!icp) {
pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
new_irq, state->server); goto out;
}
}
if (check_resend) if (!state->resend) goto out;
/* Clear the resend bit of that interrupt */
state->resend = 0;
/* * If masked, bail out * * Note: PAPR doesn't mention anything about masked pending * when doing a resend, only when doing a delivery. * * However that would have the effect of losing a masked * interrupt that was rejected and isn't consistent with * the whole masked_pending business which is about not * losing interrupts that occur while masked. * * I don't differentiate normal deliveries and resends, this * implementation will differ from PAPR and not lose such * interrupts.
*/ if (state->priority == MASKED) {
XICS_DBG("irq %#x masked pending\n", new_irq);
state->masked_pending = 1; goto out;
}
/* * Try the delivery, this will set the need_resend flag * in the ICP as part of the atomic transaction if the * delivery is not possible. * * Note that if successful, the new delivery might have itself * rejected an interrupt that was "delivered" before we took the * ics spin lock. * * In this case we do the whole sequence all over again for the * new guy. We cannot assume that the rejected interrupt is less * favored than the new one, and thus doesn't need to be delivered, * because by the time we exit icp_try_to_deliver() the target * processor may well have already consumed & completed it, and thus * the rejected interrupt might actually be already acceptable.
*/ if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) { /* * Delivery was successful, did we reject somebody else ?
*/ if (reject && reject != XICS_IPI) {
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
new_irq = reject;
check_resend = false; goto again;
}
} else { /* * We failed to deliver the interrupt we need to set the * resend map bit and mark the ICS state as needing a resend
*/
state->resend = 1;
/* * Make sure when checking resend, we don't miss the resend * if resend_map bit is seen and cleared.
*/
smp_wmb();
set_bit(ics->icsid, icp->resend_map);
/* * If the need_resend flag got cleared in the ICP some time * between icp_try_to_deliver() atomic update and now, then * we know it might have missed the resend_map bit. So we * retry
*/
smp_mb(); if (!icp->state.need_resend) {
state->resend = 0;
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
check_resend = false; goto again;
}
}
out:
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
}
/* * This handles several related states in one operation: * * ICP State: Down_CPPR * * Load CPPR with new value and if the XISR is 0 * then check for resends: * * ICP State: Resend * * If MFRR is more favored than CPPR, check for IPIs * and notify ICS of a potential resend. This is done * asynchronously (when used in real mode, we will have * to exit here). * * We do not handle the complete Check_IPI as documented * here. In the PAPR, this state will be used for both * Set_MFRR and Down_CPPR. However, we know that we aren't * changing the MFRR state here so we don't need to handle * the case of an MFRR causing a reject of a pending irq, * this will have been handled when the MFRR was set in the * first place. * * Thus we don't have to handle rejects, only resends. * * When implementing real mode for HV KVM, resend will lead to * a H_TOO_HARD return and the whole transaction will be handled * in virtual mode.
*/ do {
old_state = new_state = READ_ONCE(icp->state);
/* Down_CPPR */
new_state.cppr = new_cppr;
/* * Cut down Resend / Check_IPI / IPI * * The logic is that we cannot have a pending interrupt * trumped by an IPI at this point (see above), so we * know that either the pending interrupt is already an * IPI (in which case we don't care to override it) or * it's either more favored than us or non existent
*/ if (new_state.mfrr < new_cppr &&
new_state.mfrr <= new_state.pending_pri) {
WARN_ON(new_state.xisr != XICS_IPI &&
new_state.xisr != 0);
new_state.pending_pri = new_state.mfrr;
new_state.xisr = XICS_IPI;
}
} while (!icp_try_update(icp, old_state, new_state, true));
/* * Now handle resend checks. Those are asynchronous to the ICP * state update in HW (ie bus transactions) so we can handle them * separately here too
*/ if (resend)
icp_check_resend(xics, icp);
}
/* First, remove EE from the processor */
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/* * ICP State: Accept_Interrupt * * Return the pending interrupt (if any) along with the * current CPPR, then clear the XISR & set CPPR to the * pending priority
*/ do {
old_state = new_state = READ_ONCE(icp->state);
XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
vcpu->vcpu_id, server, mfrr);
icp = vcpu->arch.icp;
local = icp->server_num == server; if (!local) {
icp = kvmppc_xics_find_server(vcpu->kvm, server); if (!icp) return H_PARAMETER;
}
/* * ICP state: Set_MFRR * * If the CPPR is more favored than the new MFRR, then * nothing needs to be rejected as there can be no XISR to * reject. If the MFRR is being made less favored then * there might be a previously-rejected interrupt needing * to be resent. * * ICP state: Check_IPI * * If the CPPR is less favored, then we might be replacing * an interrupt, and thus need to possibly reject it. * * ICP State: IPI * * Besides rejecting any pending interrupts, we also * update XISR and pending_pri to mark IPI as pending. * * PAPR does not describe this state, but if the MFRR is being * made less favored than its earlier value, there might be * a previously-rejected interrupt needing to be resent. * Ideally, we would want to resend only if * prio(pending_interrupt) < mfrr && * prio(pending_interrupt) < cppr * where pending interrupt is the one that was rejected. But * we don't have that state, so we simply trigger a resend * whenever the MFRR is made less favored.
*/ do {
old_state = new_state = READ_ONCE(icp->state);
/* Set_MFRR */
new_state.mfrr = mfrr;
/* Check_IPI */
reject = 0;
resend = false; if (mfrr < new_state.cppr) { /* Reject a pending interrupt if not an IPI */ if (mfrr <= new_state.pending_pri) {
reject = new_state.xisr;
new_state.pending_pri = mfrr;
new_state.xisr = XICS_IPI;
}
}
if (mfrr > old_state.mfrr) {
resend = new_state.need_resend;
new_state.need_resend = 0;
}
} while (!icp_try_update(icp, old_state, new_state, local));
/* * ICP State: Set_CPPR * * We can safely compare the new value with the current * value outside of the transaction as the CPPR is only * ever changed by the processor on itself
*/ if (cppr > icp->state.cppr)
icp_down_cppr(xics, icp, cppr); elseif (cppr == icp->state.cppr) return;
/* * ICP State: Up_CPPR * * The processor is raising its priority, this can result * in a rejection of a pending interrupt: * * ICP State: Reject_Current * * We can remove EE from the current processor, the update * transaction will set it again if needed
*/
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
do {
old_state = new_state = READ_ONCE(icp->state);
} while (!icp_try_update(icp, old_state, new_state, true));
/* * Check for rejects. They are handled by doing a new delivery * attempt (see comments in icp_deliver_irq).
*/ if (reject && reject != XICS_IPI)
icp_deliver_irq(xics, icp, reject, false);
}
/* * ICS EOI handling: For LSI, if P bit is still set, we need to * resend it. * * For MSI, we move Q bit into P (and clear Q). If it is set, * resend it.
*/
ics = kvmppc_xics_find_ics(xics, irq, &src); if (!ics) {
XICS_DBG("ios_eoi: IRQ 0x%06x not found !\n", irq); return H_PARAMETER;
}
state = &ics->irq_state[src];
if (state->lsi)
pq_new = state->pq_state; else do {
pq_old = state->pq_state;
pq_new = pq_old >> 1;
} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
if (pq_new & PQ_PRESENTED)
icp_deliver_irq(xics, icp, irq, false);
/* * ICP State: EOI * * Note: If EOI is incorrectly used by SW to lower the CPPR * value (ie more favored), we do not check for rejection of * a pending interrupt, this is a SW error and PAPR specifies * that we don't have to deal with it. * * The sending of an EOI to the ICS is handled after the * CPPR update * * ICP State: Down_CPPR which we handle * in a separate function as it's shared with H_CPPR.
*/
icp_down_cppr(xics, icp, xirr >> 24);
/* IPIs have no EOI */ if (irq == XICS_IPI) return H_SUCCESS;
/* * Deassert the CPU interrupt request. * icp_try_update will reassert it if necessary.
*/
kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/* * Note that if we displace an interrupt from old_state.xisr, * we don't mark it as rejected. We expect userspace to set * the state of the interrupt sources to be consistent with * the ICP states (either before or afterwards, which doesn't * matter). We do handle resends due to CPPR becoming less * favoured because that is necessary to end up with a * consistent state in the situation where userspace restores * the ICS states before the ICP states.
*/ do {
old_state = READ_ONCE(icp->state);
ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) return -ENOENT;
irqp = &ics->irq_state[idx];
local_irq_save(flags);
arch_spin_lock(&ics->lock);
ret = -ENOENT; if (irqp->exists) {
val = irqp->server;
prio = irqp->priority; if (prio == MASKED) {
val |= KVM_XICS_MASKED;
prio = irqp->saved_priority;
}
val |= prio << KVM_XICS_PRIORITY_SHIFT; if (irqp->lsi) {
val |= KVM_XICS_LEVEL_SENSITIVE; if (irqp->pq_state & PQ_PRESENTED)
val |= KVM_XICS_PENDING;
} elseif (irqp->masked_pending || irqp->resend)
val |= KVM_XICS_PENDING;
if (irqp->pq_state & PQ_PRESENTED)
val |= KVM_XICS_PRESENTED;
if (irqp->pq_state & PQ_QUEUED)
val |= KVM_XICS_QUEUED;
ret = 0;
}
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) return -ENOENT;
ics = kvmppc_xics_find_ics(xics, irq, &idx); if (!ics) {
ics = kvmppc_xics_create_ics(xics->kvm, xics, irq); if (!ics) return -ENOMEM;
}
irqp = &ics->irq_state[idx]; if (get_user(val, ubufp)) return -EFAULT;
server = val & KVM_XICS_DESTINATION_MASK;
prio = val >> KVM_XICS_PRIORITY_SHIFT; if (prio != MASKED &&
kvmppc_xics_find_server(xics->kvm, server) == NULL) return -EINVAL;
local_irq_save(flags);
arch_spin_lock(&ics->lock);
irqp->server = server;
irqp->saved_priority = prio; if (val & KVM_XICS_MASKED)
prio = MASKED;
irqp->priority = prio;
irqp->resend = 0;
irqp->masked_pending = 0;
irqp->lsi = 0;
irqp->pq_state = 0; if (val & KVM_XICS_LEVEL_SENSITIVE)
irqp->lsi = 1; /* If PENDING, set P in case P is not saved because of old code */ if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
irqp->pq_state |= PQ_PRESENTED; if (val & KVM_XICS_QUEUED)
irqp->pq_state |= PQ_QUEUED;
irqp->exists = 1;
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
if (val & KVM_XICS_PENDING)
icp_deliver_irq(xics, NULL, irqp->number, false);
return 0;
}
/*
 * KVM irqchip hook: a guest interrupt line changed state.
 *
 * Looks up the VM's XICS instance and hands the event to the ICS
 * source-delivery path. Returns -ENODEV when no XICS device has been
 * created for this VM; otherwise whatever ics_deliver_irq() returns
 * (0 on success, callers ignore the value since KVM_IRQ_LINE_STATUS
 * is not implemented).
 */
int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	/* The XICS device may not have been instantiated for this VM. */
	if (!xics)
		return -ENODEV;

	return ics_deliver_irq(xics, irq, level);
}
/* * Called when device fd is closed. kvm->lock is held.
*/ staticvoid kvmppc_xics_release(struct kvm_device *dev)
{ struct kvmppc_xics *xics = dev->private; unsignedlong i; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu;
pr_devel("Releasing xics device\n");
/* * Since this is the device release function, we know that * userspace does not have any open fd referring to the * device. Therefore there can not be any of the device * attribute set/get functions being executed concurrently, * and similarly, the connect_vcpu and set/clr_mapped * functions also cannot be being executed.
*/
debugfs_remove(xics->dentry);
/* * We should clean up the vCPU interrupt presenters first.
*/
kvm_for_each_vcpu(i, vcpu, kvm) { /* * Take vcpu->mutex to ensure that no one_reg get/set ioctl * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently. * Holding the vcpu->mutex also means that execution is * excluded for the vcpu until the ICP was freed. When the vcpu * can execute again, vcpu->arch.icp and vcpu->arch.irq_type * have been cleared and the vcpu will not be going into the * XICS code anymore.
*/
mutex_lock(&vcpu->mutex);
kvmppc_xics_free_icp(vcpu);
mutex_unlock(&vcpu->mutex);
}
if (kvm)
kvm->arch.xics = NULL;
for (i = 0; i <= xics->max_icsid; i++) {
kfree(xics->ics[i]);
xics->ics[i] = NULL;
} /* * A reference of the kvmppc_xics pointer is now kept under * the xics_device pointer of the machine for reuse. It is * freed when the VM is destroyed for now until we fix all the * execution paths.
*/
kfree(dev);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.