/* * OpenPIC emulation * * Copyright (c) 2004 Jocelyn Mayer * 2011 Alexander Graf * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/
/*
 * Queue of raised-but-not-yet-serviced IRQs for one destination CPU,
 * kept as a bitmap plus a cached next-IRQ/priority pair.
 *
 * Round up to the nearest 64 IRQs so that the queue length
 * won't change when moving between 32 and 64 bit hosts.
 */
struct irq_queue {
	/* Fixed token fusion: "unsignedlong" -> "unsigned long". */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;	/* presumably the highest-priority queued IRQ, or -1 -- confirm against IRQ_check() */
	int priority;	/* priority of 'next' */
};
/* Per-source interrupt state. */
struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;		/* last CPU this IRQ was delivered to (distributed mode) */
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;		/* critical interrupts ignore mask on some FSL MPICs */
};
/*
 * Propagate a single source's (de)assertion to one destination CPU.
 *
 * @opp:        the emulated PIC
 * @n_CPU:      destination CPU index
 * @n_IRQ:      source IRQ number
 * @active:     new state of the source
 * @was_active: previous state of the source
 *
 * Non-INT outputs (e.g. critical/machine-check on FSL parts) are simply
 * reference-counted per output; INT delivery goes through the raised
 * queue and is gated on ctpr and the currently-serviced priority.
 */
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst = &opp->dst[n_CPU];
	struct irq_source *src = &opp->src[n_IRQ];
	int priority;

	pr_debug("%s: IRQ %d active %d was %d\n",
		 __func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			 __func__, src->output, n_IRQ, active, was_active,
			 dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			/* Raise the line only on the 0 -> 1 transition. */
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					 __func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			/* Lower the line only when the last source drops. */
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					 __func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			 __func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				 __func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				 __func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				 __func__, n_IRQ, dst->raised.next,
				 dst->raised.priority, dst->ctpr,
				 dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				 __func__, n_IRQ, dst->ctpr,
				 dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
/*
 * Update PIC state because registers for n_IRQ have changed value.
 *
 * Recomputes the effective active state from the pending flag and the
 * IVPR mask bit, updates the IVPR ACTIVITY bit, and forwards the
 * transition to one or more CPUs depending on the delivery mode.
 *
 * Fixed token fusions from the original: "staticvoid" -> "static void",
 * "elseif" -> "else if" (neither would compile).
 */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode: round-robin starting after
		 * the CPU that got the previous one.
		 */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
staticvoid openpic_set_irq(void *opaque, int n_IRQ, int level)
{ struct openpic *opp = opaque; struct irq_source *src;
if (n_IRQ >= MAX_IRQ) {
WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ); return;
}
if (src->output != ILR_INTTGT_INT) { /* Edge-triggered interrupts shouldn't be used * with non-INT delivery, but just in case, * try to make it do something sane rather than * cause an interrupt storm. This is close to * what you'd probably see happen in real hardware.
*/
src->pending = 0;
openpic_update_irq(opp, n_IRQ);
}
}
}
/*
 * NOTE(review): this region appears to be fragments of SEVERAL distinct
 * functions fused together by a bad extraction -- the "openpic_reset"
 * signature below is followed by bodies that belong elsewhere (an
 * IVPR-write helper, a global-register write switch, an MSI read switch,
 * a per-CPU register write switch, and EOI handling).  The variables
 * mask, n_IRQ, val, addr, err, r, srs, dst, idx, irq, cpu, src and
 * retval are not declared in this scope.  Do not edit this region
 * without recovering the original function boundaries.  Code is left
 * byte-identical; only comments were added.
 */
staticvoid openpic_reset(struct openpic *opp)
{ int i;
/* NOTE when implementing newer FSL MPIC models: starting with v4.0, * the polarity bit is read-only on internal interrupts.
 */
/* NOTE(review): from here this looks like the body of an IVPR write
 * helper (write_IRQreg_ivpr-style), not reset code.
 */
mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
IVPR_POLARITY_MASK | opp->vector_mask;
/* ACTIVITY bit is read-only */
opp->src[n_IRQ].ivpr =
(opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);
/* For FSL internal interrupts, The sense bit is reserved and zero, * and the interrupt is always level-triggered. Timers and IPIs * have no sense or polarity bits, and are edge-triggered.
 */ switch (opp->src[n_IRQ].type) { case IRQ_TYPE_NORMAL:
opp->src[n_IRQ].level =
!!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); break;
case IRQ_TYPE_FSLINT:
opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; break;
case IRQ_TYPE_FSLSPECIAL:
opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); break;
}
/* NOTE(review): the switch below looks like a global-register (GBL)
 * write handler dispatching on a register offset.
 */
switch (addr) { case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ break; case 0x40: case 0x50: case 0x60: case 0x70: case 0x80: case 0x90: case 0xA0: case 0xB0:
err = openpic_cpu_write_internal(opp, addr, val,
get_current_cpu()); break; case 0x1000: /* FRR */ break; case 0x1020: /* GCR */
openpic_gcr_write(opp, val); break; case 0x1080: /* VIR */ break; case 0x1090: /* PIR */ /* * This register is used to reset a CPU core -- * let userspace handle it.
 */
err = -ENXIO; break; case 0x10A0: /* IPI_IVPR */ case 0x10B0: case 0x10C0: case 0x10D0: { int idx;
idx = (addr - 0x10A0) >> 4;
write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); break;
} case 0x10E0: /* SPVE */
opp->spve = val & opp->vector_mask; break; default: break;
}
/* NOTE(review): the switch below looks like an MSI register READ
 * handler (MSIR registers are clear-on-read), not write/reset code.
 */
switch (addr) { case 0x00: case 0x10: case 0x20: case 0x30: case 0x40: case 0x50: case 0x60: case 0x70: /* MSIRs */
r = opp->msi[srs].msir; /* Clear on read */
opp->msi[srs].msir = 0;
openpic_set_irq(opp, opp->irq_msi + srs, 0); break; case 0x120: /* MSISR */ for (i = 0; i < MAX_MSI; i++)
r |= (opp->msi[i].msir ? 1 : 0) << i; break;
}
/* NOTE(review): from here this looks like a per-CPU register write
 * handler (IPIDR / CTPR / WHOAMI / IACK / EOI offsets).
 */
dst = &opp->dst[idx];
addr &= 0xFF0; switch (addr) { case 0x40: /* IPIDR */ case 0x50: case 0x60: case 0x70:
idx = (addr - 0x40) >> 4; /* we use IDE as mask which CPUs to deliver the IPI to still. */
opp->src[opp->irq_ipi0 + idx].destmask |= val;
openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); break; case 0x80: /* CTPR */
dst->ctpr = val & 0x0000000F;
pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
__func__, idx, dst->ctpr, dst->raised.priority,
dst->servicing.priority);
if (dst->raised.priority <= dst->ctpr) {
pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
__func__, idx);
mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
} elseif (dst->raised.priority > dst->servicing.priority) {
pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
__func__, idx, dst->raised.next);
mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
}
break; case 0x90: /* WHOAMI */ /* Read-only register */ break; case 0xA0: /* IACK */ /* Read-only register */ break; case 0xB0: { /* EOI */ int notify_eoi;
/* NOTE(review): the IPI re-trigger logic below looks like part of an
 * IACK/EOI path; 'irq', 'src' and 'cpu' are undeclared here.
 */
if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
src->destmask &= ~(1 << cpu); if (src->destmask && !src->level) { /* trigger on CPUs that didn't know about it yet */
openpic_set_irq(opp, irq, 1);
openpic_set_irq(opp, irq, 0); /* if all CPUs knew about it, set active bit again */
src->ivpr |= IVPR_ACTIVITY_MASK;
}
}
return retval;
}
/*
 * NOTE(review): this too appears to be a fusion of (at least) two
 * functions: kvmppc_mpic_set_epr() at the top, followed by the body of
 * what looks like an MMIO read handler ('addr', 'len', 'ptr', 'ret',
 * 'u' are undeclared here), ending with an 'out:' label and a return
 * that do not match the void signature.  Code left byte-identical;
 * only comments added.  Also note "unsignedlong" and "elseif" are fused
 * tokens from the extraction.
 */
void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{ struct openpic *opp = vcpu->arch.mpic; int cpu = vcpu->arch.irq_cpu_id; unsignedlong flags;
spin_lock_irqsave(&opp->lock, flags);
/* In proxy mode the guest reads the EPR from a register instead of
 * doing an IACK cycle; feed it the acknowledged vector.
 */
if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
/* NOTE(review): from here on this is foreign code -- an aligned-read
 * MMIO handler fragment.
 */
if (addr & (len - 1)) {
pr_debug("%s: bad alignment %llx/%d\n",
__func__, addr, len); return -EINVAL;
}
spin_lock_irq(&opp->lock);
ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
spin_unlock_irq(&opp->lock);
/* * Technically only 32-bit accesses are allowed, but be nice to * people dumping registers a byte at a time -- it works in real * hardware (reads only, not writes).
 */ if (len == 4) {
*(u32 *)ptr = u.val;
pr_debug("%s: addr %llx ret %d len 4 val %x\n",
__func__, addr, ret, u.val);
} elseif (len == 1) {
*(u8 *)ptr = u.bytes[addr & 3];
pr_debug("%s: addr %llx ret %d len 1 val %x\n",
__func__, addr, ret, u.bytes[addr & 3]);
} else {
pr_debug("%s: bad length %d\n", __func__, len); return -EINVAL;
}
/* This might need to be changed if GCR gets extended */ if (opp->mpic_mode_mask == GCR_MODE_PROXY)
vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
out:
spin_unlock_irq(&opp->lock); return ret;
}
/*
 * Detach a vcpu from its MPIC destination slot.
 *
 * This should only happen immediately before the mpic is destroyed,
 * so we shouldn't need to worry about anything still trying to
 * access the vcpu pointer.
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	int cpu_id = vcpu->arch.irq_cpu_id;

	/* The slot must actually be occupied by a vcpu. */
	BUG_ON(!opp->dst[cpu_id].vcpu);

	opp->dst[cpu_id].vcpu = NULL;
}
/* * Return value: * < 0 Interrupt was ignored (masked or not delivered for other reasons) * = 0 Interrupt was coalesced (previous irq is still pending) * > 0 Number of CPUs interrupt was delivered to
*/ staticint mpic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
u32 irq = e->irqchip.pin; struct openpic *opp = kvm->arch.mpic; unsignedlong flags;
/* All code paths we care about don't check for the return value */ return 0;
}
/*
 * Inject an MSI coming from the irq routing layer.
 *
 * XXX We ignore the target address for now, as we only support
 * a single MSI bank.
 */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level,
		bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
/*
 * NOTE(review): this function is TRUNCATED in this chunk -- only the
 * signature ("conststruct" is a fused token) and the error-code
 * initialization survive; the body (presumably a switch on ue->type
 * filling in *e) is missing, as is the closing brace.  Do not edit
 * without the full source.
 */
int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, conststruct kvm_irq_routing_entry *ue)
{ int r = -EINVAL;
/*
 * NOTE(review): the following trailing text is an unrelated German
 * web-page disclaimer that was accidentally appended during extraction;
 * it is not part of this source file.  Preserved here, commented out,
 * with an English translation.
 *
 * Original: "Die Informationen auf dieser Webseite wurden nach bestem
 * Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit
 * gestellten Informationen zugesichert. Bemerkung: Die farbliche
 * Syntaxdarstellung und die Messung sind noch experimentell."
 *
 * Translation: "The information on this web page was carefully compiled
 * to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */