	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}
bool vgic_supports_direct_msis(struct kvm *kvm)
{
	/*
	 * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
	 * indirectly allowing userspace to control whether or not vPEs are
	 * allocated for the VM.
	 */
	if (system_supports_direct_sgis() && !vgic_supports_direct_sgis(kvm))
		return false;
/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */
	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (vgic_supports_direct_sgis(vcpu->kvm))
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}
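/*
 * Worked example for the GICD_TYPER encoding above (illustrative numbers,
 * not taken from this excerpt): with nr_spis == 160 and 32 private IRQs,
 * the register reports ((160 + 32) >> 5) - 1 = 5 in ITLinesNumber, i.e.
 * SPIs up to INTID 32 * (5 + 1) - 1 = 191. The "(... - 1) << 19" terms
 * likewise encode the supported number of interrupt ID bits minus one,
 * matching the architectural definition of the IDbits field.
 */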
		mutex_unlock(&vcpu->kvm->arch.config_lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}
		if (reg == val)
			return 0;
		if (vgic_initialized(vcpu->kvm))
			return -EBUSY;
		if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap)
			return -EINVAL;
		if (!system_supports_direct_sgis() && val)
			return -EINVAL;

		dist->nassgicap = val & GICD_TYPER2_nASSGIcap;
		return 0;
	case GICD_IIDR:
		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
			return -EINVAL;

		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
		switch (reg) {
		case KVM_VGIC_IMP_REV_2:
		case KVM_VGIC_IMP_REV_3:
			dist->implementation_rev = reg;
			return 0;
		default:
			return -EINVAL;
		}
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!vgic_supports_direct_sgis(vcpu->kvm))
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}
	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, intid);
	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
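	/*
	 * Note on the masking above: GICD_IROUTER<n> carries Aff0/Aff1/Aff2
	 * in bits [23:0] and Aff3 in bits [39:32]; since Aff3 is not
	 * implemented here, GENMASK(23, 0) keeps exactly the affinity
	 * levels we care about.
	 */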
	if (!(val & GICR_CTLR_ENABLE_LPIS)) {
		/*
		 * Don't disable if RWP is set, as there is already an
		 * ongoing disable. Funky guest...
		 */
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
					      GICR_CTLR_ENABLE_LPIS,
					      GICR_CTLR_RWP);
		if (ctlr != GICR_CTLR_ENABLE_LPIS)
			return;
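		/*
		 * At this point the cmpxchg above has atomically turned
		 * ENABLE_LPIS into RWP: only the writer that actually
		 * observed ENABLE_LPIS carries on with the disable, while a
		 * redundant or racing write simply returns here.
		 */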
	/*
	 * The rdist is the last one of its redist region: check whether
	 * there is no other contiguous rdist region starting at its end.
	 */
	list_for_each_entry(iter, rd_regions, list) {
		if (iter->base == end && iter->free_index > 0)
			return false;
	}

	return true;
}
/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_SameAsInner;
	}
}
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
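/*
 * Illustrative use of the helpers above (the GICR_PROPBASER field names are
 * assumed here, they do not appear in this excerpt): sanitise one attribute
 * field by masking it out, filtering it and merging it back, e.g.
 *
 *	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
 *				  GICR_PROPBASER_SHAREABILITY_SHIFT,
 *				  vgic_sanitise_shareability);
 */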
	/*
	 * If the guest wrote only to the upper 32bit part of the
	 * register, drop the write on the floor, as it is only for
	 * vPEs (which we don't support for obvious reasons).
	 *
	 * Also discard the access if LPIs are not enabled.
	 */
	if ((addr & 4) || !vgic_lpis_enabled(vcpu))
		return;

	intid = lower_32_bits(val);
	if (intid < VGIC_MIN_LPI)
		return;
	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		goto out_unlock;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set. Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		goto out_unlock;

	if (!vgic_v3_check_base(kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long c;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so iterate over the previous ones. */
		int i;

		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
	}

	return ret;
}
/**
 * vgic_v3_alloc_redist_region - Allocate a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (ie. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
				       gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
			    : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
	int ret;
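	/*
	 * Sizing example (assuming KVM_VGIC_V3_REDIST_SIZE covers the two
	 * 64K frames of a single redistributor): count == 4 requests room
	 * for exactly four redistributors, while count == 0 falls back to
	 * the legacy single region sized for the current online vcpus.
	 */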
	/* cross the end of memory? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);

		/* Don't mix single region and discrete redist regions */
		if (!count && rdreg->count)
			return -EINVAL;

		if (!count)
			return -EEXIST;

		if (index != rdreg->index + 1)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
	if (ret)
		goto free;
	/* Garbage collect the region */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (vcpu->arch.vgic_cpu.rdreg == rdreg)
			vcpu->arch.vgic_cpu.rdreg = NULL;
	}

	list_del(&rdreg->list);
	kfree(rdreg);
}
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	mutex_lock(&kvm->arch.config_lock);
	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	mutex_unlock(&kvm->arch.config_lock);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU. Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret) {
		struct vgic_redist_region *rdreg;

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
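/*
 * For example (illustrative), SGI_AFFINITY_LEVEL(reg, 2) extracts Aff2 from
 * its ICC_SGI1R_EL1 position (bits [39:32]) and shifts it down to the Aff2
 * position of an MPIDR value (bits [23:16]) via MPIDR_LEVEL_SHIFT(2).
 */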
	/*
	 * An access targeting Group0 SGIs can only generate
	 * those, while an access targeting Group1 SGIs can
	 * generate interrupts of either group.
	 */
	if (!irq->group || allow_group1) {
		if (!irq->hw) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			/* HW SGI? Ask the GIC to inject it */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING, true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

	vgic_put_irq(vcpu->kvm, irq);
}
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 *
 * If the interrupt routing mode bit is not set, we iterate over the Aff0
 * bits and signal the VCPUs matching the provided Aff{3,2,1}.
 *
 * If this bit is set, we signal all, but not the calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	unsigned long target_cpus;
	u64 mpidr;
	u32 sgi, aff0;
	unsigned long c;

	sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg);

	/* Broadcast */
	if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) {
		kvm_for_each_vcpu(c, c_vcpu, kvm) {
			/* Don't signal the calling VCPU */
			if (c_vcpu == vcpu)
				continue;

			vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1);
		}

		return;
	}

	/* We iterate over affinities to find the corresponding vcpus */
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
	target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg);
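	/*
	 * From here on, the non-broadcast path (per the function header
	 * above) would walk the Aff0 bits in target_cpus and queue the SGI
	 * on each vcpu whose MPIDR matches the Aff3.Aff2.Aff1 value
	 * assembled above.
	 */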