/*
 * Hardcoded ID_BITS limit for systems supporting only a 1-level IST
 * table. Systems supporting only a 1-level IST table aren't expected
 * to require more than 2^12 LPIs. Tweak as required.
 */
#define LPI_ID_BITS_LINEAR	12
/*
 * The polling wait (in gicv5_wait_for_op_atomic()) on a GIC register
 * provides the memory barriers (through MMIO accessors)
 * required to synchronize CPU and GIC access to IST memory.
 *
 * Returns 0 once GICV5_IRS_IST_STATUSR reports IDLE, or a negative
 * error code if the wait times out.
 */
static int gicv5_irs_ist_synchronise(struct gicv5_irs_chip_data *irs_data)
{
	return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_IST_STATUSR,
					GICV5_IRS_IST_STATUSR_IDLE, NULL);
}
/* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */
n = max(5, lpi_id_bits + 1 + istsz);
l2istsz = BIT(n + 1); /* * Check memory requirements. For a linear IST we cap the * number of ID bits to a value that should never exceed * kmalloc interface memory allocation limits, so this * check is really belt and braces.
*/ if (l2istsz > KMALLOC_MAX_SIZE) {
u8 lpi_id_cap = ilog2(KMALLOC_MAX_SIZE) - 2 + istsz;
pr_warn("Limiting LPI ID bits from %u to %u\n",
lpi_id_bits, lpi_id_cap);
lpi_id_bits = lpi_id_cap;
l2istsz = KMALLOC_MAX_SIZE;
}
ist = kzalloc(l2istsz, GFP_KERNEL); if (!ist) return -ENOMEM;
if (irs_data->flags & IRS_FLAGS_NON_COHERENT)
dcache_clean_inval_poc((unsignedlong)ist,
(unsignedlong)ist + l2istsz); else
dsb(ishst);
ret = gicv5_irs_ist_synchronise(irs_data); if (ret) {
l1ist[index] = 0;
kfree(l2ist); return ret;
}
kmemleak_ignore(l2ist);
/* * Make sure we invalidate the cache line pulled before the IRS * had a chance to update the L1 entry and mark it valid.
*/ if (irs_data->flags & IRS_FLAGS_NON_COHERENT) { /* * gicv5_irs_ist_synchronise() includes memory * barriers (MMIO accessors) required to guarantee that the * following dcache invalidation is not executed before the * IST mapping operation has completed.
*/
dcache_inval_poc((unsignedlong)(l1ist + index),
(unsignedlong)(l1ist + index) + sizeof(*l1ist));
}
return 0;
}
/*
 * Try to match the L2 IST size to the pagesize, and if this is not possible
 * pick the smallest supported L2 size in order to minimise the requirement for
 * physically contiguous blocks of memory as page-sized allocations are
 * guaranteed to be physically contiguous, and are by definition the easiest to
 * find.
 *
 * Fall back to the smallest supported size (in the event that the pagesize
 * itself is not supported) again serves to make it easier to find physically
 * contiguous blocks of memory.
 */
static unsigned int gicv5_irs_l2_sz(u32 idr2)
{
	switch (PAGE_SIZE) {
	case SZ_64K:
		if (GICV5_IRS_IST_L2SZ_SUPPORT_64KB(idr2))
			return GICV5_IRS_IST_CFGR_L2SZ_64K;
		fallthrough;
	case SZ_4K:
		if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2))
			return GICV5_IRS_IST_CFGR_L2SZ_4K;
		fallthrough;
	case SZ_16K:
		if (GICV5_IRS_IST_L2SZ_SUPPORT_16KB(idr2))
			return GICV5_IRS_IST_CFGR_L2SZ_16K;
		break;
	}

	if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2))
		return GICV5_IRS_IST_CFGR_L2SZ_4K;

	/*
	 * NOTE(review): the original closing statement and brace were lost in
	 * formatting; the 16K fallback below is reconstructed from context —
	 * verify against the upstream file.
	 */
	return GICV5_IRS_IST_CFGR_L2SZ_16K;
}
/* * For two level tables we are always supporting the maximum allowed * number of IDs. * * For 1-level tables, we should support a number of bits that * is >= min_lpi_id_bits but cap it to LPI_ID_BITS_LINEAR lest * the level 1-table gets too large and its memory allocation * may fail.
*/ if (two_levels) {
lpi_id_bits = idr2_id_bits;
} else {
lpi_id_bits = max(LPI_ID_BITS_LINEAR, idr2_min_lpi_id_bits);
lpi_id_bits = min(lpi_id_bits, idr2_id_bits);
}
/* * Cap the ID bits according to the CPUIF supported ID bits
*/
lpi_id_bits = min(lpi_id_bits, gicv5_global_data.cpuif_id_bits);
if (two_levels)
l2sz = gicv5_irs_l2_sz(idr2);
istmd = !!FIELD_GET(GICV5_IRS_IDR2_ISTMD, idr2);
l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_4;
if (istmd) {
l2_iste_sz_split = FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, idr2);
/*
 * Look up the interrupt affinity ID (IAFFID) for a given CPU.
 * Returns -ENODEV if the per-CPU IAFFID was never initialised.
 * (Function tail continues beyond the visible extract.)
 */
int gicv5_irs_cpu_to_iaffid(int cpuid, u16 *iaffid)
{
	if (!per_cpu(cpu_iaffid, cpuid).valid) {
		pr_err("IAFFID for CPU %d has not been initialised\n", cpuid);
		return -ENODEV;
	}
int gicv5_spi_irq_set_type(struct irq_data *d, unsignedint type)
{ struct gicv5_irs_chip_data *irs_data = d->chip_data;
u32 selr, cfgr; bool level; int ret;
/* * There is no distinction between HIGH/LOW for level IRQs * and RISING/FALLING for edge IRQs in the architecture, * hence consider them equivalent.
*/ switch (type) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_EDGE_FALLING:
level = false; break; case IRQ_TYPE_LEVEL_HIGH: case IRQ_TYPE_LEVEL_LOW:
level = true; break; default: return -EINVAL;
}
guard(raw_spinlock)(&irs_data->spi_config_lock);
selr = FIELD_PREP(GICV5_IRS_SPI_SELR_ID, d->hwirq);
irs_writel_relaxed(irs_data, selr, GICV5_IRS_SPI_SELR);
ret = gicv5_irs_wait_for_spi_op(irs_data); if (ret) return ret;
ret = gicv5_irs_of_init_affinity(node, irs_data, iaffid_bits); if (ret) {
pr_err("Failed to parse CPU IAFFIDs from the device tree!\n"); goto out_iomem;
}
idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2); if (WARN(!FIELD_GET(GICV5_IRS_IDR2_LPI, idr), "LPI support not available - no IPIs, can't proceed\n")) {
ret = -ENODEV; goto out_iomem;
}
if (irs_data->spi_range) {
pr_info("%s detected SPI range [%u-%u]\n",
of_node_full_name(node),
irs_data->spi_min,
irs_data->spi_min +
irs_data->spi_range - 1);
}
/* * Do the global setting only on the first IRS. * Global properties (iaffid_bits, global spi count) are guaranteed to * be consistent across IRSes by the architecture.
*/ if (list_empty(&irs_nodes)) {
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.