// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
/*
 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
 * are potentially stolen by the secure side. Some code, especially code dealing
 * with hwirq IDs, is simplified by accounting for all 16.
 */
#define SGI_NR		16
/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
gic_write_pmr(BIT(8 - gic_get_pribits()));
val = gic_read_pmr();
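	/*
	 * Worked illustration of the probe above, using the semantics from
	 * the earlier comment block (pribits == 8 is an assumed example
	 * value): we write BIT(8 - 8) == 0x01. With SCR_EL3.FIQ == 1 the
	 * GIC stores (0x01 >> 1) | 0x80 == 0x80, and the non-secure
	 * read-back left-shifts that out of the 8-bit field, so val == 0
	 * means Group0 is owned by EL3. With SCR_EL3.FIQ == 0 we read back
	 * 0x01 and know Group0 is ours.
	 */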
	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * way priorities are presented in ICC_PMR_EL1 and in the distributor:
	 *
	 *   GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
	 *   -------------------------------------------------------
	 *        1       |      -      |  unchanged  |  unchanged
	 *   -------------------------------------------------------
	 *        0       |      1      |  non-secure |  non-secure
	 *   -------------------------------------------------------
	 *        0       |      0      |  unchanged  |  non-secure
	 *
	 * In the non-secure view reads and writes are modified:
	 *
	 * - A value written is right-shifted by one and the MSB is set,
	 *   forcing the priority into the non-secure range.
	 *
	 * - A value read is left-shifted by one.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we program the non-secure values into
	 * the distributor to match the PMR values we want.
	 */
	if (cpus_have_group0 && !cpus_have_security_disabled) {
dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
}
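	/*
	 * A minimal sketch of what __gicv3_prio_to_ns() used above could look
	 * like, assuming it simply undoes the non-secure view transformation
	 * described in the comment block (the actual definition lives
	 * elsewhere in the driver):
	 *
	 *	#define __gicv3_prio_to_ns(p)	(((p) & ~0x80) << 1)
	 *
	 * Worked example: p == 0xc0 gives (0xc0 & ~0x80) << 1 == 0x80; the
	 * (re)distributor then presents (0x80 >> 1) | 0x80 == 0xc0 to the
	 * CPUIF, matching the PMR view we program.
	 */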
	/*
	 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
	 * registers are directed to the chip that owns the SPI. The
	 * alias region can also be used for writes to the GICD_In{E}
	 * registers, except GICD_ICENABLERn. Each chip has support
	 * for 320 {E}SPIs. Mappings for all 4 chips:
	 *    Chip0 = 32-351
	 *    Chip1 = 352-671
	 *    Chip2 = 672-991
	 *    Chip3 = 4096-4415
	 */
	switch (__get_intid_range(hwirq)) {
	case SPI_RANGE:
		chip = (hwirq - 32) / 320;
		break;
	case ESPI_RANGE:
		chip = 3;
		break;
	default:
		unreachable();
	}
	return t241_dist_base_alias[chip];
}
return gic_data.dist_base;
}
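/*
 * Worked example for the T241 mapping above: SPI hwirq 400 selects
 * chip = (400 - 32) / 320 == 1, i.e. Chip1, which owns INTIDs 352-671.
 * Any ESPI (INTID 4096 and up) is owned by Chip3.
 */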
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT)
		pr_err_ratelimited("RWP timeout, gone fishing\n");
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}
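/*
 * Note on the polling in gic_do_wait_for_rwp():
 * readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h> takes
 * (addr, val, cond, delay_us, timeout_us); it re-reads the register into
 * @val roughly every delay_us microseconds until @cond becomes true, and
 * returns -ETIMEDOUT otherwise. With (1, USEC_PER_SEC) we therefore spin
 * for at most one second waiting for the RWP bit to clear.
 */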
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 val;
	int ret;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;
rbase = gic_data_rdist_rd_base();
	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
writel_relaxed(val, rbase + GICR_WAKER);
	if (!enable) {	/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
}
ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT) {
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
}
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
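/*
 * Worked example for the ESPI remapping above: an ESPI with hwirq 4100
 * yields *index == 4100 - ESPI_BASE_INTID == 4, and a GICD_ISENABLER
 * access is redirected to GICD_ISENABLERnE, i.e. bit 4 of the first
 * extended enable register.
 */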
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;
case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}

		reg = GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
}
gic_poke_irq(d, reg);
	/*
	 * Force read-back to guarantee that the active state has taken
	 * effect, and won't race with a guest-driven deactivation.
	 */
	if (reg == GICD_ISACTIVER)
		gic_peek_irq(d, reg);
	return 0;
}
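/*
 * The read-back in gic_irq_set_irqchip_state() relies on a standard MMIO
 * idiom: a read from a device following a posted write to the same device
 * cannot complete before the write has landed, so gic_peek_irq() guarantees
 * the GICD_ISACTIVER update is visible to the GIC before we return.
 */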
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
}
	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return -EINVAL;
	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
u32 idx = gic_get_rdist_index(d);
		/*
		 * Setting up a percpu interrupt as NMI, only switch the
		 * handler for the first NMI.
		 */
		if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
refcount_set(&rdist_nmi_refs[idx], 1);
desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
}
} else {
desc->handle_irq = handle_fasteoi_nmi;
}
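	/*
	 * Illustration of the refcount dance above (hypothetical sequence):
	 * the first CPU to set up an NMI on a given PPI finds the refcount
	 * at 0, so refcount_inc_not_zero() fails, the count is set to 1 and
	 * the handler is switched to the NMI flow. Subsequent per-CPU setups
	 * only bump the count; teardown mirrors this and only restores the
	 * normal handler once the count drops back to zero.
	 */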
if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
}
	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return;
	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
u32 idx = gic_get_rdist_index(d);
		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
desc->handle_irq = handle_percpu_devid_irq;
} else {
desc->handle_irq = handle_fasteoi_irq;
}
static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
	enum gic_intid_range range;

	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
		return false;
range = get_intid_range(d);
	/*
	 * The workaround is needed if the IRQ is an SPI and
	 * the target cpu is different from the one we are
	 * executing on.
	 */
	return (range == SPI_RANGE || range == ESPI_RANGE) &&
!cpumask_test_cpu(raw_smp_processor_id(),
irq_data_get_effective_affinity_mask(d));
}
	if (gic_arm64_erratum_2941627_needed(d)) {
		/*
		 * Make sure the GIC stream deactivate packet
		 * issued by ICC_EOIR1_EL1 has completed before
		 * deactivating through GICD_IACTIVER.
		 */
dsb(sy);
gic_poke_irq(d, GICD_ICACTIVER);
}
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
if (!gic_arm64_erratum_2941627_needed(d))
		gic_write_dir(irqd_to_hwirq(d));
	else
gic_poke_irq(d, GICD_ICACTIVER);
}
	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;
if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
base = gic_dist_base_alias(d);
	ret = gic_configure_irq(index, type, base + offset);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
ret = 0;
}
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
gic_write_dir(irqnr);
} else {
write_gicreg(irqnr, ICC_EOIR1_EL1);
isb();
}
}
/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
write_gicreg(irqnr, ICC_EOIR1_EL1);
isb();
}
static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;
/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
u32 irqnr;
irqnr = gic_read_iar();
is_nmi = gic_rpr_is_nmi_prio();
if (is_nmi) {
nmi_enter();
__gic_handle_nmi(irqnr, regs);
nmi_exit();
}
if (gic_prio_masking_enabled()) {
gic_pmr_mask_irqs();
gic_arch_enable_irqs();
}
if (!is_nmi)
__gic_handle_irq(irqnr, regs);
}
/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
u64 pmr;
u32 irqnr;
	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and there is an IRQ pending. The IRQ
	 * would then be taken in NMI context, something that nobody
	 * wants to debug twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
pmr = gic_read_pmr();
gic_pmr_mask_irqs();
isb();
irqnr = gic_read_iar();
gic_write_pmr(pmr);
/* Disable the distributor */
writel_relaxed(0, base + GICD_CTLR);
gic_dist_wait_for_rwp();
	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
}
for (i = 0; i < GIC_ESPI_NR; i += 32)
writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
for (i = 0; i < GIC_ESPI_NR; i += 16)
writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
for (i = 0; i < GIC_ESPI_NR; i += 4)
writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
base + GICD_IPRIORITYRnE + i);
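	/*
	 * Worked example for the priority loop above, assuming an example
	 * value of dist_prio_irq == 0xa0: REPEAT_BYTE_U32(0xa0) is
	 * 0xa0a0a0a0, and since GICD_IPRIORITYRnE holds one byte per INTID,
	 * each 32-bit write covers four consecutive ESPIs, hence the i += 4
	 * stride with a byte offset of i.
	 */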
/* Now do the common stuff */
gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);
	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
pr_info("Enabling SGIs without active state\n");
val |= GICD_CTLR_nASSGIreq;
}
/* Enable distributor with ARE, Group1, and wait for it to drain */
writel_relaxed(val, base + GICD_CTLR);
gic_dist_wait_for_rwp();
	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_cpu_to_affinity(smp_processor_id());
	for (i = 32; i < GIC_LINE_NR; i++)
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
for (i = 0; i < GIC_ESPI_NR; i++)
gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
u64 typer;
u32 reg;
pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
smp_processor_id(), mpidr,
(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
}
	/* Try next one */
	return 1;
}
static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;
/* We couldn't even deal with ourselves... */
WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}
/* Deactivate any present vPE */
	val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
	if (val & GICR_VPENDBASER_Valid)
gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
ptr + SZ_128K + GICR_VPENDBASER);
/* Mark the VPE table as invalid */
val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
val &= ~GICR_VPROPBASER_4_1_VALID;
gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
}
	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
!!(ctlr & GICR_CTLR_IR) |
gic_data.rdists.has_rvpeid);
gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
static void gic_cpu_sys_reg_enable(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = gic_cpu_to_affinity(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
u32 pribits;
pribits = gic_get_pribits();
group0 = gic_has_group0();
	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Check that all CPUs use the same priority space.
		 *
		 * If there's a mismatch with the boot CPU, the system is
		 * likely to die as interrupt masking will not work properly on
		 * all CPUs.
		 */
WARN_ON(group0 != cpus_have_group0);
WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
}
	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
gic_write_bpr1(0);
	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
}
	/* Always whack Group0 before Group1 */
	if (group0) {
		switch(pribits) {
		case 8:
		case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
write_gicreg(0, ICC_AP0R0_EL1);
}
isb();
}
	switch(pribits) {
	case 8:
	case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
write_gicreg(0, ICC_AP1R0_EL1);
}
isb();
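	/*
	 * For reference: pribits is the number of implemented priority bits,
	 * which determines how many active-priority registers exist. With
	 * pribits == 5 only ICC_AP[01]R0_EL1 is present, while pribits == 7
	 * or 8 implies all four AP[01]Rn registers, hence the fallthrough
	 * ladders above.
	 */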
/* ... and let's hit the road... */
gic_write_grpen1(1);
/* Keep the RSS capability status in per_cpu variable */
per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
	/* Check that all CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
		if (need_rss && (!have_rss))
pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
cpu, (unsignedlong)mpidr,
i, (unsignedlong)gic_cpu_to_affinity(i));
}
	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
pr_crit_once("RSS is required but GICD doesn't support it\n");
}
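/*
 * Background for the RSS checks above (sketch with a made-up MPIDR): SGI
 * target lists can only address Aff0 values 0-15, so a CPU whose MPIDR has
 * Aff0 == 0x23 (MPIDR_RS() == 2) is only reachable by programming a non-zero
 * Range Selector into ICC_ASGI1R_EL1, which requires RSS support in both the
 * CPU interface (ICC_CTLR_EL1.RSS) and the distributor (GICD_TYPER.RSS).
 */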
/*
 * gic_starting_cpu() is called after the last point where cpuhp is allowed
 * to fail. So pre check for problems earlier.
 */
static int gic_check_rdist(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &broken_rdists))
		return -EINVAL;

	return 0;
}
	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
gic_unmask_irq(d);
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* Handle pure domain searches */
	if (!fwspec->param_count)
		return d->bus_token == bus_token;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
	if (WARN_ON_ONCE(ret))
		return 0;

	if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
		return d == gic_data.domain;
	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	ppi_idx = __gic_get_ppi_index(hwirq);
	return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
}
	/*
	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
	 * not being an actual ARM implementation). The saving grace is
	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
	 * HIP07 doesn't even have a proper IIDR, and still pretends to
	 * have ESPI. In both cases, put them right.
	 */
	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
		/* Zero both ESPI and the RES0 field next to it... */
		d->rdists.gicd_typer &= ~GENMASK(9, 8);
		return true;
}
	/* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
		return false;
	/* Find the chips based on GICR regions PHYS addr */
	for (i = 0; i < gic_data.nr_redist_regions; i++) {
chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
(u64)gic_data.redist_regions[i].phys_base));
}
	if (hweight32(chip_bmask) < 3)
		return false;
	/* Setup GICD alias regions */
	for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
		if (chip_bmask & BIT(i)) {
phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
phys |= FIELD_PREP(T241_CHIPN_MASK, i);
t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
WARN_ON_ONCE(!t241_dist_base_alias[i]);
}
}
	static_branch_enable(&gic_nvidia_t241_erratum);
	return true;
}
	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		/* GICv4.x features stay disabled if erratum T241-FABRIC-4 is present */
gic_data.rdists.has_rvpeid = true;
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
gic_data.rdists.has_vpend_valid_dirty = true;
}
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;
return 0;
}
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;
nr_parts = of_get_child_count(parts_node);
	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;
if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node, nr_redist_regions);

	return 0;
out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
iounmap(rdist_regs[i].redist_base);
kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
	/* Neither enabled nor online capable means it doesn't exist, skip it */
	if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;
	/*
	 * Capable but disabled CPUs can be brought online later. What about
	 * the redistributor? ACPI doesn't want to say!
	 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
	 * Otherwise, prevent such CPUs from being brought online.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED)) {
		int cpu = get_cpu_for_acpi_id(gicc->uid);

		pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
		if (cpu >= 0)
			cpumask_set_cpu(cpu, &broken_rdists);

		return 0;
}
	/*
	 * If GICC is enabled and has a valid GICR base address, then it means
	 * the GICR base is presented via GICC. The redistributor is only known
	 * to be accessible if the GICC is marked as enabled. If this bit is not
	 * set, we'd need to add the redistributor at runtime, which isn't
	 * supported.
	 */
	if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
acpi_data.enabled_rdists++;
return 0;
}
static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor descriptions: GICR and GICC subtables have to
	 * be mutually exclusive.
	 */
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
}