/*
 * Unmask and enable the specified interrupts. Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_mmio *mmio = &tile->mmio;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	/* Enable (IER) and unmask (IMR) exactly the requested bits. */
	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read so the writes reach the hardware before we return. */
	xe_mmio_read32(mmio, IMR(irqregs));
}
if (xe_gt_is_main_type(gt)) { /* Enable interrupts for each engine class */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); if (ccs_mask)
xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
/* Unmask interrupts for each engine instance */
xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask); if (bcs_mask & (BIT(1)|BIT(2)))
xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); if (bcs_mask & (BIT(3)|BIT(4)))
xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); if (bcs_mask & (BIT(5)|BIT(6)))
xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); if (bcs_mask & (BIT(7)|BIT(8)))
xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); if (ccs_mask & (BIT(0)|BIT(1)))
xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask); if (ccs_mask & (BIT(2)|BIT(3)))
xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
}
if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { /* Enable interrupts for each engine class */
xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
/* Unmask interrupts for each engine instance */
xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
/* * the heci2 interrupt is enabled via the same register as the * GSCCS interrupts, but it has its own mask register.
*/ if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
} elseif (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
}
if (gsc_mask) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
} if (heci_mask)
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
/* * NB: Specs do not specify how long to spin wait, * so we do ~100us as an educated guess.
*/
timeout_ts = (local_clock() >> 10) + 100; do {
ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
} while (!(ident & INTR_DATA_VALID) &&
!time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & INTR_DATA_VALID))) {
drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
bank, bit, ident); return 0;
}
if (class == XE_ENGINE_CLASS_OTHER) { /* * HECI GSCFI interrupts come from outside of GT. * KCR irqs come from inside GT but are handled * by the global PXP subsystem.
*/ if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec); elseif (instance == OTHER_KCR_INSTANCE)
xe_pxp_irq_handler(xe, intr_vec); else
gt_other_irq_handler(engine_gt, instance, intr_vec);
}
}
}
spin_unlock(&xe->irq.lock);
}
/* * Top-level interrupt handler for Xe_LP platforms (which did not have * a "master tile" interrupt register.
*/ static irqreturn_t xelp_irq_handler(int irq, void *arg)
{ struct xe_device *xe = arg; struct xe_tile *tile = xe_device_get_root_tile(xe);
u32 master_ctl, gu_misc_iir; unsignedlong intr_dw[2];
u32 identity[32];
if (!atomic_read(&xe->irq.enabled)) return IRQ_NONE;
if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0) continue;
master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);
/* * We might be in irq handler just when PCIe DPC is initiated * and all MMIO reads will be returned with all 1's. Ignore this * irq as device is inaccessible.
*/ if (master_ctl == REG_GENMASK(31, 0)) {
drm_dbg(&tile_to_xe(tile)->drm, "Ignore this IRQ as device might be in DPC containment.\n"); return IRQ_HANDLED;
}
/* * Display interrupts (including display backlight operations * that get reported as Gunit GSE) would only be hooked up to * the primary tile.
*/ if (id == 0) { if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
xe_i2c_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
}
/* * The tile's top-level status register should be the last one * to be reset to avoid possible bit re-latching from lower * level interrupts.
*/ if (GRAPHICS_VERx100(xe) >= 1210) {
for_each_tile(tile, xe, id)
dg1_irq_reset_mstr(tile);
}
}
/* * ASLE backlight operations are reported via GUnit GSE interrupts * on the root tile.
*/
unmask_and_enable(xe_device_get_root_tile(xe),
GU_MISC_IRQ_OFFSET, GU_MISC_GSE);
/**
 * xe_irq_suspend - Quiesce interrupt handling for suspend.
 * @xe: the &xe_device being suspended
 *
 * Stops accepting new interrupts, flushes any handler still running
 * (presumably via the MSI/MSI-X synchronize helpers — they wrap
 * synchronize_irq()), and finally masks the hardware.
 */
void xe_irq_suspend(struct xe_device *xe)
{
	/* Refuse any interrupt that arrives from here on. */
	atomic_set(&xe->irq.enabled, 0);

	/* Flush in-flight handlers before touching the hardware. */
	if (xe_device_has_msix(xe))
		xe_irq_msix_synchronize_irq(xe);
	else
		xe_irq_msi_synchronize_irq(xe);

	/* Turn the interrupts off at the hardware level. */
	xe_irq_reset(xe);
}
/**
 * xe_irq_resume - Re-arm interrupt handling after resume.
 * @xe: the &xe_device being resumed
 *
 * No lock is needed here:
 *  1. no interrupt can arrive before the postinstall below
 *  2. the display side is not yet resumed
 */
void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	atomic_set(&xe->irq.enabled, 1);

	/* Bring the hardware back up: clear stale state, then turn irqs on. */
	xe_irq_reset(xe);
	xe_irq_postinstall(xe);

	/* Re-enable the per-engine interrupts on every GT. */
	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}
/* MSI-X related definitions and functions below. */

/*
 * Statically assigned MSI-X vector indices.
 * NOTE(review): GUC2HOST_MSIX presumably carries GuC-to-host notifications —
 * confirm against the users of this vector.
 */
enum xe_irq_msix_static {
	GUC2HOST_MSIX = 0,
	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX, /* Must be last */
	NUM_OF_STATIC_MSIX, /* count of static vectors; valid because DEFAULT_MSIX is last */
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.