/* * Set mask to halt GFRC if any online core in SMP cluster is halted. * Only works for ARC HS v3.0+, on earlier versions has no effect.
*/ staticvoid mcip_update_gfrc_halt_mask(int cpu)
{ struct bcr_generic gfrc; unsignedlong flags;
u32 gfrc_halt_mask;
READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
/* * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in * GFRC 0x3 version.
*/ if (gfrc.ver < 0x3) return;
/* * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK * and CMD_DEBUG_READ_SELECT.
*/
__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
mcip_mask |= BIT(cpu);
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask); /* * Parameter specified halt cause: * STATUS32[H]/actionpoint/breakpoint/self-halt * We choose all of them (0xF).
*/
__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
/* Update GFRC halt mask as new CPU came online */ if (mp.gfrc)
mcip_update_gfrc_halt_mask(cpu);
/* Update MCIP debug mask as new CPU came online */ if (mp.dbg)
mcip_update_debug_halt_mask(cpu);
}
/*
 * Send an IPI to @cpu. A self-IPI cannot go through ARConnect, so it is
 * emulated with the core-local soft IRQ instead.
 */
static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If receiver already has a pending interrupt, elide sending this one.
	 * Linux cross core calling works well with concurrent IPIs
	 * coalesced into one
	 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

/*
 * Acknowledge an incoming IPI on @irq: find which core(s) sent it and
 * ACK each one. The soft-IRQ self-IPI path has no MCIP state to ACK.
 */
static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare case, multiple concurrent IPIs sent to same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpus can be
	 * "vectored" (multiple bits sets) as opposed to typical single bit
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
/* * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
*/ staticvoid idu_set_dest(unsignedint cmn_irq, unsignedint cpu_mask)
{
__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
/* * ARCv2 IDU HW does not support inverse polarity, so these are the * only interrupt types supported.
*/ if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) return -EINVAL;
/*
 * Enable a common (IDU-routed) interrupt: program a sane default
 * affinity, then unmask it.
 */
static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 *   1. When the kernel is not configured with support of SMP.
	 *   2. When the kernel is configured with support of SMP but upper
	 *      interrupt controllers does not support setting of the affinity
	 *      and cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}
/* Parent interrupts (core-intc) are already mapped */
for (i = 0; i < nr_irqs; i++) { /* Mask all common interrupts by default */
idu_irq_mask_raw(i);
/* * Return parent uplink IRQs (towards core intc) 24,25,..... * this step has been done before already * however we need it to get the parent virq and set IDU handler * as first level isr
*/
virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
BUG_ON(!virq);
irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.