static void icp_native_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	/* Clear any pending IPI */
	icp_native_set_qirr(cpu, 0xff);
}
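/*
 * Flush a pending IPI when a CPU is being torn down: EOI it, but
 * deliberately leave the CPPR at 0 so no further interrupts are
 * presented to this CPU.
 */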
static void icp_native_flush_ipi(void)
{
	/* We take the ipi irq but never return, so we need to EOI the
	 * IPI while leaving our priority at 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging the idle loop instead?
	 * Or creating some task to be scheduled?
	 */

	/* EOI: CPPR 0 in the top byte, IPI vector in the low 24 bits */
	icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}
/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_native_flush_interrupt(void)
{
	unsigned int xirr = icp_native_get_xirr();
	unsigned int vec = xirr & 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return;
	if (vec == XICS_IPI) {
		/* Clear pending IPI */
		int cpu = smp_processor_id();

		kvmppc_clear_host_ipi(cpu);
		icp_native_set_qirr(cpu, 0xff);
	} else {
		pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
		       vec);
		xics_mask_unknown_vec(vec);
	}

	/* EOI the interrupt */
	icp_native_set_xirr(xirr);
}
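/*
 * Map the ICP MMIO registers for the Linux CPU whose hardware
 * (interrupt server) number is hw_id: reserve the MMIO range, ioremap
 * it into icp_native_regs[] and record the physical address for KVM.
 * Quietly returns 0 when hw_id matches no present CPU.
 */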
static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
					 unsigned long size)
{
	char *rname;
	int i, cpu = -1;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			cpu = i;
			break;
		}
	}

	/* Fail, skip that CPU. Don't print, it's normal, some XICS come up
	 * with way more entries in there than you have CPUs.
	 */
	if (cpu == -1)
		return 0;
	/* Name for the reserved MMIO region */
	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
			  cpu, hw_id);
	if (!rname)
		return -ENOMEM;
	if (!request_mem_region(addr, size, rname)) {
		pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
			cpu, hw_id);
		return -EBUSY;
	}

	icp_native_regs[cpu] = ioremap(addr, size);
	kvmppc_set_xics_phys(cpu, addr);
	if (!icp_native_regs[cpu]) {
		pr_warn("icp_native: Failed ioremap for CPU %d, interrupt server #0x%x, addr %#lx\n",
			cpu, hw_id, addr);
		release_mem_region(addr, size);
		return -ENOMEM;
	}

	return 0;
}
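/*
 * Parse one ICP device-tree node: read the base interrupt server
 * number (and server count) from "ibm,interrupt-server-ranges", then
 * map each "reg" entry onto the CPU with the matching hardware number,
 * advancing *indx for every register block handled.
 */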
static int __init icp_native_init_one_node(struct device_node *np,
					   unsigned int *indx)
{
	unsigned int ilen;
	const __be32 *ireg;
	int i;
	int num_reg;
	int num_servers = 0;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);

	/* Does that ever happen? We'll know soon enough... but even good
	 * old f80 does have that property.
	 */
	WARN_ON((ireg == NULL) || (ilen != 2 * sizeof(u32)));

	if (ireg) {
		*indx = of_read_number(ireg, 1);
		if (ilen >= 2 * sizeof(u32))
			num_servers = of_read_number(ireg + 1, 1);
	}

	num_reg = of_address_count(np);
	if (num_servers && (num_servers != num_reg)) {
		pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
		       num_reg, num_servers);
		return -1;
	}
	for (i = 0; i < num_reg; i++) {
		struct resource r;
		int err;

		err = of_address_to_resource(np, i, &r);
		if (err) {
			pr_err("icp_native: Could not translate ICP MMIO for interrupt server 0x%x (%d)\n",
			       *indx, err);
			return -1;
		}

		if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
			return -1;
		(*indx)++;
	}
	return 0;
}