/*
 * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
 * (which is prior to any of our smp_prepare_cpu crap), in order to set
 * up the... per_cpu areas.
 */
/*
 * Per-CPU mailbox of pending inter-processor-interrupt messages.
 * Each bit in @bits corresponds to one IPI_* message type; senders
 * set bits and the receiving CPU atomically drains them in handle_ipi().
 */
struct ipi_data {
	unsigned long bits;	/* fixed: was the fused token "unsignedlong" */
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
staticinlinevoid __handle_ipi(unsignedlong *ops, struct ipi_data *ipi, int cpu)
{ unsignedlong msg = 0; do {
msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
switch (msg) {
case IPI_TIMER:
ipi_timer(); break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt(); break;
case IPI_CPU_STOP: /* * call vmstop()
*/
__vmstop(); break;
case IPI_RESCHEDULE:
scheduler_ipi(); break;
}
} while (msg < BITS_PER_LONG);
}
/*
 * Used for IPI call from other CPU's to unmask int.
 * @info carries the interrupt number, smuggled through the void pointer
 * as an integer; hand it to the hypervisor's local-enable call.
 */
void smp_vm_unmask_irq(void *info)
{
	long irq = (long) info;

	__vmintop_locen(irq);
}
/* * This is based on Alpha's IPI stuff. * Supposed to take (int, void*) as args now. * Specifically, first arg is irq, second is the irq_desc.
*/
static irqreturn_t handle_ipi(int irq, void *desc)
{ int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); unsignedlong ops;
/*
 * Mark the CPUs that may be brought online and register the IPI IRQ.
 *
 * @max_cpus: number of CPUs the system should prepare for.
 *
 * Marks CPUs [0, max_cpus) present, and — only when more than one CPU
 * will run — requests BASE_IPI_IRQ with handle_ipi() as its handler.
 * A failed request_irq() is reported via pr_err() but not propagated
 * (the function returns void).
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i, irq = BASE_IPI_IRQ;

	/*
	 * should eventually have some sort of machine
	 * descriptor that has this stuff
	 */

	/* Right now, let's just fake it. */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/* Also need to register the interrupts for IPI */
	if (max_cpus > 1) {
		if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
				"ipi_handler", NULL))
			pr_err("Failed to request irq %d (ipi_handler)\n", irq);
	}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.