buf = of_get_flat_dt_prop(dt_root, name, NULL); if (!buf) return -EINVAL;
if (cpulist_parse(buf, cpumask)) return -EINVAL;
return 0;
}
/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	/* Missing/unparsable DT property: fall back to "all CPUs possible" */
	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	/* The boot (master) CPU must always be in the possible set */
	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missed in cpu possible mask!");

	init_cpu_possible(&cpumask);
}
/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is say common to several platforms (hence not part of
 *   platform specific int_early() hook)
 */
void __init smp_init_cpus(void)
{
	void (*early_smp_init)(void);

	arc_init_cpu_possible();

	/* Optional platform hook for shared multi-core IP setup */
	early_smp_init = plat_smp_ops.init_early_smp;
	if (early_smp_init)
		early_smp_init();
}
/* called from init ( ) => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * if platform didn't set the present map already, do it now
	 * boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}
/* Arch hook after all secondaries are booted; nothing to do on ARC */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;
#ifdef CONFIG_ISA_ARCOMPACT
#define __boot_read(f) f #define __boot_write(f, v) f = v
/* * The very first "C" code executed by secondary * Called from asm stub in head.S * "current"/R25 already setup by low level boot code
*/ void start_kernel_secondary(void)
{ struct mm_struct *mm = &init_mm; unsignedint cpu = smp_processor_id();
/* * Called from kernel_init( ) -> smp_init( ) - for each CPU * * At this point, Secondary Processor is "HALT"ed: * -It booted, but was halted in head.S * -It was configured to halt-on-reset * So need to wake it up. * * Essential requirements being where to run from (PC) and stack (SP)
*/ int __cpu_up(unsignedint cpu, struct task_struct *idle)
{ unsignedlong wait_till;
secondary_idle_tsk = idle;
pr_info("Idle Task [%d] %p", cpu, idle);
pr_info("Trying to bring up CPU%u ...\n", cpu);
if (plat_smp_ops.cpu_kick)
plat_smp_ops.cpu_kick(cpu,
(unsignedlong)first_lines_of_secondary); else
arc_default_smp_cpu_kick(cpu, (unsignedlong)NULL);
/* wait for 1 sec after kicking the secondary */
wait_till = jiffies + HZ; while (time_before(jiffies, wait_till)) { if (cpu_online(cpu)) break;
}
if (!cpu_online(cpu)) {
pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu); return -1;
}
secondary_idle_tsk = NULL;
return 0;
}
/*****************************************************************************/ /* Inter Processor Interrupt Handling */ /*****************************************************************************/
/* * In arches with IRQ for each msg type (above), receiver can use IRQ-id to * figure out what msg was sent. For those which don't (ARC has dedicated IPI * IRQ), the msg-type needs to be conveyed via per-cpu data
*/
pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
local_irq_save(flags);
/* * Atomically write new msg bit (in case others are writing too), * and read back old value
*/ do { new = old = *ipi_data_ptr; new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
/* * Call the platform specific IPI kick function, but avoid if possible: * Only do so if there's no pending msg from other concurrent sender(s). * Otherwise, receiver will see this msg as well when it takes the * IPI corresponding to that msg. This is true, even if it is already in * IPI handler, because !@old means it has not yet dequeued the msg(s) * so @new msg can be a free-loader
*/ if (plat_smp_ops.ipi_send && !old)
plat_smp_ops.ipi_send(cpu);
staticinlineint __do_IPI(unsignedlong msg)
{ int rc = 0;
switch (msg) { case IPI_RESCHEDULE:
scheduler_ipi(); break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt(); break;
case IPI_CPU_STOP:
ipi_cpu_stop(); break;
default:
rc = 1;
}
return rc;
}
/*
 * arch-common ISR to handle for inter-processor interrupts
 * Has hooks for platform specific IPI
 */
static irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	/*
	 * Guard against a spurious IPI with no msg pending: feeding 0 into
	 * __ffs() below is undefined behavior.
	 */
	if (!pending)
		return IRQ_HANDLED;

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}
/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.14 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.