int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the last level cache shared map of each logical CPU */
cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_llc_shared_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which llc shared maps can be computed */
static cpumask_t cpu_llc_shared_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask: keep exactly one online CPU per core */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	/*
	 * Publish the result: each CPU's foreign map is the one-VPE-per-core
	 * mask minus that CPU's own siblings. (The original chunk dropped
	 * this store and the closing brace, so temp_foreign_map was computed
	 * and discarded.)
	 */
	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}
/*
 * Send a 64-bit payload to a remote CPU's mailbox via the IOCSR
 * Mail_Send mechanism, 32 bits at a time (each send carries one
 * 32-bit data half plus box/cpu routing fields).
 *
 * @data:    64-bit value to deliver.
 * @cpu:     destination CPU id as encoded into the send register.
 * @mailbox: destination mailbox index on that CPU.
 */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}
/*
 * Read the pending IPI action bits for this CPU and acknowledge them.
 * Returns the action mask that was pending. (The @cpu parameter is
 * unused in this body — the IOCSR access is implicitly per-CPU.)
 */
static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	wbflush();

	/*
	 * The original chunk was truncated here: a non-void function fell
	 * off the end with no return and no closing brace. Return the
	 * pending action mask, which is the only value this function
	 * computes.
	 */
	return action;
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule.
 */
void arch_smp_send_reschedule(int cpu)
{
	mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	/*
	 * NOTE(review): this block is corrupted — it will not compile as-is.
	 * "unsignedlong" is a fused token (presumably "unsigned long"),
	 * `entry` is declared but never used, and `node`/`rr_node` below are
	 * used with no visible declaration or enclosing per-CPU loop. The
	 * NUMA round-robin logic and pv_spinlock_init() look like they were
	 * spliced in from a different function (boot-CPU/NUMA setup), and
	 * the brace structure does not balance. Recover the original bodies
	 * of both functions before building — do not hand-patch this.
	 */
	unsignedlong entry;
	/*
	 * The mapping between present cpus and nodes has been
	 * built during MADT and SRAT parsing.
	 *
	 * If possible cpus = present cpus here, early_cpu_to_node
	 * will return valid node.
	 *
	 * If possible cpus > present cpus here (e.g. some possible
	 * cpus will be added by cpu-hotplug later), for possible but
	 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
	 * and we just map them to online nodes in round-robin way.
	 * Once hotplugged, new correct mapping will be built for them.
	 */
	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
	else {
		set_cpu_numa_node(cpu, rr_node);
		rr_node = next_node_in(rr_node, node_online_map);
	}
}
pv_spinlock_init();
}
/*
 * Called from init/main.c before smp_init(): set up the boot CPU's
 * MM context and topology maps, and prepare the platform for
 * secondary-CPU bringup.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_llc_shared_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
/*
 * Preprocessor directives must begin at the start of a line; in the
 * original chunk they were fused onto the statements above/below.
 */
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}
int __cpu_up(unsignedint cpu, struct task_struct *tidle)
{
loongson_boot_secondary(cpu, tidle);
/* Wait for CPU to start and be ready to sync counters */ if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(5000))) {
pr_crit("CPU%u: failed to start\n", cpu); return -EIO;
}
/* Wait for CPU to finish startup & mark itself online before return */
wait_for_completion(&cpu_running);
return 0;
}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	/*
	 * NOTE(review): this function is truncated in this chunk — the body
	 * sets up the per-CPU offset but never closes (no terminating brace
	 * here; non-code text follows in the file), and "unsignedint" is a
	 * fused token (presumably "unsigned int"). Recover the remainder of
	 * the function from the original source before building.
	 */
	unsignedint cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));
/*
 * NOTE(review): the following text is not C code — it is German website
 * boilerplate that leaked into the file during extraction and must be
 * removed before this file can compile. Translation, kept for the record:
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed. Note: the
 * colored syntax display and the measurement are still experimental."
 */