int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	/*
	 * NOTE(review): the body of calculate_cpu_foreign_map() is
	 * truncated at this point by what looks like extraction
	 * corruption.  Everything below, down to the start of
	 * start_secondary(), appears to be interior fragments of a
	 * separate IPI-domain allocation routine: it references names
	 * (node, ipidomain, virq, mask, call_virq, sched_virq) that are
	 * never declared in this scope, contains a "return 0;" inside a
	 * void function, and the "half DT setup" fragment occurs twice.
	 * Code is left byte-identical; restore this region from the
	 * upstream source file rather than editing it in place.
	 */

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

	/*
	 * NOTE(review): duplicated fragment begins here — same "half DT
	 * setup" text as above; the enclosing per-cpu block is never
	 * closed in this chunk.
	 */
	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	/*
	 * NOTE(review): "unsignedint" below is a mangled token (should be
	 * "unsigned int"), and the remainder of this function body is
	 * missing from this chunk — the text jumps straight into
	 * smp_prepare_cpus().  Left byte-identical; restore from the
	 * upstream source file.
	 */
	unsignedint cpu = raw_smp_processor_id();
/*
 * Called from init/main.c before smp_init().
 *
 * Initializes the boot CPU's MMU context and thread info, lets the
 * platform mp_ops prepare the secondary CPUs, and computes the initial
 * sibling/core/foreign CPU maps.  Without CPU hotplug support, the
 * present mask is fixed to the possible mask here.
 *
 * Fix: the mangled token "unsignedint" is restored to "unsigned int",
 * and the comment/code/preprocessor lines fused by extraction are
 * re-separated.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */ void __init smp_prepare_boot_cpu(void)
{ if (mp_ops->prepare_boot_cpu)
mp_ops->prepare_boot_cpu();
set_cpu_possible(0, true);
set_cpu_online(0, true);
}
#ifdef CONFIG_HOTPLUG_PARALLEL int arch_cpuhp_kick_ap_alive(unsignedint cpu, struct task_struct *tidle)
{ return mp_ops->boot_secondary(cpu, tidle);
} #else int __cpu_up(unsignedint cpu, struct task_struct *tidle)
{ int err;
err = mp_ops->boot_secondary(cpu, tidle); if (err) return err;
/* Wait for CPU to start and be ready to sync counters */ if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(1000))) {
pr_crit("CPU%u: failed to start\n", cpu); return -EIO;
}
/* Wait for CPU to finish startup & mark itself online before return */
wait_for_completion(&cpu_running); return 0;
} #endif
#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
/*
 * Per-CPU profiling multipliers are not supported on this
 * architecture; accept any multiplier and report success.
 *
 * Fix: the mangled "unsignedint" token is restored to "unsigned int".
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif
/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *
 * Fix: the mangled "staticinlinevoid" token is restored to
 * "static inline void".
 */
static inline void smp_on_other_tlbs(void (*func)(void *info), void *info)
{
	smp_call_function(func, info, 1);
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	if (atomic_read(&mm->mm_users) == 0)
		return;	/* happens as a result of exit_mmap() */

	preempt_disable();
	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	/*
	 * NOTE(review): "elseif" below is a mangled token (should be
	 * "else if") — left byte-identical; fix when restoring from the
	 * upstream source file.
	 */
	} elseif ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		/* NOTE(review): "unsignedint" is a mangled "unsigned int". */
		unsignedint cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	/*
	 * NOTE(review): everything from here to the end of this chunk
	 * appears to be extraction corruption — interleaved interior
	 * fragments of flush_tlb_range() and flush_tlb_page().  They
	 * reference vma, start, end, exec and page, none of which are in
	 * scope in flush_tlb_mm(), and the brace structure does not
	 * balance.  Code is left byte-identical; restore this region from
	 * the upstream source file rather than editing it in place.
	 */
	for_each_online_cpu(cpu) {
		/*
		 * flush_cache_range() will only fully flush icache if
		 * the VMA is executable, otherwise we must invalidate
		 * ASID without it appearing to has_valid_asid() as if
		 * mm has been completely unused by that CPU.
		 */
		if (cpu != smp_processor_id() && cpu_context(cpu, mm))
			set_cpu_context(cpu, mm, !exec);
	}
	local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

	for_each_online_cpu(cpu) {
		/*
		 * flush_cache_page() only does partial flushes, so
		 * invalidate ASID without it appearing to
		 * has_valid_asid() as if mm has been completely unused
		 * by that CPU.
		 */
		if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
			set_cpu_context(cpu, vma->vm_mm, 1);
	}
	local_flush_tlb_page(vma, page);
}
	preempt_enable();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.