/* The only guaranteed locking primitive available on all Sparc * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically * places the current byte at the effective address into dest_reg and * places 0xff there afterwards. Pretty lame locking primitive * compared to the Alpha and the Intel no? Most Sparcs have 'swap' * instruction which is much better...
*/
void smp_store_cpu_info(int id)
{ int cpu_node; int mid;
/*
 * CPU model dependent way of implementing IPI generation targeting
 * a single CPU. The trap handler needs only to do trap entry/return
 * to call schedule.
 */
void arch_smp_send_reschedule(int cpu)
{
	sparc32_ipi_ops->resched(cpu);
}
/* Deliberately empty: no stop action is taken on this platform. */
void smp_send_stop(void)
{
}
/* Trigger one IPI single call on one CPU. */
void arch_send_call_function_single_ipi(int cpu)
{
	sparc32_ipi_ops->single(cpu);
}
void arch_send_call_function_ipi_mask(conststruct cpumask *mask)
{ int cpu;
/* trigger IPI mask call on each CPU */
for_each_cpu(cpu, mask)
sparc32_ipi_ops->mask_one(cpu);
}
/* Re-schedule routine called by interrupt return code. */
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	/* account this reschedule IPI on the local CPU */
	local_cpu_data().irq_resched_count++;
	irq_exit();
}
/*
 * Count the CPUs probed by the PROM, warn if NR_CPUS is too small to
 * bring all of them up, record the boot CPU's info, and dispatch to
 * the CPU-model-specific secondary boot routine.
 *
 * Fix: parameter type was garbled as "unsignedint"; restored to
 * "unsigned int" so the declaration compiles.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch (sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		/* sun4e/sun4u are not supported by this 32-bit SMP code */
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}
/* Set this up early so that things like the scheduler can init * properly. We use the same cpu mask for both the present and * possible cpu map.
*/ void __init smp_setup_cpu_possible_map(void)
{ int instance, mid;
void __init smp_prepare_boot_cpu(void)
{ int cpuid = hard_smp_processor_id();
if (cpuid >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
prom_halt();
} if (cpuid != 0)
printk("boot cpu id != 0, this could work but is untested\n");
int __cpu_up(unsignedint cpu, struct task_struct *tidle)
{ int ret=0;
switch(sparc_cpu_model) { case sun4m:
ret = smp4m_boot_one_cpu(cpu, tidle); break; case sun4d:
ret = smp4d_boot_one_cpu(cpu, tidle); break; case sparc_leon:
ret = leon_boot_one_cpu(cpu, tidle); break; case sun4e:
printk("SUN4E\n");
BUG(); break; case sun4u:
printk("SUN4U\n");
BUG(); break; default:
printk("UNKNOWN!\n");
BUG(); break;
}
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask); while (!cpu_online(cpu))
mb();
} return ret;
}
/* * SMP booting is extremely fragile in some architectures. So run * the cpu initialization code first before anything else.
*/
arch_cpu_pre_starting(arg);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.