// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
/* CPU type of the IPL (boot) core; dictates which cores are usable. */
static u8 boot_core_type;

/* Per-CPU pcpu state; indexed by logical cpu number. */
DEFINE_PER_CPU(struct pcpu, pcpu_devices);

/*
 * Pointer to the pcpu area of the boot CPU. This is required when a restart
 * interrupt is triggered on an offline CPU. For that case accessing percpu
 * data with the common primitives does not work, since the percpu offset is
 * stored in a non existent lowcore.
 */
static struct pcpu *ipl_pcpu;

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */

/*
 * Issue a sigp order to the CPU at physical address @addr, retrying as long
 * as the target reports the busy condition. Returns the final condition code.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}
/*
 * Return non-zero if the CPU described by @pcpu is currently running.
 */
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}
/* * Find struct pcpu by cpu address.
*/ staticstruct pcpu *pcpu_find_address(conststruct cpumask *mask, u16 address)
{ int cpu;
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	struct pcpu *pcpu;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS);
	trace_hardirqs_off();
	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = this_cpu_xchg(pcpu_devices.ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	struct pcpu *target = per_cpu_ptr(&pcpu_devices, cpu);

	pcpu_ec_call(target, ec_schedule);
}
pcpu = per_cpu_ptr(&pcpu_devices, cpu);
lc = lowcore_ptr[cpu];
pa = __pa(&lc->floating_pt_save_area); if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; if (!cpu_has_vx() && !cpu_has_gs()) return 0;
pa = lc->mcesad & MCESA_ORIGIN_MASK; if (cpu_has_gs())
pa |= lc->mcesad & MCESA_LC_MASK; if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; return 0;
}
/* * Collect CPU state of the previous, crashed system. * There are three cases: * 1) standard zfcp/nvme dump * condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true * The state for all CPUs except the boot CPU needs to be collected * with sigp stop-and-store-status. The boot CPU state is located in * the absolute lowcore of the memory stored in the HSA. The zcore code * will copy the boot CPU state from the HSA. * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory) * condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true * The state for all CPUs except the boot CPU needs to be collected * with sigp stop-and-store-status. The firmware or the boot-loader * stored the registers of the boot CPU in the absolute lowcore in the * memory of the old system. * 3) kdump or stand-alone kdump for DASD * condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == false * The state for all CPUs except the boot CPU needs to be collected * with sigp stop-and-store-status. The kexec code or the boot-loader * stored the registers of the boot CPU in the memory of the old system. * * Note that the legacy kdump mode where the old kernel stored the CPU states * does no longer exist: setup_arch() explicitly deactivates the elfcorehdr= * kernel parameter. The is_kdump_kernel() implementation on s390 is independent * of the elfcorehdr= parameter.
*/ staticbool dump_available(void)
{ return oldmem_data.start || is_ipl_type_dump();
}
if (!dump_available()) return; /* Allocate a page as dumping area for the store status sigps */
page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!page)
panic("ERROR: Failed to allocate %lx bytes below %lx\n",
PAGE_SIZE, 1UL << 31);
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp.mtid_prev);
boot_cpu_addr = stap();
max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev; for (addr = 0; addr <= max_cpu_addr; addr++) { if (addr == boot_cpu_addr) continue; if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
SIGP_CC_NOT_OPERATIONAL) continue;
sa = save_area_alloc(false);
__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
save_area_add_regs(sa, page); if (cpu_has_vx()) {
__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
save_area_add_vxrs(sa, page);
}
}
memblock_free(page, PAGE_SIZE);
diag_amode31_ops.diag308_reset();
pcpu_set_smt(0);
} #endif/* CONFIG_CRASH_DUMP */
cpus_read_lock();
mutex_lock(&smp_cpu_state_mutex);
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); /* * Add IPL core first (which got logical CPU number 0) to make sure * that all SMT threads get subsequent logical CPU numbers.
*/ if (early) {
core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift; for (i = 0; i < info->configured; i++) {
core = &info->core[i]; if (core->core_id == core_id) {
nr += smp_add_core(core, &avail, true, early); break;
}
}
} for (i = 0; i < info->combined; i++) {
configured = i < info->configured;
nr += smp_add_core(&info->core[i], &avail, configured, early);
}
mutex_unlock(&smp_cpu_state_mutex);
cpus_read_unlock(); return nr;
}
/* Get CPU information */
info = memblock_alloc_or_panic(sizeof(*info), 8);
smp_get_core_info(info, 1); /* Find boot CPU type */ if (sclp.has_core_type) {
address = stap(); for (cpu = 0; cpu < info->combined; cpu++) if (info->core[cpu].core_id == address) { /* The boot cpu dictates the cpu type. */
boot_core_type = info->core[cpu].type; break;
} if (cpu >= info->combined)
panic("Could not find boot CPU type");
}
/* Set multi-threading state for the current system */
mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
pcpu_set_smt(mtid);
cpu_smt_set_num_threads(smp_cpu_mtid + 1, smp_cpu_mtid + 1);
/* Print number of CPUs */
c_cpus = s_cpus = 0; for (cpu = 0; cpu < info->combined; cpu++) { if (sclp.has_core_type &&
info->core[cpu].type != boot_core_type) continue; if (cpu < info->configured)
c_cpus += smp_cpu_mtid + 1; else
s_cpus += smp_cpu_mtid + 1;
}
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
memblock_free(info, sizeof(*info));
}
/* * Activate a secondary processor.
*/ staticvoid smp_start_secondary(void *cpuvoid)
{ struct lowcore *lc = get_lowcore(); int cpu = raw_smp_processor_id();
/* Upping and downing of CPUs */ int __cpu_up(unsignedint cpu, struct task_struct *tidle)
{ struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); int rc;
if (pcpu->state != CPU_STATE_CONFIGURED) return -EIO; if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO;
rc = pcpu_alloc_lowcore(pcpu, cpu); if (rc) return rc; /* * Make sure global control register contents do not change * until new CPU has initialized control registers.
*/
system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(cpu, tidle);
pcpu_start_fn(cpu, smp_start_secondary, NULL); /* Wait until cpu puts itself in the online & active maps */ while (!cpu_online(cpu))
cpu_relax();
system_ctlreg_unlock(); return 0;
}
/* Wait until target cpu is down */
pcpu = per_cpu_ptr(&pcpu_devices, cpu); while (!pcpu_stopped(pcpu))
cpu_relax();
pcpu_free_lowcore(pcpu, cpu);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
pcpu->flags = 0;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	/* The multiplier is ignored on s390; report success unconditionally. */
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.