// SPDX-License-Identifier: GPL-2.0-or-later /* * pseries CPU Hotplug infrastructure. * * Split out from arch/powerpc/platforms/pseries/setup.c * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c * * Peter Bergner, IBM March 2001. * Copyright (C) 2001 IBM. * Dave Engebretsen, Peter Bergner, and * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com * Plus various changes from other IBM teams... * * Copyright (C) 2006 Michael Ellerman, IBM Corporation
*/
/*
 * NOTE(review): tail of a function whose header is outside this extract
 * (the pseries CPU-disable path, judging by the calls below); @cpu is the
 * logical CPU being taken offline.
 */
/* If the dying CPU is the boot CPU, hand that role to any online CPU. */ if (cpu == boot_cpuid)
boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */ if (xive_enabled())
xive_smp_disable_cpu(); else
xics_migrate_irqs_away();
/* Tear down this CPU's MMU context state before it goes away. */
cleanup_cpu_mmu_context();
return 0;
}
/* * pseries_cpu_die: Wait for the cpu to die. * @cpu: logical processor id of the CPU whose death we're awaiting. * * This function is called from the context of the thread which is performing * the cpu-offline. Here we wait for long enough to allow the cpu in question * to self-destroy so that the cpu-offline thread can send the CPU_DEAD * notifications. * * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to * self-destruct.
*/ staticvoid pseries_cpu_die(unsignedint cpu)
{ int cpu_status = 1; unsignedint pcpu = get_hard_smp_processor_id(cpu); unsignedlong timeout = jiffies + msecs_to_jiffies(120000);
while (true) {
cpu_status = smp_query_cpu_stopped(pcpu); if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR) break;
if (time_after(jiffies, timeout)) {
pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
cpu, pcpu);
timeout = jiffies + msecs_to_jiffies(120000);
}
cond_resched();
}
if (cpu_status == QCSS_HARDWARE_ERROR) {
pr_warn("CPU %i (hwid %i) reported error while dying\n",
cpu, pcpu);
}
paca_ptrs[cpu]->cpu_start = 0;
}
/**
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads : the number of threads (cpu ids)
 * @assigned_node : the node it belongs to or NUMA_NO_NODE if free ids from any
 * node can be picked.
 * @cpu_mask: the returned CPU mask.
 *
 * Returns 0 on success.
 *
 * NOTE(review): the tail of this function (the success-path assignment to
 * rc, the "out:" label targeted below, and the freeing of candidate_mask)
 * is not visible in this extract.
 */
staticint find_cpu_id_range(unsignedint nthreads, int assigned_node,
cpumask_var_t *cpu_mask)
{
cpumask_var_t candidate_mask; unsignedint cpu, node; int rc = -ENOSPC;

if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL)) return -ENOMEM;

/* Seed the search window with ids 0..nthreads-1. */
cpumask_clear(*cpu_mask); for (cpu = 0; cpu < nthreads; cpu++)
cpumask_set_cpu(cpu, *cpu_mask);

/* Get a bitmap of unoccupied slots: possible but not currently present. */
cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

if (assigned_node != NUMA_NO_NODE) { /*
 * Remove free ids previously assigned on the other nodes. We
 * can walk only online nodes because once a node becomes online
 * it is not turned offline again.
 */
for_each_online_node(node) { if (node == assigned_node) continue;
cpumask_andnot(candidate_mask, candidate_mask,
node_recorded_ids_map[node]);
}
}

if (cpumask_empty(candidate_mask)) goto out;

/* Advance the nthreads-wide window until it fits inside the free ids. */
while (!cpumask_empty(*cpu_mask)) { if (cpumask_subset(*cpu_mask, candidate_mask)) /* Found a range where we can insert the new cpu(s) */ break;
cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
}
/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent multiple logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 *
 * NOTE(review): this extract is missing the middle of the function —
 * assigned_node is read below but its assignment (and the id-range
 * selection / present-mask update that presumably sits here) is not
 * visible. The function's closing is also absent.
 */
staticint pseries_add_processor(struct device_node *np)
{ int len, nthreads, node, cpu, assigned_node; int rc = 0;
cpumask_var_t cpu_mask; const __be32 *intserv;

/* No interrupt-server list means no threads to add. */
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return 0;

nthreads = len / sizeof(u32);

if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM;

/*
 * Fetch from the DT nodes read by dlpar_configure_connector() the NUMA
 * node id the added CPU belongs to.
 */
node = of_node_to_nid(np); if (node < 0 || !node_possible(node))
node = first_online_node;

/* Record the newly used CPU ids for the associated node. */
cpumask_or(node_recorded_ids_map[assigned_node],
node_recorded_ids_map[assigned_node], cpu_mask);

/*
 * If node is set to NUMA_NO_NODE, CPU ids have been reused from
 * another node; remove them from that node's mask.
 */
if (node == NUMA_NO_NODE) {
cpu = cpumask_first(cpu_mask);
pr_warn("Reusing free CPU ids %d-%d from another node\n",
cpu, cpu + nthreads - 1);
for_each_online_node(node) { if (node == assigned_node) continue;
cpumask_andnot(node_recorded_ids_map[node],
node_recorded_ids_map[node],
cpu_mask);
}
}
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	/* No interrupt-server list means there is nothing to remove. */
	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			/* Caller must have taken the CPU offline first. */
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			update_numa_cpu_lookup_table(cpu, -1);
			break;
		}
		/* Loop exhausted without a match: cpu >= nr_cpu_ids. */
		if (cpu >= nr_cpu_ids)
			pr_warn("Could not find cpu to remove with physical id 0x%x\n",
				thread);
	}
	cpu_maps_update_done();
}
/*
 * dlpar_offline_cpu - take every thread of the cpu node @dn offline.
 *
 * Returns 0 on success, -EINVAL if the node has no interrupt-server
 * list, -EBUSY if this would remove the last online CPU, or the error
 * from device_offline().
 */
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			/* Already offline: nothing to do for this thread. */
			if (!cpu_online(cpu))
				break;

			/*
			 * device_offline() will return -EBUSY (via cpu_down()) if there
			 * is only one CPU left. Check it here to fail earlier and with a
			 * more informative error message, while also retaining the
			 * cpu_add_remove_lock to be sure that no CPUs are being
			 * online/offlined during this check.
			 */
			if (num_online_cpus() == 1) {
				pr_warn("Unable to remove last online CPU %pOFn\n", dn);
				rc = -EBUSY;
				goto out_unlock;
			}

			/* The lock must be dropped across device_offline()
			 * and retaken afterwards. */
			cpu_maps_update_done();
			rc = device_offline(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();
			break;
		}
		/*
		 * for_each_present_cpu() terminates with cpu >= nr_cpu_ids;
		 * compare against that (not num_possible_cpus(), which is a
		 * popcount and need not equal the terminal iterator value on
		 * sparse possible masks) — consistent with
		 * pseries_remove_processor().
		 */
		if (cpu >= nr_cpu_ids) {
			pr_warn("Could not find cpu to offline with physical id 0x%x\n",
				thread);
		}
	}
out_unlock:
	cpu_maps_update_done();
out:
	return rc;
}
/*
 * dlpar_online_cpu - bring the threads of cpu node @dn online.
 *
 * NOTE(review): truncated extract — the inner loop below applies the SMT
 * policy checks and then just breaks; the actual online action (e.g. a
 * device_online() call) and the function's closing/return are not visible
 * here.
 */
staticint dlpar_online_cpu(struct device_node *dn)
{ int rc = 0; unsignedint cpu; int len, nthreads, i; const __be32 *intserv;
u32 thread;

intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return -EINVAL;

nthreads = len / sizeof(u32);

cpu_maps_update_begin(); for (i = 0; i < nthreads; i++) {
thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != thread) continue;
/* Secondary SMT threads may only be onlined if SMT policy allows. */
if (!topology_is_primary_thread(cpu)) { if (cpu_smt_control != CPU_SMT_ENABLED) break; if (!topology_smt_thread_allowed(cpu)) break;
}
break;
} if (cpu == num_possible_cpus())
printk(KERN_WARNING "Could not find cpu to online " "with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
/*
 * NOTE(review): interior of a drc-info index validator — the function
 * header and the declarations of info/value/count/i/j/index/drc sit above
 * this extract. "returnfalse"/"returntrue" are extraction artifacts
 * (missing spaces) preserved verbatim below.
 */
info = of_find_property(parent, "ibm,drc-info", NULL); if (!info) returnfalse;

value = of_prop_next_u32(info, NULL, &count);

/* First value of ibm,drc-info is number of drc-info records */ if (value)
value++; else returnfalse;

for (i = 0; i < count; i++) { if (of_read_drc_info_cell(&info, &value, &drc)) returnfalse;

/* Only "CPU" type records matter; stop at the first other type. */
if (strncmp(drc.drc_type, "CPU", 3)) break;

/* The sought index is beyond this record's range; try the next one. */
if (drc_index > drc.last_drc_index) continue;

/* Walk the sequential index range described by this record. */
index = drc.drc_index_start; for (j = 0; j < drc.num_sequential_elems; j++) { if (drc_index == index) returntrue;
index += drc.sequential_inc;
}
}

returnfalse;
}
/*
 * valid_cpu_drc_index - check @drc_index against @parent's DRC properties.
 *
 * NOTE(review): truncated extract — the body of the while loop below
 * (reading the "ibm,drc-indexes" entry at @index and comparing it) and the
 * function's return are not visible here.
 */
staticbool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{ bool found = false; int rc, index;

/* Prefer the compact "ibm,drc-info" representation when present. */
if (of_property_present(parent, "ibm,drc-info")) return drc_info_valid_index(parent, drc_index);

/* Note that the format of the ibm,drc-indexes array is
 * the number of entries in the array followed by the array
 * of drc values so we start looking at index = 1.
 */
index = 1; while (!found) {
u32 drc;
/*
 * pseries_cpuhp_attach_nodes - attach @dn and its siblings to the live
 * device tree as a single changeset.
 *
 * Returns 0 on success or the error from the changeset API.
 */
static int pseries_cpuhp_attach_nodes(struct device_node *dn)
{
	struct of_changeset cs;
	int ret;

	/*
	 * This device node is unattached but may have siblings; open-code the
	 * traversal.
	 */
	for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
		ret = of_changeset_attach_node(&cs, dn);
		if (ret)
			goto out;
	}

	ret = of_changeset_apply(&cs);
out:
	/* Destroy the changeset on both success and failure paths. */
	of_changeset_destroy(&cs);

	return ret;
}
/*
 * NOTE(review): interior of the DLPAR hotplug request handler — the
 * enclosing function header, the hp_elog/drc_index setup, and the
 * function's tail are outside this extract.
 */
switch (hp_elog->action) { case PSERIES_HP_ELOG_ACTION_REMOVE: if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
rc = dlpar_cpu_remove_by_index(drc_index); /*
 * Setting the isolation state of an UNISOLATED/CONFIGURED
 * device to UNISOLATE is a no-op, but the hypervisor can
 * use it as a hint that the CPU removal failed.
 */ if (rc)
dlpar_unisolate_drc(drc_index);
} else
rc = -EINVAL; break; case PSERIES_HP_ELOG_ACTION_ADD: if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
rc = dlpar_cpu_add(drc_index); else
rc = -EINVAL; break; default:
pr_err("Invalid action (%d) specified\n", hp_elog->action);
rc = -EINVAL; break;
}
/*
 * NOTE(review): fragment of the hotplug init path — the enclosing function
 * is not visible in this extract.
 */
/* Processors can be added/removed only on LPAR */ if (firmware_has_feature(FW_FEATURE_LPAR)) {
/* Allocate a per-node mask recording which CPU ids each node uses. */
for_each_node(node) { if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
GFP_KERNEL, node)) return -ENOMEM;

/* Record ids of CPU added at boot time */
cpumask_copy(node_recorded_ids_map[node],
cpumask_of_node(node));
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.