/*
 * Record a change in frequency-invariance (FIE) support status and, when
 * the system-wide status actually flips, rebuild the scheduling domains
 * so the scheduler picks up the new information.
 */
static void update_scale_freq_invariant(bool status)
{
	/* Nothing to do if the cached status already matches. */
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}
/* * Avoid calling rebuild_sched_domains() unnecessarily if FIE is * supported by cpufreq.
*/ if (cpumask_empty(&scale_freq_counters_mask))
scale_freq_invariant = topology_scale_freq_invariant();
/* * If the use of counters for FIE is enabled, just return as we don't * want to update the scale factor with information from CPUFREQ. * Instead the scale factor will be updated from arch_scale_freq_tick.
*/ if (supports_scale_freq_counters(cpus)) return;
/** * topology_update_hw_pressure() - Update HW pressure for CPUs * @cpus : The related CPUs for which capacity has been reduced * @capped_freq : The maximum allowed frequency that CPUs can run at * * Update the value of HW pressure for all @cpus in the mask. The * cpumask should include all (online+offline) affected CPUs, to avoid * operating on stale data when hot-plug is used for some CPUs. The * @capped_freq reflects the currently allowed max CPUs frequency due to * HW capping. It might be also a boost frequency value, which is bigger * than the internal 'capacity_freq_ref' max frequency. In such case the * pressure value should simply be removed, since this is an indication that * there is no HW throttling. The @capped_freq must be provided in kHz.
*/ void topology_update_hw_pressure(conststruct cpumask *cpus, unsignedlong capped_freq)
{ unsignedlong max_capacity, capacity, pressure;
u32 max_freq; int cpu;
cpu = cpumask_first(cpus);
max_capacity = arch_scale_cpu_capacity(cpu);
max_freq = arch_scale_freq_ref(cpu);
/* * Handle properly the boost frequencies, which should simply clean * the HW pressure value.
*/ if (max_freq <= capped_freq)
capacity = max_capacity; else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
/*
 * Report whether a topology update is in progress; returns the current
 * value of the file-scope update_topology flag.
 */
int topology_update_cpu_topology(void)
{
	return update_topology;
}
/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	/* Flag the rebuild window so topology_update_cpu_topology() sees it. */
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{ struct clk *cpu_clk; staticbool cap_parsing_failed; int ret;
u32 cpu_capacity;
if (cap_parsing_failed) returnfalse;
ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
&cpu_capacity); if (!ret) { if (!raw_capacity) {
raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
GFP_KERNEL); if (!raw_capacity) {
cap_parsing_failed = true; returnfalse;
}
}
raw_capacity[cpu] = cpu_capacity;
pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
cpu_node, raw_capacity[cpu]);
/* * Update capacity_freq_ref for calculating early boot CPU capacities. * For non-clk CPU DVFS mechanism, there's no way to get the * frequency value now, assuming they are running at the same * frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0); if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
}
} else { if (raw_capacity) {
pr_err("cpu_capacity: missing %pOF raw capacity\n",
cpu_node);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
free_raw_capacity();
}
pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
cpumask_pr_args(policy->related_cpus),
cpumask_pr_args(cpus_to_visit));
staticint __init register_cpufreq_notifier(void)
{ int ret;
/* * On ACPI-based systems skip registering cpufreq notifier as cpufreq * information is not needed for cpu capacity initialization.
*/ if (!acpi_disabled) return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) return -ENOMEM;
cpumask_copy(cpus_to_visit, cpu_possible_mask);
ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
/* Used to enable the SMT control */
static unsigned int max_smt_thread_num = 1;
/*
 * This function returns the logic cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logic cpu number which is > 0.
 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configure to be smaller than the number of
 *     CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	int cpu;
	/* Auto-released reference to the "cpu" phandle target. */
	struct device_node *cpu_node __free(device_node) =
		of_parse_phandle(node, "cpu", 0);

	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	return cpu;
}
/*
 * Parse one cpu-map "core" node: resolve its CPU and record the
 * package/cluster/core ids in cpu_topology[]. Returns 0 on success,
 * -EINVAL on an inconsistent description.
 *
 * NOTE(review): upstream parse_core() also iterates "thread%d" subnodes
 * (which is what clears 'leaf'); that loop is not visible here — confirm
 * against the original source before relying on this version.
 */
static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	bool leaf = true;
	int cpu;

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		/* A core may not carry both a "cpu" phandle and threads. */
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n", core);
			return -EINVAL;
		}
		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		/* Leaf core without a matching possible CPU is an error. */
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
staticint __init parse_cluster(struct device_node *cluster, int package_id, int cluster_id, int depth)
{ char name[20]; bool leaf = true; bool has_cores = false; int core_id = 0; int i, ret;
/* * First check for child clusters; we currently ignore any * information about the nesting of clusters and present the * scheduler with a flat list of them.
*/
i = 0; do {
snprintf(name, sizeof(name), "cluster%d", i); struct device_node *c __free(device_node) =
of_get_child_by_name(cluster, name);
if (!c) break;
leaf = false;
ret = parse_cluster(c, package_id, i, depth + 1); if (depth > 0)
pr_warn("Topology for clusters of clusters not yet supported\n"); if (ret != 0) return ret;
i++;
} while (1);
/* Now check for cores */
i = 0; do {
snprintf(name, sizeof(name), "core%d", i); struct device_node *c __free(device_node) =
of_get_child_by_name(cluster, name);
if (!c) break;
has_cores = true;
if (depth == 0) {
pr_err("%pOF: cpu-map children should be clusters\n", c); return -EINVAL;
}
if (leaf) {
ret = parse_core(c, package_id, cluster_id, core_id++); if (ret != 0) return ret;
} else {
pr_err("%pOF: Non-leaf cluster with core %s\n",
cluster, name); return -EINVAL;
}
i++;
} while (1);
if (leaf && !has_cores)
pr_warn("%pOF: empty cluster\n", cluster);
has_socket = true;
ret = parse_cluster(c, package_id, -1, 0); if (ret != 0) return ret;
package_id++;
} while (1);
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
/* * Reset the max_smt_thread_num to 1 on failure. Since on failure * we need to notify the framework the SMT is not supported, but * max_smt_thread_num can be initialized to the SMT thread number * of the cores which are successfully parsed.
*/ if (ret)
max_smt_thread_num = 1;
staticint __init parse_dt_topology(void)
{ int ret = 0; int cpu; struct device_node *cn __free(device_node) =
of_find_node_by_path("/cpus");
if (!cn) {
pr_err("No CPU information found in DT\n"); return 0;
}
/* * When topology is provided cpu-map is essentially a root * cluster with restricted subnodes.
*/ struct device_node *map __free(device_node) =
of_get_child_by_name(cn, "cpu-map");
if (!map) return ret;
ret = parse_socket(map); if (ret != 0) return ret;
topology_normalize_cpu_scale();
/* * Check that all cores are in the topology; the SMP code will * only mark cores described in the DT as possible.
*/
for_each_possible_cpu(cpu) if (cpu_topology[cpu].package_id < 0) { return -EINVAL;
}
return ret;
} #endif
/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
/* Find the smaller of NUMA, core or LLC siblings */ if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { /* not numa in package, lets use the package siblings */
core_mask = &cpu_topology[cpu].core_sibling;
}
if (last_level_cache_is_valid(cpu)) { if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
core_mask = &cpu_topology[cpu].llc_sibling;
}
/* * For systems with no shared cpu-side LLC but with clusters defined, * extend core_mask to cluster_siblings. The sched domain builder will * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
*/ if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
core_mask = &cpu_topology[cpu].cluster_sibling;
return core_mask;
}
conststruct cpumask *cpu_clustergroup_mask(int cpu)
{ /* * Forbid cpu_clustergroup_mask() to span more or the same CPUs as * cpu_coregroup_mask().
*/ if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling)) return topology_sibling_cpumask(cpu);
/*
 * Weak default used when the architecture supplies no ACPI topology
 * parser; simply reports success with nothing parsed.
 */
__weak int __init parse_acpi_topology(void)
{
	return 0;
}
#ifdefined(CONFIG_ARM64) || defined(CONFIG_RISCV) void __init init_cpu_topology(void)
{ int cpu, ret;
reset_cpu_topology();
ret = parse_acpi_topology(); if (!ret)
ret = of_have_populated_dt() && parse_dt_topology();
if (ret) { /* * Discard anything that was parsed if we hit an error so we * don't use partial information. But do not return yet to give * arch-specific early cache level detection a chance to run.
*/
reset_cpu_topology();
}
for_each_possible_cpu(cpu) {
ret = fetch_cache_info(cpu); if (!ret) continue; elseif (ret != -ENOENT)
pr_err("Early cacheinfo failed, ret = %d\n", ret); return;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.