// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */
policy = cpufreq_cpu_get_raw(cpu); if (unlikely(!policy)) return 0;
priv = policy->driver_data;
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false); if (ret) return 0; return rate / 1000;
}
/*
 * perf_ops->freq_set is not synchronous; the actual OPP change will
 * happen asynchronously and can be notified if the events are
 * subscribed for by the SCMI firmware
*/ staticint
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsignedint index)
{ struct scmi_data *priv = policy->driver_data;
u64 freq = policy->freq_table[index].frequency;
if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
&domain_id)) { /* Find the corresponding index for power-domain "perf". */
index = of_property_match_string(np, "power-domain-names", "perf"); if (index < 0) return -EINVAL;
if (of_parse_phandle_with_args(np, "power-domains", "#power-domain-cells", index,
&domain_id)) return -EINVAL;
}
return domain_id.args[0];
}
/*
 * scmi_get_sharing_cpus() - find all present CPUs whose SCMI performance
 * domain matches @domain and mark them in @cpumask.
 *
 * @cpu_dev: device of the CPU whose domain peers are being collected;
 *           it is skipped itself and only its siblings are tested.
 * @domain:  SCMI performance domain id to match against.
 * @cpumask: output mask; matching CPUs are set, other bits are untouched.
 *
 * CPUs without a registered struct device are silently ignored (best
 * effort). Returns 0 on success.
 */
static int
scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
		      struct cpumask *cpumask)
{
	int cpu, tdomain;
	struct device *tcpu_dev;

	for_each_present_cpu(cpu) {
		/* The reference CPU itself is not a sibling candidate. */
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = scmi_cpu_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}
domain = scmi_cpu_domain_id(cpu_dev); if (domain < 0) return domain;
/* Get the power cost of the performance domain. */
Hz = *KHz * 1000;
ret = perf_ops->est_power_get(ph, domain, &Hz, power); if (ret) return ret;
/* Convert the power to uW if it is mW (ignore bogoW) */ if (power_scale == SCMI_POWER_MILLIWATTS)
*power *= MICROWATT_PER_MILLIWATT;
/* The EM framework specifies the frequency in KHz. */
*KHz = Hz / 1000;
return 0;
}
/*
 * scmi_get_rate_limit() - obtain the frequency-change rate limit (in us)
 * for an SCMI performance domain.
 *
 * @domain:          SCMI performance domain id.
 * @has_fast_switch: true if fast channels are available for this domain.
 *
 * Fast channels are used whenever available, so their rate_limit value is
 * preferred when populated; otherwise fall back to the generic per-domain
 * rate_limit_get() query. Returns the rate limit, or 0 when none could be
 * obtained (i.e. no limit is enforced).
 */
static int
scmi_get_rate_limit(u32 domain, bool has_fast_switch)
{
	int ret, rate_limit;

	if (has_fast_switch) {
		/*
		 * Fast channels are used whenever available,
		 * so use their rate_limit value if populated.
		 */
		ret = perf_ops->fast_switch_rate_limit(ph, domain,
						       &rate_limit);
		if (!ret && rate_limit)
			return rate_limit;
	}

	ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
	if (ret)
		return 0;

	return rate_limit;
}
cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) {
pr_err("failed to get cpu%d device\n", policy->cpu); return -ENODEV;
}
domain = scmi_cpu_domain_id(cpu_dev); if (domain < 0) return domain;
priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
ret = -ENOMEM; goto out_free_priv;
}
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus); if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n"); goto out_free_cpumask;
}
/* * Obtain CPUs that share performance levels. * The OPP 'sharing cpus' info may come from DT through an empty opp * table and opp-shared.
*/
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus); if (ret || cpumask_empty(priv->opp_shared_cpus)) { /* * Either opp-table is not set or no opp-shared was found. * Use the CPU mask from SCMI to designate CPUs sharing an OPP * table.
*/
cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/* * A previous CPU may have marked OPPs as shared for a few CPUs, based on * what OPP core provided. If the current CPU is part of those few, then * there is no need to add OPPs again.
*/
nr_opp = dev_pm_opp_get_opp_count(cpu_dev); if (nr_opp <= 0) {
ret = perf_ops->device_opps_add(ph, cpu_dev, domain); if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n"); goto out_free_cpumask;
}
nr_opp = dev_pm_opp_get_opp_count(cpu_dev); if (nr_opp <= 0) {
dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
__func__, nr_opp);
ret = -ENODEV; goto out_free_opp;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus); if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
goto out_free_opp;
}
priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); goto out_free_opp;
}
ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE); if (ret < 0) {
dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret); goto out_free_table;
}
priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
&priv->domain_id,
&priv->limit_notify_nb); if (ret)
dev_warn(&sdev->dev, "failed to register for limits change notifier for domain %d\n",
priv->domain_id);
/* * This callback will be called for each policy, but we don't need to * register with EM every time. Despite not being part of the same * policy, some CPUs may still share their perf-domains, and a CPU from * another policy may already have registered with EM on behalf of CPUs * of this policy.
*/ if (!priv->nr_opp) return;
/* * Older Broadcom STB chips had a "clocks" property for CPU node(s) * that did not match the SCMI performance protocol node, if we got * there, it means we had such an older Device Tree, therefore return * true to preserve backwards compatibility.
*/ if (of_machine_is_compatible("brcm,brcmstb")) returntrue;
if (!handle || !scmi_dev_used_by_cpus(dev)) return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph); if (IS_ERR(perf_ops)) return PTR_ERR(perf_ops);
#ifdef CONFIG_COMMON_CLK /* dummy clock provider as needed by OPP if clocks property is used */ if (of_property_present(dev->of_node, "#clock-cells")) {
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); if (ret) return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
} #endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver); if (ret) {
dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
__func__, ret);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.