/*
 * An earlier version of opp-v1 bindings used to name the regulator
 * "cpu0-supply", we still need to handle that for backwards compatibility.
 *
 * Returns the supply name prefix ("cpu0" or "cpu") to hand to the OPP
 * layer, or NULL when the device node declares no CPU supply.
 */
static const char *find_supply_name(struct device *dev)
{
	/* __free(device_node) drops the reference when np goes out of scope */
	struct device_node *np __free(device_node) = of_node_get(dev->of_node);
	int cpu = dev->id;

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
	if (!cpu && of_property_present(np, "cpu0-supply"))
		return "cpu0";

	if (of_property_present(np, "cpu-supply"))
		return "cpu";

	/* No supply property: not an error, the CPU may be unregulated */
	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
	return NULL;
}
priv = cpufreq_dt_find_data(policy->cpu); if (!priv) {
pr_err("failed to find data for cpu%d\n", policy->cpu); return -ENODEV;
}
cpu_dev = priv->cpu_dev;
cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret); return ret;
}
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); if (!transition_latency)
transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/* * OPP layer will be taking care of regulators now, but it needs to know * the name of the regulator first.
*/
reg_name[0] = find_supply_name(cpu_dev); if (reg_name[0]) {
priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name); if (priv->opp_token < 0) {
ret = dev_err_probe(cpu_dev, priv->opp_token, "failed to set regulators\n"); goto free_cpumask;
}
}
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus); if (ret) { if (ret != -ENOENT) goto out;
/* * operating-points-v2 not supported, fallback to all CPUs share * OPP for backward compatibility if the platform hasn't set * sharing CPUs.
*/ if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
fallback = true;
}
/* * Initialize OPP tables for all priv->cpus. They will be shared by * all CPUs which have marked their CPUs shared with OPP bindings. * * For platforms not using operating-points-v2 bindings, we do this * before updating priv->cpus. Otherwise, we will end up creating * duplicate OPPs for the CPUs. * * OPPs might be populated at runtime, don't fail for error here unless * it is -EPROBE_DEFER.
*/
ret = dev_pm_opp_of_cpumask_add_table(priv->cpus); if (!ret) {
priv->have_static_opps = true;
} elseif (ret == -EPROBE_DEFER) { goto out;
}
/* * The OPP table must be initialized, statically or dynamically, by this * point.
*/
ret = dev_pm_opp_get_opp_count(cpu_dev); if (ret <= 0) {
dev_err(cpu_dev, "OPP table can't be empty\n");
ret = -ENODEV; goto out;
}
if (fallback) {
cpumask_setall(priv->cpus);
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus); if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table); if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); goto out;
}
list_add(&priv->node, &priv_list); return 0;
out: if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
dev_pm_opp_put_regulators(priv->opp_token);
free_cpumask:
free_cpumask_var(priv->cpus); return ret;
}
/* Request resources early so we can return in case of -EPROBE_DEFER */
for_each_present_cpu(cpu) {
	ret = dt_cpufreq_early_init(&pdev->dev, cpu);
	if (ret)
		goto err;
}

/* Platform data may tailor the driver's callbacks and flags */
if (data) {
	if (data->have_governor_per_policy)
		dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

	dt_cpufreq_driver.resume = data->resume;
	if (data->suspend)
		dt_cpufreq_driver.suspend = data->suspend;
	if (data->get_intermediate) {
		dt_cpufreq_driver.target_intermediate = data->target_intermediate;
		dt_cpufreq_driver.get_intermediate = data->get_intermediate;
	}
}

ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret) {
	dev_err(&pdev->dev, "failed register driver: %d\n", ret);
	goto err;
}
/*
 * NOTE(review): extraneous trailing text — a website disclaimer (German)
 * that does not belong in this source file. English translation:
 * "The information on this web page was compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Remark: the colored
 * syntax highlighting and the measurement are still experimental."
 */