/* Skip voltage update if the opp table is not available */ if (!icc_scaling_enabled) return dev_pm_opp_add(cpu_dev, freq_hz, volt);
ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt); if (ret) {
dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz); return ret;
}
if (data->per_core_dcvs) for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
if (icc_scaling_enabled)
qcom_cpufreq_set_bw(policy, freq);
/* Get the frequency requested by the cpufreq core for the CPU */ staticunsignedint qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{ struct qcom_cpufreq_data *data; conststruct qcom_cpufreq_soc_data *soc_data; unsignedint index;
if (!policy) return 0;
data = policy->driver_data;
soc_data = qcom_cpufreq.soc_data;
index = readl_relaxed(data->base + soc_data->reg_perf_state);
index = min(index, LUT_MAX_ENTRIES - 1);
index = policy->cached_resolved_idx;
writel_relaxed(index, data->base + soc_data->reg_perf_state);
if (data->per_core_dcvs) for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
ret = dev_pm_opp_of_add_table(cpu_dev); if (!ret) { /* Disable all opps and cross-validate against LUT later */
icc_scaling_enabled = true; for (rate = 0; ; rate++) {
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); if (IS_ERR(opp)) break;
/* * Two of the same frequencies with the same core counts means * end of table
*/ if (i > 0 && prev_freq == freq) { struct cpufreq_frequency_table *prev = &table[i - 1];
/* * Only treat the last frequency that might be a boost * as the boost frequency
*/ if (prev->frequency == CPUFREQ_ENTRY_INVALID) { if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
prev->frequency = prev_freq;
prev->flags = CPUFREQ_BOOST_FREQ;
} else {
dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
freq);
}
}
/* * Get the h/w throttled frequency, normalize it using the * registered opp table and use it to calculate thermal pressure.
*/
freq_hz = qcom_lmh_get_throttle_freq(data);
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
dev_pm_opp_put(opp);
}
throttled_freq = freq_hz / HZ_PER_KHZ;
/* Update HW pressure (the boost frequencies are accepted) */
arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/* * In the unlikely case policy is unregistered do not enable * polling or h/w interrupt
*/
mutex_lock(&data->throttle_lock); if (data->cancel_throttle) goto out;
/* * If h/w throttled frequency is higher than what cpufreq has requested * for, then stop polling and switch back to interrupt mechanism.
*/ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq); else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
msecs_to_jiffies(10));
/*
 * Set up the LMh (Limits Management hardware) throttle interrupt for the
 * frequency domain at @index, if the platform describes one.
 *
 * Returns 0 on success and also when no interrupt line exists (-ENXIO from
 * the lookup is not an error: cpufreq simply runs without LMh throttling),
 * or a negative errno for any other lookup failure.
 */
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	/* Affinity is best-effort: log the failure but do not fail init. */
	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}
cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu); return -ENODEV;
}
cpu_np = of_cpu_device_node_get(policy->cpu); if (!cpu_np) return -EINVAL;
ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain", "#freq-domain-cells", 0, &args);
of_node_put(cpu_np); if (ret) return ret;
index = args.args[0];
data = &qcom_cpufreq.data[index];
/* HW should be in enabled state to proceed */ if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index); return -ENODEV;
}
if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
data->per_core_dcvs = true;
/*
 * Since we cannot determine the closest rate of the target rate, let's just
 * return the actual rate at which the clock is running at. This is needed to
 * make clk_set_rate() API work properly.
 */
static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);

	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.