/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick(), which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch specific counters have higher priority than CPPC counters,
 * if available, though the CPPC driver doesn't need to have any special
 * handling for that.
 *
 * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
 * reach here from hard-irq context), which then schedules a normal work item
 * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
 * based on the counter updates since the last tick.
 */ staticvoid cppc_scale_freq_workfn(struct kthread_work *work)
{ struct cppc_freq_invariance *cppc_fi; struct cppc_perf_fb_ctrs fb_ctrs = {0}; struct cppc_cpudata *cpu_data; unsignedlong local_freq_scale;
u64 perf;
/*
 * NOTE(review): this extract is corrupted. The statements below reference
 * 'ret' and 'cpu', neither of which is declared in this function; the code
 * looks like the counter-sampling error path of cppc_cpufreq_cpu_fie_init()
 * spliced into this body during extraction. Keyword fusion ('staticvoid',
 * 'unsignedlong') above is also an extraction artifact. Restore this
 * function from the upstream source before building.
 */
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs); if (ret) {
pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
__func__, cpu, ret);
/*
 * Don't abort if the CPU was offline while the driver
 * was getting registered.
 */ if (cpu_online(cpu)) return;
}
}
/* Register for freq-invariance */
topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}
/*
 * We free all the resources on policy's removal and not on CPU removal as the
 * irq-work are per-cpu and the hotplug core takes care of flushing the pending
 * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread-work
 * fires on another CPU after the concerned CPU is removed, it won't harm.
 *
 * We just need to make sure to remove them all on policy->exit().
 */ staticvoid cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{ struct cppc_freq_invariance *cppc_fi; int cpu;
if (fie_disabled) return;
/* policy->cpus will be empty here, use related_cpus instead */
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
/*
 * NOTE(review): everything from here to the end of the function (FIE mode
 * selection, PCC check, kworker creation and SCHED_DEADLINE setup) is
 * initialization logic that appears to belong in cppc_freq_invariance_init(),
 * not in this exit path. It also uses 'ret' and 'attr' without local
 * declarations, and the locals 'cppc_fi'/'cpu' declared above are never used.
 * The body looks spliced together during extraction — restore both functions
 * from the upstream source before building.
 */
if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
fie_disabled = FIE_ENABLED; if (cppc_perf_ctrs_in_pcc()) {
pr_info("FIE not enabled on systems with registers in PCC\n");
fie_disabled = FIE_DISABLED;
}
}
if (fie_disabled) return;
kworker_fie = kthread_run_worker(0, "cppc_fie"); if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
fie_disabled = FIE_DISABLED; return;
}
/* Give the FIE kworker deadline scheduling so tick-driven updates keep up. */
ret = sched_setattr_nocheck(kworker_fie->task, &attr); if (ret) {
pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
ret);
kthread_destroy_worker(kworker_fie);
fie_disabled = FIE_DISABLED;
}
}
staticvoid cppc_freq_invariance_exit(void)
{ if (fie_disabled) return;
/*
 * NOTE(review): a void function cannot return a value, yet the two
 * statements below return a transition latency converted to microseconds.
 * This looks like the body of a transition-delay helper (presumably
 * cppc_cpufreq_get_transition_delay_us() or similar) spliced into this
 * function during extraction; the real exit path would tear down the FIE
 * resources instead. Restore from the upstream source before building.
 */
if (transition_latency_ns == CPUFREQ_ETERNAL) return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
return transition_latency_ns / NSEC_PER_USEC;
}
/* * The PCC subspace describes the rate at which platform can accept commands * on the shared PCC channel (including READs which do not count towards freq * transition requests), so ideally we need to use the PCC values as a fallback * if we don't have a platform specific transition_delay_us
*/ #ifdef CONFIG_ARM64 #include <asm/cputype.h>
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */ #define CPPC_EM_CAP_STEP (20) /* Increase the cost value by CPPC_EM_COST_STEP every performance state. */ #define CPPC_EM_COST_STEP (1) /* Add a cost gap corresponding to the energy of 4 CPUs. */ #define CPPC_EM_COST_GAP (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
/ CPPC_EM_CAP_STEP)
staticunsignedint get_perf_level_count(struct cpufreq_policy *policy)
{ struct cppc_perf_caps *perf_caps; unsignedint min_cap, max_cap; struct cppc_cpudata *cpu_data; int cpu = policy->cpu;
/*
 * NOTE(review): this function's real body is missing. The code below
 * references 'KHz', 'prev_freq', 'step', 'step_check', 'perf', 'perf_step',
 * 'power' and 'cpu_dev', none of which are declared here — presumably
 * fragments of the energy-model callbacks (perf/KHz rounding loop and the
 * cost computation) fused in during extraction. The function is also left
 * without a closing brace before the next definition. Restore from the
 * upstream source before building.
 */
/*
 * To avoid bad integer approximation, check that new frequency value
 * increased and that the new frequency will be converted to the
 * desired step value.
 */ while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
*KHz = cppc_perf_to_khz(perf_caps, perf);
perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
}
/*
 * With an artificial EM, only the cost value is used. Still the power
 * is populated such as 0 < power < EM_MAX_POWER. This allows to add
 * more sense to the artificial performance states.
 */
*power = compute_cost(cpu_dev->id, step);
/*
 * cppc_cpufreq_cpu_init - cpufreq ->init callback: build a policy from the
 * CPU's CPPC performance capabilities (_CPC/_PSD).
 *
 * Derives the policy's min/max and cpuinfo frequency bounds from the CPPC
 * perf caps, copies the shared-CPU mask for SHARED_TYPE_ANY domains, detects
 * boost support, and programs the CPU to its highest perf as the initial
 * operating point.
 *
 * NOTE(review): the function is truncated in this extract — the 'out' label
 * targeted by the gotos, the success return and the closing brace are
 * missing. Restore the tail from the upstream source before building.
 */ staticint cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ unsignedint cpu = policy->cpu; struct cppc_cpudata *cpu_data; struct cppc_perf_caps *caps; int ret;
cpu_data = cppc_cpufreq_get_cpu_data(cpu); if (!cpu_data) {
pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu); return -ENODEV;
}
caps = &cpu_data->perf_caps;
policy->driver_data = cpu_data;
/*
 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
 */
policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
/* Max depends on whether boost is currently enabled for this policy. */
policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
caps->highest_perf : caps->nominal_perf);
/*
 * Set cpuinfo.min_freq to Lowest to make the full range of performance
 * available if userspace wants to use any perf between lowest & lowest
 * nonlinear perf
 */
policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
policy->cpuinfo.max_freq = policy->max;
switch (policy->shared_type) { case CPUFREQ_SHARED_TYPE_HW: case CPUFREQ_SHARED_TYPE_NONE: /* Nothing to be done - we'll have a policy for each CPU */ break; case CPUFREQ_SHARED_TYPE_ANY: /*
 * All CPUs in the domain will share a policy and all cpufreq
 * operations will use a single cppc_cpudata structure stored
 * in policy->driver_data.
 */
cpumask_copy(policy->cpus, cpu_data->shared_cpu_map); break; default:
pr_debug("Unsupported CPU co-ord type: %d\n",
policy->shared_type);
ret = -EFAULT; goto out;
}
/*
 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
 * is supported.
 */ if (caps->highest_perf > caps->nominal_perf)
policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); if (ret) {
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret); goto out;
}
/*
 * NOTE(review): orphan fragment — no enclosing function signature is
 * visible here. 'ret', 'cpu', 'fb_ctrs_t0'/'fb_ctrs_t1', 'delivered_perf'
 * and 'cpu_data' are all undeclared; this reads like the body of the
 * driver's rate-readback (->get()) callback, sampling the feedback
 * counters and falling back to desired perf when the counters are
 * invalid, left over after the surrounding definition was lost during
 * extraction. Restore from the upstream source before building.
 */
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1); if (ret) { if (ret == -EFAULT) /* Any of the associated CPPC regs is 0. */ goto out_invalid_counters; else return 0; }
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1); if (!delivered_perf) goto out_invalid_counters;
out_invalid_counters: /*
 * Feedback counters could be unchanged or 0 when a cpu enters a
 * low-power idle state, e.g. clock-gated or power-gated.
 * Use desired perf for reflecting frequency. Get the latest register
 * value first as some platforms may update the actual delivered perf
 * there; if failed, resort to the cached desired perf.
 */ if (cppc_get_desired_perf(cpu, &delivered_perf))
delivered_perf = cpu_data->perf_ctrls.desired_perf;
/*
 * Module registration boilerplate.
 * NOTE(review): the matching module_init()/driver registration is not
 * visible in this extract.
 */
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.