/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
/* Non-zero once the cpufreq core has been administratively disabled. */
static int off __read_mostly;

/* Return non-zero when cpufreq has been disabled via disable_cpufreq(). */
static int cpufreq_disabled(void)
{
	return off;
}

/*
 * Disable the cpufreq core for the rest of this boot; this file provides
 * no path to re-enable it.
 */
void disable_cpufreq(void)
{
	off = 1;
}
EXPORT_SYMBOL_GPL(disable_cpufreq);
/* * This is a generic cpufreq init() routine which can be used by cpufreq * drivers of SMP systems. It will do following: * - validate & show freq table passed * - set policies transition latency * - policy->cpus with all possible CPUs
*/ void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsignedint transition_latency)
{
policy->freq_table = table;
policy->cpuinfo.transition_latency = transition_latency;
/* * The driver only supports the SMP configuration where all processors * share the clock and voltage and clock.
*/
cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
/*
 * NOTE(review): the function below is truncated in this copy of the file.
 * It takes cpufreq_driver_lock with read_lock_irqsave(), but the matching
 * read_unlock_irqrestore(), the final "return policy;" and the function's
 * closing brace are missing.  Restore the lost tail from the upstream
 * source rather than guessing at it here.  The fused tokens
 * ("unsignedint", "unsignedlong") are part of the same corruption.
 */
/** * cpufreq_cpu_get - Return policy for a CPU and mark it as busy. * @cpu: CPU to find the policy for. * * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment * the kobject reference counter of that policy. Return a valid policy on * success or NULL on failure. * * The policy returned by this function has to be released with the help of * cpufreq_cpu_put() to balance its kobject reference counter properly.
*/ struct cpufreq_policy *cpufreq_cpu_get(unsignedint cpu)
{ struct cpufreq_policy *policy = NULL; unsignedlong flags;
if (WARN_ON(cpu >= nr_cpu_ids)) return NULL;
/* get the cpufreq driver */
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) { /* get the CPU */
policy = cpufreq_cpu_get_raw(cpu); if (policy)
kobject_get(&policy->kobj);
}
/* NOTE(review): body ends abruptly here - unlock and return were lost. */
/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/
/** * adjust_jiffies - Adjust the system "loops_per_jiffy". * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. * @ci: Frequency change information. * * This function alters the system "loops_per_jiffy" for the clock * speed change. Note that loops_per_jiffy cannot be updated on SMP * systems as each CPU might be scaled differently. So, use the arch * per-CPU loops_per_jiffy value wherever possible.
*/ staticvoid adjust_jiffies(unsignedlong val, struct cpufreq_freqs *ci)
{ #ifndef CONFIG_SMP staticunsignedlong l_p_j_ref; staticunsignedint l_p_j_ref_freq;
if (ci->flags & CPUFREQ_CONST_LOOPS) return;
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
l_p_j_ref, l_p_j_ref_freq);
} if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
loops_per_jiffy, ci->new);
} #endif
}
/** * cpufreq_notify_transition - Notify frequency transition and adjust jiffies. * @policy: cpufreq policy to enable fast frequency switching for. * @freqs: contain details of the frequency update. * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. * * This function calls the transition notifiers and adjust_jiffies(). * * It is called twice on all CPU frequency changes that have external effects.
*/ staticvoid cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, unsignedint state)
{ int cpu;
BUG_ON(irqs_disabled());
if (cpufreq_disabled()) return;
freqs->policy = policy;
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
switch (state) { case CPUFREQ_PRECHANGE: /* * Detect if the driver reported a value as "old frequency" * which is not equal to what the cpufreq core thinks is * "old frequency".
*/ if (policy->cur && policy->cur != freqs->old) {
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
freqs->old, policy->cur);
freqs->old = policy->cur;
}
/* Do post notifications when there are chances that transition has failed */ staticvoid cpufreq_notify_post_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, int transition_failed)
{
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); if (!transition_failed) return;
/* * Catch double invocations of _begin() which lead to self-deadlock. * ASYNC_NOTIFICATION drivers are left out because the cpufreq core * doesn't invoke _begin() on their behalf, and hence the chances of * double invocations are very low. Moreover, there are scenarios * where these checks can emit false-positive warnings in these * drivers; so we avoid that by skipping them altogether.
*/
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
&& current == policy->transition_task);
/*
 * Fast frequency switching status count. Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		/* Negative count: a transition notifier is registered. */
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		/* The count must be positive if any policy had it enabled. */
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/** * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported * one. * @policy: associated policy to interrogate * @target_freq: target frequency to resolve. * * The target to driver frequency mapping is cached in the policy. * * Return: Lowest driver-supported frequency greater than or equal to the * given target_freq, subject to policy (min/max) and driver limitations.
*/ unsignedint cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsignedint target_freq)
{ unsignedint min = READ_ONCE(policy->min); unsignedint max = READ_ONCE(policy->max);
/* * If this function runs in parallel with cpufreq_set_policy(), it may * read policy->min before the update and policy->max after the update * or the other way around, so there is no ordering guarantee. * * Resolve this by always honoring the max (in case it comes from * thermal throttling or similar).
*/ if (unlikely(min > max))
min = max;
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor); if (!t) goto unlock;
if (!try_module_get(t->owner))
t = NULL;
unlock:
mutex_unlock(&cpufreq_governor_mutex);
return t;
}
/*
 * Map a governor-name string onto a static policy constant.  Returns
 * CPUFREQ_POLICY_PERFORMANCE, CPUFREQ_POLICY_POWERSAVE or
 * CPUFREQ_POLICY_UNKNOWN when the string matches neither.
 */
static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}
/** * cpufreq_parse_governor - parse a governor string only for has_target() * @str_governor: Governor name.
*/ staticstruct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{ struct cpufreq_governor *t;
t = get_governor(str_governor); if (t) return t;
if (request_module("cpufreq_%s", str_governor)) return NULL;
return get_governor(str_governor);
}
/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sysfs_emit(buf, "%u\n", cur_freq);

	/* Hardware reported no frequency: emit an empty line. */
	return sysfs_emit(buf, "\n");
}
/* * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
*/ static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy, char *buf)
{ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sysfs_emit(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sysfs_emit(buf, "performance\n");
	else if (policy->governor)
		return sysfs_emit(buf, "%s\n", policy->governor->name);
	return -EINVAL;
}
/* * store_scaling_governor - store policy for the specified CPU
*/ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, constchar *buf, size_t count)
{ char str_governor[CPUFREQ_NAME_LEN]; int ret;
ret = sscanf(buf, "%15s", str_governor); if (ret != 1) return -EINVAL;
if (cpufreq_driver->setpolicy) { unsignedint new_pol;
new_pol = cpufreq_parse_policy(str_governor); if (!new_pol) return -EINVAL;
for_each_cpu(cpu, mask) {
i += sysfs_emit_at(buf, i, "%u ", cpu); if (i >= (PAGE_SIZE - 5)) break;
}
/* Remove the extra space at the end */
i--;
i += sysfs_emit_at(buf, i, "\n"); return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
/*
 * Create the policy's sysfs attribute files: frequency-table attributes
 * (when a table exists), the driver's own attributes, and the optional
 * cur-freq / avg-freq / bios-limit / boost files.  Returns 0 on success
 * or the first sysfs_create_file() error.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* Attributes that need freq_table */
	if (policy->freq_table) {
		ret = sysfs_create_file(&policy->kobj,
				&cpufreq_freq_attr_scaling_available_freqs.attr);
		if (ret)
			return ret;

		if (cpufreq_boost_supported()) {
			ret = sysfs_create_file(&policy->kobj,
					&cpufreq_freq_attr_scaling_boost_freqs.attr);
			if (ret)
				return ret;
		}
	}

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	if (cpufreq_avg_freq_supported(policy)) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
		if (ret)
			return ret;
	}

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	if (cpufreq_boost_supported()) {
		ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Pick the initial governor (has_target() drivers) or static policy
 * (setpolicy drivers) for @policy and apply it via cpufreq_set_policy().
 * Governor module references taken by get_governor()/__module_get() are
 * dropped again once the policy has been set.
 */
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}
staticint cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsignedint cpu)
{ int ret = 0;
/* Has this CPU been taken care of already? */ if (cpumask_test_cpu(cpu, policy->cpus)) return 0;
guard(cpufreq_policy_write)(policy);
if (has_target())
cpufreq_stop_governor(policy);
cpumask_set_cpu(cpu, policy->cpus);
if (has_target()) {
ret = cpufreq_start_governor(policy); if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
return ret;
}
void refresh_frequency_limits(struct cpufreq_policy *policy)
{ if (!policy_is_inactive(policy)) {
pr_debug("updating policy for CPU %u\n", policy->cpu);
/* * We need to make sure that the underlying kobj is * actually not referenced anymore by anybody before we * proceed with unloading.
*/
pr_debug("waiting for dropping of refcount\n");
wait_for_completion(cmp);
pr_debug("wait complete\n");
}
policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) return NULL;
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) goto err_free_policy;
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask;
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) goto err_free_rcpumask;
init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu); if (ret) {
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret); /* * The entire policy object will be freed below, but the extra * memory allocated for the kobject name needs to be freed by * releasing the kobject.
*/
kobject_put(&policy->kobj); goto err_free_real_cpus;
}
staticvoid cpufreq_policy_free(struct cpufreq_policy *policy)
{ unsignedlong flags; int cpu;
/* * The callers must ensure the policy is inactive by now, to avoid any * races with show()/store() callbacks.
*/ if (unlikely(!policy_is_inactive(policy)))
pr_warn("%s: Freeing active policy\n", __func__);
/* Remove policy from list */
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_del(&policy->policy_list);
/* Cancel any pending policy->update work before freeing the policy. */
cancel_work_sync(&policy->update);
if (policy->max_freq_req) { /* * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY * notification, since CPUFREQ_CREATE_POLICY notification was * sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
freq_qos_remove_request(policy->max_freq_req);
}
if (!new_policy && cpufreq_driver->online) { /* Recover policy->cpus using related_cpus */
cpumask_copy(policy->cpus, policy->related_cpus);
ret = cpufreq_driver->online(policy); if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__); goto out_exit_policy;
}
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* * Call driver. From then on the cpufreq must be able * to accept all calls to ->verify and ->setpolicy for this CPU.
*/
ret = cpufreq_driver->init(policy); if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__); goto out_clear_policy;
}
/* * The initialization has succeeded and the policy is online. * If there is a problem with its frequency table, take it * offline and drop it.
*/
ret = cpufreq_table_validate_and_sort(policy); if (ret) goto out_offline_policy;
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
}
/* * affected cpus must always be the one, which are online. We aren't * managing offline cpus here.
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
GFP_KERNEL); if (!policy->min_freq_req) {
ret = -ENOMEM; goto out_destroy_policy;
}
ret = freq_qos_add_request(&policy->constraints,
policy->min_freq_req, FREQ_QOS_MIN,
FREQ_QOS_MIN_DEFAULT_VALUE); if (ret < 0) { /* * So we don't call freq_qos_remove_request() for an * uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL; goto out_destroy_policy;
}
/* * This must be initialized right here to avoid calling * freq_qos_remove_request() on uninitialized request in case * of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
ret = freq_qos_add_request(&policy->constraints,
policy->max_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE); if (ret < 0) {
policy->max_freq_req = NULL; goto out_destroy_policy;
}
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
} else {
ret = freq_qos_update_request(policy->max_freq_req, policy->max); if (ret < 0) goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) {
ret = -EIO;
pr_err("%s: ->get() failed\n", __func__); goto out_destroy_policy;
}
}
/* * Sometimes boot loaders set CPU frequency to a value outside of * frequency table present with cpufreq core. In such cases CPU might be * unstable if it has to run on that frequency for long duration of time * and so its better to set it to a frequency which is specified in * freq-table. This also makes cpufreq stats inconsistent as * cpufreq-stats would fail to register because current frequency of CPU * isn't found in freq-table. * * Because we don't want this change to effect boot process badly, we go * for the next freq which is >= policy->cur ('cur' must be set by now, * otherwise we will end up setting freq to lowest of the table as 'cur' * is initialized to zero). * * We are passing target-freq as "policy->cur - 1" otherwise * __cpufreq_driver_target() would simply fail, as policy->cur will be * equal to target-freq.
*/ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
&& has_target()) { unsignedint old_freq = policy->cur;
/* Are we running at unknown frequency ? */
ret = cpufreq_frequency_table_get_index(policy, old_freq); if (ret == -EINVAL) {
ret = __cpufreq_driver_target(policy, old_freq - 1,
CPUFREQ_RELATION_L);
/* * Reaching here after boot in a few seconds may not * mean that system will remain stable at "unknown" * frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
__func__, policy->cpu, old_freq, policy->cur);
}
}
if (new_policy) {
ret = cpufreq_add_dev_interface(policy); if (ret) goto out_destroy_policy;
/* * Register with the energy model before * em_rebuild_sched_domains() is called, which will result * in rebuilding of the sched domains, which should only be done * once the energy model is properly initialized for the policy * first. * * Also, this should be called before the policy is registered * with cooling framework.
*/ if (cpufreq_driver->register_em)
cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy); if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
__func__, cpu, ret); goto out_destroy_policy;
}
/* Check if this CPU already has a policy to manage it */
policy = per_cpu(cpufreq_cpu_data, cpu); if (policy) {
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); if (!policy_is_inactive(policy)) return cpufreq_add_policy_cpu(policy, cpu);
/* This is the only online CPU for the policy. Start over. */
new_policy = false;
} else {
new_policy = true;
policy = cpufreq_policy_alloc(cpu); if (!policy) return -ENOMEM;
}
ret = cpufreq_policy_online(policy, cpu, new_policy); if (ret) {
cpufreq_policy_free(policy); return ret;
}
kobject_uevent(&policy->kobj, KOBJ_ADD);
/* Callback for handling stuff after policy is ready */ if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
/* Register cpufreq cooling only for a new policy */ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
/* * Let the per-policy boost flag mirror the cpufreq_driver boost during * initialization for a new policy. For an existing policy, maintain the * previous boost value unless global boost is disabled.
*/ if (cpufreq_driver->set_boost && policy->boost_supported &&
(new_policy || !cpufreq_boost_enabled())) {
ret = policy_set_boost(policy, cpufreq_boost_enabled()); if (ret) { /* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
str_enable_disable(cpufreq_boost_enabled()));
}
}
pr_debug("initialization complete\n");
return 0;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}
staticvoid __cpufreq_offline(unsignedint cpu, struct cpufreq_policy *policy)
{ int ret;
if (has_target())
cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus);
if (!policy_is_inactive(policy)) { /* Nominate a new CPU if necessary. */ if (cpu == policy->cpu)
policy->cpu = cpumask_any(policy->cpus);
/* Start the governor again for the active policy. */ if (has_target()) {
ret = cpufreq_start_governor(policy); if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
/* * Perform the ->offline() during light-weight tear-down, as * that allows fast recovery when the CPU comes back.
*/ if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy); return;
}
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu); if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__); return 0;
}
guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
return 0;
}
/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	scoped_guard(cpufreq_policy_write, policy) {
		if (cpu_online(cpu))
			__cpufreq_offline(cpu, policy);

		remove_cpu_dev_symlink(policy, cpu, dev);

		/* Other CPUs of this policy are still registered. */
		if (!cpumask_empty(policy->real_cpus))
			return;

		/*
		 * Unregister cpufreq cooling once all the CPUs of the policy
		 * are removed.
		 */
		if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
			cpufreq_cooling_unregister(policy->cdev);
			policy->cdev = NULL;
		}

		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline && cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}

	cpufreq_policy_free(policy);
}
/** * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference. * @policy: Policy managing CPUs. * @new_freq: New CPU frequency. * * Adjust to the current frequency first and clean up later by either calling * cpufreq_update_policy(), or scheduling handle_update().
*/ staticvoid cpufreq_out_of_sync(struct cpufreq_policy *policy, unsignedint new_freq)
{ struct cpufreq_freqs freqs;
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
policy->cur, new_freq);
new_freq = cpufreq_driver->get(policy->cpu); if (!new_freq) return 0;
/* * If fast frequency switching is used with the given policy, the check * against policy->cur is pointless, so skip it in that case.
*/ if (policy->fast_switch_enabled || !has_target()) return new_freq;
if (policy->cur != new_freq) { /* * For some platforms, the frequency returned by hardware may be * slightly different from what is provided in the frequency * table, for example hardware may return 499 MHz instead of 500 * MHz. In such cases it is better to avoid getting into * unnecessary frequency updates.
*/ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ) return policy->cur;
cpufreq_out_of_sync(policy, new_freq); if (update)
schedule_work(&policy->update);
}
return new_freq;
}
/** * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur * @cpu: CPU number * * This is the last known freq, without actually getting it from the driver. * Return value will be same as what is shown in scaling_cur_freq in sysfs.
*/ unsignedint cpufreq_quick_get(unsignedint cpu)
{ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; unsignedlong flags;
policy = cpufreq_cpu_get(cpu); if (policy) return policy->cur;
return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/** * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU * @cpu: CPU number * * Just return the max possible frequency for a given CPU.
*/ unsignedint cpufreq_quick_get_max(unsignedint cpu)
{ struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu); if (policy) return policy->max;
return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
/** * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU * @cpu: CPU number * * The default return value is the max_freq field of cpuinfo.
*/
__weak unsignedint cpufreq_get_hw_max_freq(unsignedint cpu)
{ struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu); if (policy) return policy->cpuinfo.max_freq;
/** * cpufreq_get - get the current CPU frequency (in kHz) * @cpu: CPU number * * Get the CPU current (static) CPU frequency
*/ unsignedint cpufreq_get(unsignedint cpu)
{ struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu); if (!policy) return 0;
/* * In case platform wants some specific frequency to be configured * during suspend..
*/ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{ int ret;
if (!policy->suspend_freq) {
pr_debug("%s: suspend_freq not defined\n", __func__); return 0;
}
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
CPUFREQ_RELATION_H); if (ret)
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
__func__, policy->suspend_freq, ret);
/** * cpufreq_suspend() - Suspend CPUFreq governors. * * Called during system wide Suspend/Hibernate cycles for suspending governors * as some platforms can't change frequency after this point in suspend cycle. * Because some of the devices (like: i2c, regulators, etc) they use for * changing frequency are suspended quickly after this point.
*/ void cpufreq_suspend(void)
{ struct cpufreq_policy *policy;
if (!cpufreq_driver) return;
if (!has_target() && !cpufreq_driver->suspend) goto suspend;
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
pr_err("%s: Failed to suspend driver: %s\n", __func__,
cpufreq_driver->name);
}
suspend:
cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %s\n", __func__,
			       cpufreq_driver->name);
		} else if (has_target()) {
			scoped_guard(cpufreq_policy_write, policy) {
				ret = cpufreq_start_governor(policy);
			}
			if (ret)
				pr_err("%s: Failed to start governor for CPU%u's policy\n",
				       __func__, policy->cpu);
		}
	}
}
/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}
/** * cpufreq_get_current_driver - Return the current driver's name. * * Return the name string of the currently registered cpufreq driver or NULL if * none.
*/ constchar *cpufreq_get_current_driver(void)
{ if (cpufreq_driver) return cpufreq_driver->name;
/** * cpufreq_get_driver_data - Return current driver data. * * Return the private data of the currently registered cpufreq driver, or NULL * if no cpufreq driver has been registered.
*/ void *cpufreq_get_driver_data(void)
{ if (cpufreq_driver) return cpufreq_driver->driver_data;
/** * cpufreq_register_notifier - Register a notifier with cpufreq. * @nb: notifier function to register. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER. * * Add a notifier to one of two lists: either a list of notifiers that run on * clock rate changes (once before and once after every transition), or a list * of notifiers that ron on cpufreq policy changes. * * This function may sleep and it has the same return values as * blocking_notifier_chain_register().
*/ int cpufreq_register_notifier(struct notifier_block *nb, unsignedint list)
{ int ret;
if (cpufreq_disabled()) return -EINVAL;
switch (list) { case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
if (cpufreq_fast_switch_count > 0) {
mutex_unlock(&cpufreq_fast_switch_lock); return -EBUSY;
}
ret = srcu_notifier_chain_register(
&cpufreq_transition_notifier_list, nb); if (!ret)
cpufreq_fast_switch_count--;
mutex_unlock(&cpufreq_fast_switch_lock); break; case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_register(
&cpufreq_policy_notifier_list, nb); break; default:
ret = -EINVAL;
}
/** * cpufreq_unregister_notifier - Unregister a notifier from cpufreq. * @nb: notifier block to be unregistered. * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER. * * Remove a notifier from one of the cpufreq notifier lists. * * This function may sleep and it has the same return values as * blocking_notifier_chain_unregister().
*/ int cpufreq_unregister_notifier(struct notifier_block *nb, unsignedint list)
{ int ret;
if (cpufreq_disabled()) return -EINVAL;
switch (list) { case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
ret = srcu_notifier_chain_unregister(
&cpufreq_transition_notifier_list, nb); if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
cpufreq_fast_switch_count++;
mutex_unlock(&cpufreq_fast_switch_lock); break; case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_unregister(
&cpufreq_policy_notifier_list, nb); break; default:
ret = -EINVAL;
}
/** * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch. * @policy: cpufreq policy to switch the frequency for. * @target_freq: New frequency to set (may be approximate). * * Carry out a fast frequency switch without sleeping. * * The driver's ->fast_switch() callback invoked by this function must be * suitable for being called from within RCU-sched read-side critical sections * and it is expected to select the minimum available frequency greater than or * equal to @target_freq (CPUFREQ_RELATION_L). * * This function must not be called if policy->fast_switch_enabled is unset. * * Governors calling this function must guarantee that it will never be invoked * twice in parallel for the same policy and that it will never be called in * parallel with either ->target() or ->target_index() for the same policy. * * Returns the actual frequency set for the CPU. * * If 0 is returned by the driver's ->fast_switch() callback to indicate an * error condition, the hardware configuration must be preserved.
*/ unsignedint cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsignedint target_freq)
{ unsignedint freq; int cpu;
/*
 * NOTE(review): the body of this function is truncated in this chunk —
 * everything after these local declarations is missing, so it cannot be
 * reviewed or safely rewritten here.  Also note the fused "unsignedint"
 * tokens (return type, parameter, and local), which will not compile and
 * need a space restored ("unsigned int").
 */
/** * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go. * @cpu: Target CPU. * @min_perf: Minimum (required) performance level (units of @capacity). * @target_perf: Target (desired) performance level (units of @capacity). * @capacity: Capacity of the target CPU. * * Carry out a fast performance level switch of @cpu without sleeping. * * The driver's ->adjust_perf() callback invoked by this function must be * suitable for being called from within RCU-sched read-side critical sections * and it is expected to select a suitable performance level equal to or above * @min_perf and preferably equal to or below @target_perf. * * This function must not be called if policy->fast_switch_enabled is unset. * * Governors calling this function must guarantee that it will never be invoked * twice in parallel for the same CPU and that it will never be called in * parallel with either ->target() or ->target_index() or ->fast_switch() for * the same CPU.
*/ void cpufreq_driver_adjust_perf(unsignedint cpu, unsignedlong min_perf, unsignedlong target_perf, unsignedlong capacity)
{
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}
/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	/* Presence check: an explicit NULL comparison instead of "!!". */
	return cpufreq_driver->adjust_perf != NULL;
}
/* Must set freqs->new to intermediate frequency */ staticint __target_intermediate(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, int index)
{ int ret;
/*
 * NOTE(review): the body of __target_intermediate is truncated in this
 * chunk — only the signature and one local declaration are visible, so no
 * behavioral review is possible here.  The fused "staticint" token will
 * not compile and needs a space restored ("static int").
 */
/*
 * NOTE(review): this span is an interior fragment of a different function
 * than the one declared immediately above — it references locals (retval,
 * notify, freqs, intermediate_freq, restore_freq) that are declared
 * nowhere in the visible chunk.  The surrounding function's start and end
 * are missing, so the code is kept byte-identical and only annotated.
 */
retval = cpufreq_driver->target_index(policy, index); if (retval)
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
retval);
if (notify) {
cpufreq_freq_transition_end(policy, &freqs, retval);
/* * Failed after setting to intermediate freq? Driver should have * reverted back to initial frequency and so should we. Check * here for intermediate_freq instead of get_intermediate, in * case we haven't switched to intermediate freq at all.
*/ if (unlikely(retval && intermediate_freq)) {
freqs.old = intermediate_freq;
freqs.new = restore_freq;
cpufreq_freq_transition_begin(policy, &freqs);
cpufreq_freq_transition_end(policy, &freqs, 0);
}
}
/*
 * NOTE(review): interior fragment of yet another function — it uses
 * parameters (target_freq, relation, old_target_freq) not declared in
 * view, and the final "if (cpufreq_driver->target) {" branch is cut off
 * mid-body.  Kept byte-identical; cannot be reviewed or rewritten safely.
 */
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
/* * This might look like a redundant call as we are checking it again * after finding index. But it is left intentionally for cases where * exactly same freq is called again and so we can save on few function * calls.
*/ if (target_freq == policy->cur &&
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS)) return 0;
if (cpufreq_driver->target) { /* * If the driver hasn't setup a single inefficient frequency, * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
*/ if (!policy->efficiencies_available)
relation &= ~CPUFREQ_RELATION_E;
staticint cpufreq_init_governor(struct cpufreq_policy *policy)
{ int ret;
/* Don't start any governor operations if we are entering suspend */ if (cpufreq_suspended) return 0; /* * Governor might not be initiated here if ACPI _PPC changed * notification happened, so check it.
*/ if (!policy->governor) return -EINVAL;
/*
 * NOTE(review): the rest of cpufreq_init_governor is truncated in this
 * chunk (the local `ret` is declared but never used in the visible part),
 * so no rewrite is attempted.  The fused "staticint" token will not
 * compile and needs a space restored ("static int").
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.