/*********************************************************************
 *                     CPUFREQ INTERFACE                             *
 *********************************************************************/

/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */
/* Print length for names. */
#define CPUFREQ_NAME_LEN	16
/* Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN	(CPUFREQ_NAME_LEN + 1)
unsignedint shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */ unsignedint cpu; /* cpu managing this policy, must be online */
struct clk *clk; struct cpufreq_cpuinfo cpuinfo;/* see above */
unsignedint min; /* in kHz */ unsignedint max; /* in kHz */ unsignedint cur; /* in kHz, only needed if cpufreq
* governors are used */ unsignedint suspend_freq; /* freq to set during suspend */
unsignedint policy; /* see above */ unsignedint last_policy; /* policy before unplug */ struct cpufreq_governor *governor; /* see below */ void *governor_data; char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
/* * The rules for this semaphore: * - Any routine that wants to read from the policy structure will * do a down_read on this semaphore. * - Any routine that will write to the policy structure and/or may take away * the policy altogether (eg. CPU hotplug), will hold this lock in write * mode before doing so.
*/ struct rw_semaphore rwsem;
/* * Fast switch flags: * - fast_switch_possible should be set by the driver if it can * guarantee that frequency can be changed on any CPU sharing the * policy and that the change will affect all of the policy CPUs then. * - fast_switch_enabled is to be set by governors that support fast * frequency switching with the help of cpufreq_enable_fast_switch().
*/ bool fast_switch_possible; bool fast_switch_enabled;
/* * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current * governor.
*/ bool strict_target;
/* * Set if inefficient frequencies were found in the frequency table. * This indicates if the relation flag CPUFREQ_RELATION_E can be * honored.
*/ bool efficiencies_available;
/* * Preferred average time interval between consecutive invocations of * the driver to set the frequency for this policy. To be set by the * scaling driver (0, which is the default, means no preference).
*/ unsignedint transition_delay_us;
/* * Remote DVFS flag (Not added to the driver structure as we don't want * to access another structure from scheduler hotpath). * * Should be set if CPUs can do DVFS on behalf of other CPUs from * different cpufreq policies.
*/ bool dvfs_possible_from_any_cpu;
/* Per policy boost enabled flag. */ bool boost_enabled;
/* Per policy boost supported flag. */ bool boost_supported;
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */ unsignedint cached_target_freq; unsignedint cached_resolved_idx;
/* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
wait_queue_head_t transition_wait; struct task_struct *transition_task; /* Task which is doing the transition */
/* cpufreq-stats */ struct cpufreq_stats *stats;
/* For cpufreq driver's internal use */ void *driver_data;
/* Pointer to the cooling device if used for thermal mitigation */ struct thermal_cooling_device *cdev;
/*
 * Used for passing new cpufreq policy data to the cpufreq driver's
 * ->verify() callback for sanitization.  That callback is only expected
 * to modify the min and max values, if necessary, and specifically it
 * must not update the frequency table.
 */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo		cpuinfo;
	struct cpufreq_frequency_table	*freq_table;
	unsigned int			cpu;
	unsigned int			min;	/* in kHz */
	unsigned int			max;	/* in kHz */
};
struct cpufreq_freqs { struct cpufreq_policy *policy; unsignedint old; unsignedintnew;
u8 flags; /* flags of cpufreq_driver, see below. */
};
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
/* relation flags */
#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
/* needed by all drivers */ int (*init)(struct cpufreq_policy *policy); int (*verify)(struct cpufreq_policy_data *policy);
/* define one out of two */ int (*setpolicy)(struct cpufreq_policy *policy);
int (*target)(struct cpufreq_policy *policy, unsignedint target_freq, unsignedint relation); /* Deprecated */ int (*target_index)(struct cpufreq_policy *policy, unsignedint index); unsignedint (*fast_switch)(struct cpufreq_policy *policy, unsignedint target_freq); /* * ->fast_switch() replacement for drivers that use an internal * representation of performance levels and can pass hints other than * the target performance level to the hardware. This can only be set * if ->fast_switch is set too, because in those cases (under specific * conditions) scale invariance can be disabled, which causes the * schedutil governor to fall back to the latter.
*/ void (*adjust_perf)(unsignedint cpu, unsignedlong min_perf, unsignedlong target_perf, unsignedlong capacity);
/* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. * * get_intermediate should return a stable intermediate frequency * platform wants to switch to and target_intermediate() should set CPU * to that frequency, before jumping to the frequency corresponding * to 'index'. Core will take care of sending notifications and driver * doesn't have to handle them in target_intermediate() or * target_index(). * * Drivers can return '0' from get_intermediate() in case they don't * wish to switch to intermediate frequency for some target frequency. * In that case core will directly call ->target_index().
*/ unsignedint (*get_intermediate)(struct cpufreq_policy *policy, unsignedint index); int (*target_intermediate)(struct cpufreq_policy *policy, unsignedint index);
/* should be defined, if possible, return 0 on error */ unsignedint (*get)(unsignedint cpu);
/* Called to update policy limits on firmware notifications. */ void (*update_limits)(struct cpufreq_policy *policy);
/* optional */ int (*bios_limit)(int cpu, unsignedint *limit);
int (*online)(struct cpufreq_policy *policy); int (*offline)(struct cpufreq_policy *policy); void (*exit)(struct cpufreq_policy *policy); int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy);
/* Will be called after the driver is fully initialized */ void (*ready)(struct cpufreq_policy *policy);
struct freq_attr **attr;
/* platform specific boost support code */ bool boost_enabled; int (*set_boost)(struct cpufreq_policy *policy, int state);
/* * Set by drivers that want to register with the energy model after the * policy is properly initialized, but before the governor is started.
*/ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */

/*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or
 * max may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS		BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS			BIT(1)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV			BIT(2)

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this sysfs directories of governor would
 * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
 * governor with different tunables for different clusters.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)

/*
 * Driver will do POSTCHANGE notifications from outside of their ->target()
 * routine and so must set cpufreq_driver->flags with this flag, so that core
 * can handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)

/*
 * Set by drivers which want cpufreq core to check if CPU is running at a
 * frequency present in freq-table exposed by the driver. For these drivers if
 * CPU is found running at an out of table freq, we will try to set it to a
 * freq from the table. And if that fails, we will stop further boot process by
 * issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)
/* Driver registration: called by scaling drivers at module init/exit. */
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
#define CPUFREQ_POLICY_UNKNOWN		(0)
/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)
/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/
/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID		~0u
#define CPUFREQ_TABLE_END		~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ		(1 << 0)
#define CPUFREQ_INEFFICIENT_FREQ	(1 << 1)
/* One entry of a driver-provided frequency table. */
struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data;	/* driver specific data, not used by core */
	unsigned int	frequency;	/* kHz - doesn't need to be in ascending
					 * order */
};
/* * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table * @pos: the cpufreq_frequency_table * to use as a loop cursor. * @table: the cpufreq_frequency_table * to iterate over.
*/
/* * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table * with index * @pos: the cpufreq_frequency_table * to use as a loop cursor. * @table: the cpufreq_frequency_table * to iterate over. * @idx: the table entry currently being processed
*/
/* * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table * excluding CPUFREQ_ENTRY_INVALID frequencies. * @pos: the cpufreq_frequency_table * to use as a loop cursor. * @table: the cpufreq_frequency_table * to iterate over.
*/
/* * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. * @pos: the cpufreq_frequency_table * to use as a loop cursor. * @table: the cpufreq_frequency_table * to iterate over. * @idx: the table entry currently being processed
*/
/** * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq * frequency_table excluding CPUFREQ_ENTRY_INVALID and * CPUFREQ_INEFFICIENT_FREQ frequencies. * @pos: the &struct cpufreq_frequency_table to use as a loop cursor. * @table: the &struct cpufreq_frequency_table to iterate over. * @idx: the table entry currently being processed. * @efficiencies: set to true to only iterate over efficient frequencies.
*/
#ifdef CONFIG_CPU_FREQ
bool cpufreq_boost_enabled(void);
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
/* Find lowest freq at or above target in a table in ascending order */ staticinlineint cpufreq_table_find_index_al(struct cpufreq_policy *policy, unsignedint target_freq, bool efficiencies)
{ struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsignedint freq; int idx, best = -1;
/* Find lowest freq at or above target in a table in descending order */ staticinlineint cpufreq_table_find_index_dl(struct cpufreq_policy *policy, unsignedint target_freq, bool efficiencies)
{ struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsignedint freq; int idx, best = -1;
/* Works only on sorted freq-tables */ staticinlineint cpufreq_table_find_index_l(struct cpufreq_policy *policy, unsignedint target_freq, bool efficiencies)
{ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */ staticinlineint cpufreq_table_find_index_ah(struct cpufreq_policy *policy, unsignedint target_freq, bool efficiencies)
{ struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsignedint freq; int idx, best = -1;
/* No freq found below target_freq */ if (best == -1) return idx;
return best;
}
return best;
}
/* Find highest freq at or below target in a table in descending order */ staticinlineint cpufreq_table_find_index_dh(struct cpufreq_policy *policy, unsignedint target_freq, bool efficiencies)
{ struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsignedint freq; int idx, best = -1;
/* Limit frequency index to honor min and max */ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
efficiencies = false; goto retry;
}
/** * cpufreq_table_set_inefficient() - Mark a frequency as inefficient * @policy: the &struct cpufreq_policy containing the inefficient frequency * @frequency: the inefficient frequency * * The &struct cpufreq_policy must use a sorted frequency table * * Return: %0 on success or a negative errno code
*/
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;

int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.