/* * The root of the list of all opp-tables. All opp_table structures branch off * from here, with each opp_table containing the list of opps it supports in * various states of availability.
*/
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock); /* Flag indicating that opp_tables list is being updated at the moment */ staticbool opp_tables_busy;
/* OPP ID allocator */ static DEFINE_XARRAY_ALLOC1(opp_configs);
list_for_each_entry(opp_table, &opp_tables, node) { if (_find_opp_dev(dev, opp_table)) return dev_pm_opp_get_opp_table_ref(opp_table);
}
return ERR_PTR(-ENODEV);
}
/** * _find_opp_table() - find opp_table struct using device pointer * @dev: device pointer used to lookup OPP table * * Search OPP table for one containing matching device. * * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or * -EINVAL based on type of error. * * The callers must call dev_pm_opp_put_opp_table() after the table is used.
*/ struct opp_table *_find_opp_table(struct device *dev)
{ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL);
}
/* * Returns true if multiple clocks aren't there, else returns false with WARN. * * We don't force clk_count == 1 here as there are users who don't have a clock * representation in the OPP table and manage the clock configuration themselves * in an platform specific way.
*/ staticbool assert_single_clk(struct opp_table *opp_table, unsignedint __always_unused index)
{ return !WARN_ON(opp_table->clk_count > 1);
}
/* * Returns true if clock table is large enough to contain the clock index.
*/ staticbool assert_clk_index(struct opp_table *opp_table, unsignedint index)
{ return opp_table->clk_count > index;
}
/* * Returns true if bandwidth table is large enough to contain the bandwidth index.
*/ staticbool assert_bandwidth_index(struct opp_table *opp_table, unsignedint index)
{ return opp_table->path_count > index;
}
/** * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp * @opp: opp for which bandwidth has to be returned for * @peak: select peak or average bandwidth * @index: bandwidth index * * Return: bandwidth in kBps, else return 0
*/ unsignedlong dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{ if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
}
if (index >= opp->opp_table->path_count) return 0;
/** * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for * * Return: voltage in micro volt corresponding to the opp, else * return 0 * * This is useful only for devices with single power supply.
*/ unsignedlong dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{ if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
}
/** * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp * @opp: opp for which voltage has to be returned for * @supplies: Placeholder for copying the supply information. * * Return: negative error number on failure, 0 otherwise on success after * setting @supplies. * * This can be used for devices with any number of power supplies. The caller * must ensure the @supplies array must contain space for each regulator.
*/ int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
{ if (IS_ERR_OR_NULL(opp) || !supplies) {
pr_err("%s: Invalid parameters\n", __func__); return -EINVAL;
}
/** * dev_pm_opp_get_power() - Gets the power corresponding to an opp * @opp: opp for which power has to be returned for * * Return: power in micro watt corresponding to the opp, else * return 0 * * This is useful only for devices with single power supply.
*/ unsignedlong dev_pm_opp_get_power(struct dev_pm_opp *opp)
{ unsignedlong opp_power = 0; int i;
if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
} for (i = 0; i < opp->opp_table->regulator_count; i++)
opp_power += opp->supplies[i].u_watt;
/** * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an * available opp with specified index * @opp: opp for which frequency has to be returned for * @index: index of the frequency within the required opp * * Return: frequency in hertz corresponding to the opp with specified index, * else return 0
*/ unsignedlong dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{ if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
}
/** * dev_pm_opp_get_level() - Gets the level corresponding to an available opp * @opp: opp for which level value has to be returned for * * Return: level read from device tree corresponding to the opp, else * return U32_MAX.
*/ unsignedint dev_pm_opp_get_level(struct dev_pm_opp *opp)
{ if (IS_ERR_OR_NULL(opp) || !opp->available) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
}
/** * dev_pm_opp_get_required_pstate() - Gets the required performance state * corresponding to an available opp * @opp: opp for which performance state has to be returned for * @index: index of the required opp * * Return: performance state read from device tree corresponding to the * required opp, else return U32_MAX.
*/ unsignedint dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, unsignedint index)
{ if (IS_ERR_OR_NULL(opp) || !opp->available ||
index >= opp->opp_table->required_opp_count) {
pr_err("%s: Invalid parameters\n", __func__); return 0;
}
/* required-opps not fully initialized yet */ if (lazy_linking_pending(opp->opp_table)) return 0;
/* The required OPP table must belong to a genpd */ if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
pr_err("%s: Performance state is only valid for genpds.\n", __func__); return 0;
}
/** * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not * @opp: opp for which turbo mode is being verified * * Turbo OPPs are not for normal use, and can be enabled (under certain * conditions) for short duration of times to finish high throughput work * quickly. Running on them for longer times may overheat the chip. * * Return: true if opp is turbo opp, else false.
*/ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{ if (IS_ERR_OR_NULL(opp) || !opp->available) {
pr_err("%s: Invalid parameters\n", __func__); returnfalse;
}
/** * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds * @dev: device for which we do this operation * * Return: This function returns the max clock latency in nanoseconds.
*/ unsignedlong dev_pm_opp_get_max_clock_latency(struct device *dev)
{ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0;
/** * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds * @dev: device for which we do this operation * * Return: This function returns the max voltage latency in nanoseconds.
*/ unsignedlong dev_pm_opp_get_max_volt_latency(struct device *dev)
{ struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp; struct regulator *reg; unsignedlong latency_ns = 0; int ret, i, count; struct { unsignedlong min; unsignedlong max;
} *uV;
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0;
/* Regulator may not be required for the device */ if (!opp_table->regulators) return 0;
count = opp_table->regulator_count;
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) return 0;
scoped_guard(mutex, &opp_table->lock) { for (i = 0; i < count; i++) {
uV[i].min = ~0;
uV[i].max = 0;
list_for_each_entry(opp, &opp_table->opp_list, node) { if (!opp->available) continue;
if (opp->supplies[i].u_volt_min < uV[i].min)
uV[i].min = opp->supplies[i].u_volt_min; if (opp->supplies[i].u_volt_max > uV[i].max)
uV[i].max = opp->supplies[i].u_volt_max;
}
}
}
/* * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine.
*/ for (i = 0; i < count; i++) {
reg = opp_table->regulators[i];
ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max); if (ret > 0)
latency_ns += ret * 1000;
}
/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to other.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	/* A transition may need both a voltage and a clock change */
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
/** * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz * @dev: device for which we do this operation * * Return: This function returns the frequency of the OPP marked as suspend_opp * if one is available, else returns 0;
*/ unsignedlong dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{ struct opp_table *opp_table __free(put_opp_table); unsignedlong freq = 0;
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0;
if (opp_table->suspend_opp && opp_table->suspend_opp->available)
freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
/*
 * Count the OPPs currently marked available in @opp_table.
 *
 * Takes opp_table->lock internally; the caller must hold a reference on
 * the table so it can't go away underneath us.
 */
int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	return count;
}
/** * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table * @dev: device for which we do this operation * * Return: This function returns the number of available opps if there are any, * else returns 0 if none or the corresponding error value.
*/ int dev_pm_opp_get_opp_count(struct device *dev)
{ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) {
dev_dbg(dev, "%s: OPP table not found (%ld)\n",
__func__, PTR_ERR(opp_table)); return PTR_ERR(opp_table);
}
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) {
dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
PTR_ERR(opp_table)); return ERR_CAST(opp_table);
}
staticstruct dev_pm_opp *_find_key_exact(struct device *dev, unsignedlong key, int index, bool available, unsignedlong (*read)(struct dev_pm_opp *opp, int index), bool (*assert)(struct opp_table *opp_table, unsignedint index))
{ /* * The value of key will be updated here, but will be ignored as the * caller doesn't need it.
*/ return _find_key(dev, &key, index, available, read, _compare_exact,
assert);
}
/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:	device for which we do this operation
 * @freq:	frequency to search for
 * @available:	true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. if available=true, then the
 * match is for exact matching frequency and is available in the stored OPP
 * table. if false, the match is for exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
		unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					  clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	frequency to search for
 * @index:	Clock index
 * @available:	true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp , else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq,
			       assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
/** * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq * @dev: device for which we do this operation * @freq: Start frequency * * Search for the matching ceil *available* OPP from a starting freq * for a device. * * Return: matching *opp and refreshes *freq accordingly, else returns * ERR_PTR in case of error and should be handled using IS_ERR. Error return * values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsignedlong *freq)
{ return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq,
			      assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
/** * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq * @dev: device for which we do this operation * @freq: Start frequency * * Search for the matching floor *available* OPP from a starting freq * for a device. * * Return: matching *opp and refreshes *freq accordingly, else returns * ERR_PTR in case of error and should be handled using IS_ERR. Error return * values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsignedlong *freq)
{ return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev:	Device for which we do this operation
 * @freq:	Start frequency
 * @index:	Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
/** * dev_pm_opp_find_level_exact() - search for an exact level * @dev: device for which we do this operation * @level: level to search for * * Return: Searches for exact match in the opp table and returns pointer to the * matching opp if found, else returns ERR_PTR in case of error and should * be handled using IS_ERR. Error return values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsignedint level)
{ return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
/** * dev_pm_opp_find_level_ceil() - search for an rounded up level * @dev: device for which we do this operation * @level: level to search for * * Return: Searches for rounded up match in the opp table and returns pointer * to the matching opp if found, else returns ERR_PTR in case of error and * should be handled using IS_ERR. Error return values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsignedint *level)
{ unsignedlong temp = *level; struct dev_pm_opp *opp;
opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); if (IS_ERR(opp)) return opp;
/* False match */ if (temp == OPP_LEVEL_UNSET) {
dev_err(dev, "%s: OPP levels aren't available\n", __func__);
dev_pm_opp_put(opp); return ERR_PTR(-ENODEV);
}
/** * dev_pm_opp_find_level_floor() - Search for a rounded floor level * @dev: device for which we do this operation * @level: Start level * * Search for the matching floor *available* OPP from a starting level * for a device. * * Return: matching *opp and refreshes *level accordingly, else returns * ERR_PTR in case of error and should be handled using IS_ERR. Error return * values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, unsignedint *level)
{ unsignedlong temp = *level; struct dev_pm_opp *opp;
/** * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth * @dev: device for which we do this operation * @bw: start bandwidth * @index: which bandwidth to compare, in case of OPPs with several values * * Search for the matching floor *available* OPP from a starting bandwidth * for a device. * * Return: matching *opp and refreshes *bw accordingly, else returns * ERR_PTR in case of error and should be handled using IS_ERR. Error return * values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsignedint *bw, int index)
{ unsignedlong temp = *bw; struct dev_pm_opp *opp;
/** * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth * @dev: device for which we do this operation * @bw: start bandwidth * @index: which bandwidth to compare, in case of OPPs with several values * * Search for the matching floor *available* OPP from a starting bandwidth * for a device. * * Return: matching *opp and refreshes *bw accordingly, else returns * ERR_PTR in case of error and should be handled using IS_ERR. Error return * values can be: * EINVAL: for bad pointer * ERANGE: no match found for search * ENODEV: if device not found in list of registered devices * * The callers are required to call dev_pm_opp_put() for the returned OPP after * use.
*/ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsignedint *bw, int index)
{ unsignedlong temp = *bw; struct dev_pm_opp *opp;
ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
supply->u_volt, supply->u_volt_max); if (ret)
dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
__func__, supply->u_volt_min, supply->u_volt,
supply->u_volt_max, ret);
/* One of target and opp must be available */ if (target) {
freq = *target;
} elseif (opp) {
freq = opp->rates[0];
} else {
WARN_ON(1); return -EINVAL;
}
ret = clk_set_rate(opp_table->clk, freq); if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
} else {
opp_table->current_rate_single_clk = freq;
}
return ret;
}
/* * Simple implementation for configuring multiple clocks. Configure clocks in * the order in which they are present in the array while scaling up.
*/ int dev_pm_opp_config_clks_simple(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, void *data, bool scaling_down)
{ int ret, i;
if (scaling_down) { for (i = opp_table->clk_count - 1; i >= 0; i--) {
ret = clk_set_rate(opp_table->clks[i], opp->rates[i]); if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret); return ret;
}
}
} else { for (i = 0; i < opp_table->clk_count; i++) {
ret = clk_set_rate(opp_table->clks[i], opp->rates[i]); if (ret) {
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret); return ret;
}
}
}
/* This function only supports single regulator per device */ if (WARN_ON(count > 1)) {
dev_err(dev, "multiple regulators are not supported\n"); return -EINVAL;
}
ret = _set_opp_voltage(dev, reg, new_opp->supplies); if (ret) return ret;
/* * Enable the regulator after setting its voltages, otherwise it breaks * some boot-enabled regulators.
*/ if (unlikely(!new_opp->opp_table->enabled)) {
ret = regulator_enable(reg); if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
}
return 0;
}
staticint _set_opp_bw(conststruct opp_table *opp_table, struct dev_pm_opp *opp, struct device *dev)
{
u32 avg, peak; int i, ret;
if (!opp_table->paths) return 0;
for (i = 0; i < opp_table->path_count; i++) { if (!opp) {
avg = 0;
peak = 0;
} else {
avg = opp->bandwidth[i].avg;
peak = opp->bandwidth[i].peak;
}
ret = icc_set_bw(opp_table->paths[i], avg, peak); if (ret) {
dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
opp ? "set" : "remove", i, ret); return ret;
}
}
return 0;
}
/*
 * Set the performance state corresponding to @opp via the device's PM
 * domain. A NULL @opp drops the request (level 0); an OPP without a level
 * set (OPP_LEVEL_UNSET) is silently skipped.
 */
static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned int level = 0;
	int ret = 0;

	if (opp) {
		if (opp->level == OPP_LEVEL_UNSET)
			return 0;

		level = opp->level;
	}

	/* Request a new performance state through the device's PM domain. */
	ret = dev_pm_domain_set_performance_state(dev, level);
	if (ret)
		dev_err(dev, "Failed to set performance state %u (%d)\n", level,
			ret);

	return ret;
}
/* This is only called for PM domain for now */ staticint _set_required_opps(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, bool up)
{ struct device **devs = opp_table->required_devs; struct dev_pm_opp *required_opp; int index, target, delta, ret;
if (!devs) return 0;
/* required-opps not fully initialized yet */ if (lazy_linking_pending(opp_table)) return -EBUSY;
/* Scaling up? Set required OPPs in normal order, else reverse */ if (up) {
index = 0;
target = opp_table->required_opp_count;
delta = 1;
} else {
index = opp_table->required_opp_count - 1;
target = -1;
delta = -1;
}
while (index != target) { if (devs[index]) {
required_opp = opp ? opp->required_opps[index] : NULL;
ret = _set_opp_level(devs[index], required_opp); if (ret) return ret;
}
if (!IS_ERR(opp_table->clk)) {
freq = clk_get_rate(opp_table->clk);
opp = _find_freq_ceil(opp_table, &freq);
}
/* * Unable to find the current OPP ? Pick the first from the list since * it is in ascending order, otherwise rest of the code will need to * make special checks to validate current_opp.
*/ if (IS_ERR(opp)) {
guard(mutex)(&opp_table->lock);
opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node));
}
opp_table->current_opp = opp;
}
staticint _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{ int ret;
if (!opp_table->enabled) return 0;
/* * Some drivers need to support cases where some platforms may * have OPP table for the device, while others don't and * opp_set_rate() just needs to behave like clk_set_rate().
*/ if (!_get_opp_count(opp_table)) return 0;
ret = _set_opp_bw(opp_table, NULL, dev); if (ret) return ret;
if (opp_table->regulators)
regulator_disable(opp_table->regulators[0]);
ret = _set_opp_level(dev, NULL); if (ret) goto out;
ret = _set_required_opps(dev, opp_table, NULL, false);
if (unlikely(!opp)) return _disable_opp_table(dev, opp_table);
/* Find the currently set OPP if we don't know already */ if (unlikely(!opp_table->current_opp))
_find_current_opp(dev, opp_table);
old_opp = opp_table->current_opp;
/* Return early if nothing to do */ if (!forced && old_opp == opp && opp_table->enabled) {
dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__); return 0;
}
/* Scaling up? Configure required OPPs before frequency */ if (!scaling_down) {
ret = _set_required_opps(dev, opp_table, opp, true); if (ret) {
dev_err(dev, "Failed to set required opps: %d\n", ret); return ret;
}
ret = _set_opp_level(dev, opp); if (ret) return ret;
ret = _set_opp_bw(opp_table, opp, dev); if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret); return ret;
}
if (opp_table->config_regulators) {
ret = opp_table->config_regulators(dev, old_opp, opp,
opp_table->regulators,
opp_table->regulator_count); if (ret) {
dev_err(dev, "Failed to set regulator voltages: %d\n",
ret); return ret;
}
}
}
if (opp_table->config_clks) {
ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down); if (ret) return ret;
}
/* Scaling down? Configure required OPPs after frequency */ if (scaling_down) { if (opp_table->config_regulators) {
ret = opp_table->config_regulators(dev, old_opp, opp,
opp_table->regulators,
opp_table->regulator_count); if (ret) {
dev_err(dev, "Failed to set regulator voltages: %d\n",
ret); return ret;
}
}
ret = _set_opp_bw(opp_table, opp, dev); if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret); return ret;
}
ret = _set_opp_level(dev, opp); if (ret) return ret;
ret = _set_required_opps(dev, opp_table, opp, false); if (ret) {
dev_err(dev, "Failed to set required opps: %d\n", ret); return ret;
}
}
/* Make sure current_opp doesn't get freed */
opp_table->current_opp = dev_pm_opp_get(opp);
return ret;
}
/** * dev_pm_opp_set_rate() - Configure new OPP based on frequency * @dev: device for which we do this operation * @target_freq: frequency to achieve * * This configures the power-supplies to the levels specified by the OPP * corresponding to the target_freq, and programs the clock to a value <= * target_freq, as rounded by clk_round_rate(). Device wanting to run at fmax * provided by the opp, should have already rounded to the target OPP's * frequency.
*/ int dev_pm_opp_set_rate(struct device *dev, unsignedlong target_freq)
{ struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp __free(put_opp) = NULL; unsignedlong freq = 0, temp_freq; bool forced = false;
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) {
dev_err(dev, "%s: device's opp table doesn't exist\n", __func__); return PTR_ERR(opp_table);
}
if (target_freq) { /* * For IO devices which require an OPP on some platforms/SoCs * while just needing to scale the clock on some others * we look for empty OPP tables with just a clock handle and * scale only the clk. This makes dev_pm_opp_set_rate() * equivalent to a clk_set_rate()
*/ if (!_get_opp_count(opp_table)) { return opp_table->config_clks(dev, opp_table, NULL,
&target_freq, false);
}
/* * The clock driver may support finer resolution of the * frequencies than the OPP table, don't update the frequency we * pass to clk_set_rate() here.
*/
temp_freq = freq;
opp = _find_freq_ceil(opp_table, &temp_freq); if (IS_ERR(opp)) {
dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n",
__func__, freq, PTR_ERR(opp)); return PTR_ERR(opp);
}
/* * An OPP entry specifies the highest frequency at which other * properties of the OPP entry apply. Even if the new OPP is * same as the old one, we may still reach here for a different * value of the frequency. In such a case, do not abort but * configure the hardware to the desired frequency forcefully.
*/
forced = opp_table->current_rate_single_clk != freq;
}
/** * dev_pm_opp_set_opp() - Configure device for OPP * @dev: device for which we do this operation * @opp: OPP to set to * * This configures the device based on the properties of the OPP passed to this * routine. * * Return: 0 on success, a negative error number otherwise.
*/ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) {
dev_err(dev, "%s: device opp doesn't exist\n", __func__); return PTR_ERR(opp_table);
}
/* Create debugfs entries for the opp_table */
opp_debug_register(opp_dev, opp_table);
return opp_dev;
}
staticstruct opp_table *_allocate_opp_table(struct device *dev, int index)
{ struct opp_table *opp_table; struct opp_device *opp_dev; int ret;
/* * Allocate a new OPP table. In the infrequent case where a new * device is needed to be added, we pay this penalty.
*/
opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); if (!opp_table) return ERR_PTR(-ENOMEM);
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
opp_dev = _add_opp_dev(dev, opp_table); if (!opp_dev) {
ret = -ENOMEM; goto err;
}
_of_init_opp_table(opp_table, dev, index);
/* Find interconnect path(s) for the device */
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); if (ret) { if (ret == -EPROBE_DEFER) goto remove_opp_dev;
/* * Return early if we don't need to get clk or we have already done it * earlier.
*/ if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
opp_table->clks) return opp_table;
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
ret = PTR_ERR_OR_ZERO(opp_table->clk); if (!ret) {
opp_table->config_clks = _opp_config_clk_single;
opp_table->clk_count = 1; return opp_table;
}
if (ret == -ENOENT) { /* * There are few platforms which don't want the OPP core to * manage device's clock settings. In such cases neither the * platform provides the clks explicitly to us, nor the DT * contains a valid clk entry. The OPP nodes in DT may still * contain "opp-hz" property though, which we need to parse and * allow the platform to find an OPP based on freq later on. * * This is a simple solution to take care of such corner cases, * i.e. make the clk_count 1, which lets us allocate space for * frequency in opp->rates and also parse the entries in DT.
*/
opp_table->clk_count = 1;
/* * We need to make sure that the OPP table for a device doesn't get added twice, * if this routine gets called in parallel with the same device pointer. * * The simplest way to enforce that is to perform everything (find existing * table and if not found, create a new one) under the opp_table_lock, so only * one creator gets access to the same. But that expands the critical section * under the lock and may end up causing circular dependencies with frameworks * like debugfs, interconnect or clock framework as they may be direct or * indirect users of OPP core. * * And for that reason we have to go for a bit tricky implementation here, which * uses the opp_tables_busy flag to indicate if another creator is in the middle * of adding an OPP table and others should wait for it to finish.
*/ struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk)
{ struct opp_table *opp_table;
again:
mutex_lock(&opp_table_lock);
opp_table = _find_opp_table_unlocked(dev); if (!IS_ERR(opp_table)) goto unlock;
/* * The opp_tables list or an OPP table's dev_list is getting updated by * another user, wait for it to finish.
*/ if (unlikely(opp_tables_busy)) {
mutex_unlock(&opp_table_lock);
cpu_relax(); goto again;
}
/* * Notify the changes in the availability of the operable * frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
_of_clear_opp(opp_table, opp);
opp_debug_remove_one(opp);
kfree(opp);
}
/** * dev_pm_opp_remove() - Remove an OPP from OPP table * @dev: device for which we do this operation * @freq: OPP to remove with matching 'freq' * * This function removes an opp from the opp table.
*/ void dev_pm_opp_remove(struct device *dev, unsignedlong freq)
{ struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp = NULL, *iter;
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return;
if (!assert_single_clk(opp_table, 0)) return;
scoped_guard(mutex, &opp_table->lock) {
list_for_each_entry(iter, &opp_table->opp_list, node) { if (iter->rates[0] == freq) {
opp = iter; break;
}
}
}
if (opp) {
dev_pm_opp_put(opp);
/* Drop the reference taken by dev_pm_opp_add() */
dev_pm_opp_put_opp_table(opp_table);
} else {
dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
__func__, freq);
}
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
list_for_each_entry(opp, &opp_table->opp_list, node) { /* * Refcount must be dropped only once for each OPP by OPP core, * do that with help of "removed" flag.
*/ if (!opp->removed && dynamic == opp->dynamic) return opp;
}
return NULL;
}
/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen lock less to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}
if (--opp_table->parsed_static_opps) returntrue;
}
_opp_remove_all(opp_table, false); returntrue;
}
/** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation * * This function removes all dynamically created OPPs from the opp table.
*/ void dev_pm_opp_remove_all_dynamic(struct device *dev)
{ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return;
/* Allocate space for at least one supply */
supply_count = opp_table->regulator_count > 0 ?
opp_table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * supply_count;
clk_size = sizeof(*opp->rates) * opp_table->clk_count;
icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;
/* allocate new OPP node and supplies structures */
opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL); if (!opp) return NULL;
/* Put the supplies, bw and clock at the end of the OPP structure */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
/* * Insert new OPP in order of increasing frequency and discard if * already present. * * Need to use &opp_table->opp_list in the condition part of the 'for' * loop, don't replace it with head otherwise it will become an infinite * loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
opp_cmp = _opp_compare_key(opp_table, new_opp, opp); if (opp_cmp > 0) {
*head = &opp->node; continue;
}
/* Should we compare voltages for all regulators here ? */ return opp->available &&
new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
}
return 0;
}
/*
 * Mark @opp unavailable if any of its @count required OPPs is itself
 * unavailable, warning about the first such dependency and stopping there.
 */
void _required_opps_available(struct dev_pm_opp *opp, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++) {
		if (!opp->required_opps[idx]->available) {
			opp->available = false;
			pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
				__func__, opp->required_opps[idx]->np,
				opp->rates[0]);
			return;
		}
	}
}
/*
 * Returns:
 * 0: On success. And appropriate error message for duplicate OPPs.
 * -EBUSY: For OPP with same freq/volt and is available. The callers of
 *  _opp_add() must return 0 if they receive -EBUSY from it. This is to make
 *  sure we don't print error messages unnecessarily if different parts of
 *  kernel try to initialize the OPP table.
 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
 *  should be considered an error by the callers of _opp_add().
 *
 * NOTE(review): this block looks truncated by extraction — the list_add()
 * of new_opp, the close of the scoped_guard() block, and the function tail
 * (reference init, debugfs entry, required-opps handling, final return) are
 * not visible here; confirm against the original file before building.
 */
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table)
{ struct list_head *head; int ret;
/* Hold the table lock for the list scan; scoped_guard drops it on scope exit */
scoped_guard(mutex, &opp_table->lock) {
head = &opp_table->opp_list;
/* On success, head was advanced to the chosen insertion point */
ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); if (ret) return ret;
/* OPP voltage not reachable by the regulators: keep it, flagged unavailable */
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
__func__, new_opp->rates[0]);
}
/* required-opps not fully initialized yet */ if (lazy_linking_pending(opp_table)) return 0;
/**
 * _opp_add_v1() - Allocate a OPP based on v1 bindings.
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @data:	The OPP data for the OPP to add
 * @dynamic:	Dynamically added OPPs.
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
 * and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		struct dev_pm_opp_data *data, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol, u_volt = data->u_volt;
	int ret;

	/* The v1 bindings describe at most a single clock per OPP */
	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/*
	 * NOTE(review): the population of the new OPP below was missing from
	 * the truncated chunk, which left tol/u_volt unused and the OPP empty.
	 * Restored: fill in frequency, level and turbo flag, and derive the
	 * voltage window from the v1 percentage tolerance.
	 */
	/* populate the opp table */
	new_opp->rates[0] = data->freq;
	new_opp->level = data->level;
	new_opp->turbo = data->turbo;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);

	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}
/* * This is required only for the V2 bindings, and it enables a platform to * specify the hierarchy of versions it supports. OPP layer will then enable * OPPs, which are available for those versions, based on its 'opp-supported-hw' * property.
*/ staticint _opp_set_supported_hw(struct opp_table *opp_table, const u32 *versions, unsignedint count)
{ /* Another CPU that shares the OPP table has set the property ? */ if (opp_table->supported_hw) return 0;
opp_table->supported_hw = kmemdup_array(versions, count, sizeof(*versions), GFP_KERNEL); if (!opp_table->supported_hw) return -ENOMEM;
/* * This is required only for the V2 bindings, and it enables a platform to * specify the extn to be used for certain property names. The properties to * which the extension will apply are opp-microvolt and opp-microamp. OPP core * should postfix the property name with -<name> while looking for them.
*/ staticint _opp_set_prop_name(struct opp_table *opp_table, constchar *name)
{ /* Another CPU that shares the OPP table has set the property ? */ if (!opp_table->prop_name) {
opp_table->prop_name = kstrdup(name, GFP_KERNEL); if (!opp_table->prop_name) return -ENOMEM;
}
/* * In order to support OPP switching, OPP layer needs to know the name of the * device's regulators, as the core would be required to switch voltages as * well. * * This must be called before any OPPs are initialized for the device.
*/ staticint _opp_set_regulators(struct opp_table *opp_table, struct device *dev, constchar * const names[])
{ constchar * const *temp = names; struct regulator *reg; int count = 0, ret, i;
/* Count number of regulators */ while (*temp++)
count++;
if (!count) return -EINVAL;
/* Another CPU that shares the OPP table has set the regulators ? */ if (opp_table->regulators) return 0;
opp_table->regulators = kmalloc_array(count, sizeof(*opp_table->regulators),
GFP_KERNEL); if (!opp_table->regulators) return -ENOMEM;
for (i = 0; i < count; i++) {
reg = regulator_get_optional(dev, names[i]); if (IS_ERR(reg)) {
ret = dev_err_probe(dev, PTR_ERR(reg), "%s: no regulator (%s) found\n",
__func__, names[i]); goto free_regulators;
}
opp_table->regulators[i] = reg;
}
opp_table->regulator_count = count;
/* Set generic config_regulators() for single regulators here */ if (count == 1)
opp_table->config_regulators = _opp_config_regulator_single;
return 0;
free_regulators: while (i != 0)
regulator_put(opp_table->regulators[--i]);
/* Release the first @count clocks of @opp_table and free the clks array */
static void _put_clks(struct opp_table *opp_table, int count)
{
	int i;

	/* Put the clocks in reverse order of acquisition */
	for (i = count - 1; i >= 0; i--)
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
	opp_table->clks = NULL;	/* allow a later clk setup to start afresh */
}
/*
 * In order to support OPP switching, OPP layer needs to get pointers to the
 * clocks for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact names of the clks to
 * use.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
			     const char * const names[],
			     config_clks_t config_clks)
{
	const char * const *temp = names;
	int count = 0, ret, i;
	struct clk *clk;

	/* Count number of clks (names is NULL-terminated) */
	while (*temp++)
		count++;

	/*
	 * This is a special case where we have a single clock, whose connection
	 * id name is NULL, i.e. first two entries are NULL in the array.
	 */
	if (!count && !names[1])
		count = 1;

	/* Fail early for invalid configurations */
	if (!count || (!config_clks && count > 1))
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the clkname ? */
	if (opp_table->clks)
		return 0;

	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
	if (!opp_table->clks)
		return -ENOMEM;

	/* Find clks for the device */
	for (i = 0; i < count; i++) {
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
			goto free_clks;
		}

		/*
		 * NOTE(review): restored lines the truncated chunk dropped —
		 * without storing each clk and recording clk_count /
		 * config_clks, the table would leak the references and the
		 * braces would not even balance.
		 */
		opp_table->clks[i] = clk;
	}

	opp_table->clk_count = count;
	opp_table->config_clks = config_clks;

	/* Set generic single clk set here */
	if (count == 1) {
		if (!opp_table->config_clks)
			opp_table->config_clks = _opp_config_clk_single;

		/*
		 * We could have just dropped the "clk" field and used "clks"
		 * everywhere. Instead we kept the "clk" field around for
		 * following reasons:
		 *
		 * - avoiding clks[0] everywhere else.
		 * - not running single clk helpers for multiple clk usecase by
		 *   mistake.
		 *
		 * Since this is single-clk case, just update the clk pointer
		 * too.
		 */
		opp_table->clk = opp_table->clks[0];
	}

	return 0;

free_clks:
	/* Unwind the i clocks acquired before the failure */
	_put_clks(opp_table, i);
	return ret;
}
staticvoid _opp_put_clknames(struct opp_table *opp_table)
{ if (!opp_table->clks) return;
/*
 * This is useful to support platforms with multiple regulators per device.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
		struct device *dev, config_regulators_t config_regulators)
{
	/* Another CPU that shares the OPP table has set the helper ? */
	if (!opp_table->config_regulators)
		opp_table->config_regulators = config_regulators;

	/*
	 * NOTE(review): restored the missing success return and closing brace
	 * the truncated chunk dropped.
	 */
	return 0;
}
/*
 * NOTE(review): interior fragment of a required-devs setup helper — the
 * function header (and with it the declarations of required_table, gdev,
 * required_dev and index) is missing from this chunk, and the body is cut
 * short right after the dev_to_genpd_dev() lookup; confirm against the
 * original file.
 */
/* Genpd core takes care of propagation to parent genpd */ if (opp_table->is_genpd) {
dev_err(dev, "%s: Operation not supported for genpds\n", __func__); return -EOPNOTSUPP;
}
/* The caller's index must name one of the parsed required-opps entries */
if (index >= opp_table->required_opp_count) {
dev_err(dev, "Required OPPs not available, can't set required devs\n"); return -EINVAL;
}
required_table = opp_table->required_opp_tables[index]; if (IS_ERR(required_table)) {
dev_err(dev, "Missing OPP table, unable to set the required devs\n"); return -ENODEV;
}
/*
 * The required_opp_tables parsing is not perfect, as the OPP core does
 * the parsing solely based on the DT node pointers. The core sets the
 * required_opp_tables entry to the first OPP table in the "opp_tables"
 * list, that matches with the node pointer.
 *
 * If the target DT OPP table is used by multiple devices and they all
 * create separate instances of 'struct opp_table' from it, then it is
 * possible that the required_opp_tables entry may be set to the
 * incorrect sibling device.
 *
 * Cross check it again and fix if required.
 */
gdev = dev_to_genpd_dev(required_dev); if (IS_ERR(gdev)) return PTR_ERR(gdev);
/*
 * NOTE(review): the German boilerplate below ("the information on this web
 * page was compiled to the best of our knowledge; no guarantee of
 * completeness, correctness or quality is given; the syntax highlighting and
 * measurement are still experimental") is web-scraping residue, not part of
 * the kernel source. Commented out so the file stays valid C; original text
 * preserved:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */