/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */ staticint gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{ unsignedint reg;
u32 val; int ret;
if (status == GDSC_ON && sc->rsupply) {
ret = regulator_enable(sc->rsupply); if (ret < 0) return ret;
}
ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);
/* If disabling votable gdscs, don't poll on status */ if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) { /* * Add a short delay here to ensure that an enable * right after it was disabled does not put it in an * unknown state
*/
udelay(TIMEOUT_US); return 0;
}
if (sc->gds_hw_ctrl) { /* * The gds hw controller asserts/de-asserts the status bit soon * after it receives a power on/off request from a master. * The controller then takes around 8 xo cycles to start its * internal state machine and update the status bit. During * this time, the status bit does not reflect the true status * of the core. * Add a delay of 1 us between writing to the SW_COLLAPSE bit * and polling the status bit.
*/
udelay(1);
}
ret = gdsc_poll_status(sc, status);
WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");
if (!ret && status == GDSC_OFF && sc->rsupply) {
ret = regulator_disable(sc->rsupply); if (ret < 0) return ret;
}
return ret;
}
/* Release (de-assert) every block reset associated with this GDSC. */
static inline int gdsc_deassert_reset(struct gdsc *sc)
{
	int idx = 0;

	while (idx < sc->reset_count) {
		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[idx]);
		idx++;
	}

	return 0;
}
/* Assert every block reset associated with this GDSC. */
static inline int gdsc_assert_reset(struct gdsc *sc)
{
	int idx = 0;

	while (idx < sc->reset_count) {
		sc->rcdev->ops->assert(sc->rcdev, sc->resets[idx]);
		idx++;
	}

	return 0;
}
if (sc->pwrsts == PWRSTS_ON) return gdsc_deassert_reset(sc);
if (sc->flags & SW_RESET) {
gdsc_assert_reset(sc);
udelay(1);
gdsc_deassert_reset(sc);
}
if (sc->flags & CLAMP_IO) { if (sc->flags & AON_RESET)
gdsc_assert_reset_aon(sc);
gdsc_deassert_clamp_io(sc);
}
ret = gdsc_toggle_logic(sc, GDSC_ON, false); if (ret) return ret;
if (sc->pwrsts & PWRSTS_OFF)
gdsc_force_mem_on(sc);
/* * If clocks to this power domain were already on, they will take an * additional 4 clock cycles to re-enable after the power domain is * enabled. Delay to account for this. A delay is also needed to ensure * clocks are not enabled within 400ns of enabling power to the * memories.
*/
udelay(1);
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
/* Turn on HW trigger mode if supported */ if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true); if (ret) return ret; /* * Wait for the GDSC to go through a power down and * up cycle. In case a firmware ends up polling status * bits for the gdsc, it might read an 'on' status before * the GDSC can finish the power cycle. * We wait 1us before returning to ensure the firmware * can't immediately poll the status bits.
*/
udelay(1);
}
if (sc->pwrsts == PWRSTS_ON) return gdsc_assert_reset(sc);
/* Turn off HW trigger mode if supported */ if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, false); if (ret < 0) return ret; /* * Wait for the GDSC to go through a power down and * up cycle. In case we end up polling status * bits for the gdsc before the power cycle is completed * it might read an 'on' status wrongly.
*/
udelay(1);
ret = gdsc_poll_status(sc, GDSC_ON); if (ret) return ret;
}
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
/* * If the GDSC supports only a Retention state, apart from ON, * leave it in ON state. * There is no SW control to transition the GDSC into * Retention state. This happens in HW when the parent * domain goes down to a Low power state
*/ if (sc->pwrsts == PWRSTS_RET_ON) return 0;
ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff); if (ret) return ret;
if (sc->flags & CLAMP_IO)
gdsc_assert_clamp_io(sc);
/* * Wait for the GDSC to go through a power down and * up cycle. If we poll the status register before the * power cycle is finished we might read incorrect values.
*/
udelay(1);
/* * When the GDSC is switched to HW mode, HW can disable the GDSC. * When the GDSC is switched back to SW mode, the GDSC will be enabled * again, hence we need to poll for GDSC to complete the power up.
*/ if (!mode) return gdsc_poll_status(sc, GDSC_ON);
/*
 * One-time setup of a GDSC: program the wait-state/override configuration,
 * sync the kernel's bookkeeping with the hardware's current state, and
 * register the domain with the genpd framework.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on registers writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 * Configure wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

	if (!sc->en_rest_wait_val)
		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
	if (!sc->en_few_wait_val)
		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis_wait_val)
		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

	/*
	 * NOTE(review): the original passed 'val' to regmap_update_bits()
	 * without ever initializing it (undefined behavior); restored the
	 * wait-value field encoding per the upstream driver.
	 */
	val = FIELD_PREP(EN_REST_WAIT_MASK, sc->en_rest_wait_val) |
	      FIELD_PREP(EN_FEW_WAIT_MASK, sc->en_few_wait_val) |
	      FIELD_PREP(CLK_DIS_WAIT_MASK, sc->clk_dis_wait_val);

	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON, false);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/*
		 * Votable GDSCs can be ON due to Vote from other masters.
		 * If a Votable GDSC is ON, make sure we have a Vote.
		 */
		if (sc->flags & VOTABLE) {
			ret = gdsc_update_collapse_bit(sc, false);
			if (ret)
				goto err_disable_supply;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				goto err_disable_supply;
		}
	} else if (sc->flags & ALWAYS_ON) {
		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;
	if (sc->flags & HW_CTRL_TRIGGER) {
		sc->pd.set_hwmode_dev = gdsc_set_hwmode;
		sc->pd.get_hwmode_dev = gdsc_get_hwmode;
	}

	ret = pm_genpd_init(&sc->pd, NULL, !on);
	if (ret)
		goto err_disable_supply;

	return 0;

err_disable_supply:
	/* Only enabled above when the GDSC was found on; undo that. */
	if (on && sc->rsupply)
		regulator_disable(sc->rsupply);

	return ret;
}
/*
 * Register @subdomain as a child of every power domain in @pd_list.
 * Stops at the first pm_genpd_add_subdomain() failure and returns its error;
 * returns 0 when every parent accepted the subdomain.
 */
static int gdsc_add_subdomain_list(struct dev_pm_domain_list *pd_list,
				   struct generic_pm_domain *subdomain)
{
	int idx;

	for (idx = 0; idx < pd_list->num_pds; idx++) {
		struct device *pd_dev = pd_list->pd_devs[idx];
		struct generic_pm_domain *parent = pd_to_genpd(pd_dev->pm_domain);
		int err = pm_genpd_add_subdomain(parent, subdomain);

		if (err)
			return err;
	}

	return 0;
}
/*
 * Detach @subdomain from every parent power domain in @pd_list; the inverse of
 * gdsc_add_subdomain_list().
 *
 * NOTE(review): the original loop body here was garbled with a fragment of the
 * unregister path (it referenced 'scs', 'num' and 'desc', none of which are in
 * scope in this function); restored the upstream implementation.
 */
static void gdsc_remove_subdomain_list(struct dev_pm_domain_list *pd_list,
				       struct generic_pm_domain *subdomain)
{
	int i;

	for (i = 0; i < pd_list->num_pds; i++) {
		struct device *dev = pd_list->pd_devs[i];
		struct generic_pm_domain *genpd = pd_to_genpd(dev->pm_domain);

		pm_genpd_remove_subdomain(genpd, subdomain);
	}
}
int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *rcdev, struct regmap *regmap)
{ int i, ret; struct genpd_onecell_data *data; struct device *dev = desc->dev; struct gdsc **scs = desc->scs;
size_t num = desc->num;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM;
data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
GFP_KERNEL); if (!data->domains) return -ENOMEM;
for (i = 0; i < num; i++) { if (!scs[i] || !scs[i]->supply) continue;
scs[i]->rsupply = devm_regulator_get_optional(dev, scs[i]->supply); if (IS_ERR(scs[i]->rsupply)) {
ret = PTR_ERR(scs[i]->rsupply); if (ret != -ENODEV) return ret;
scs[i]->rsupply = NULL;
}
}
data->num_domains = num; for (i = 0; i < num; i++) { if (!scs[i]) continue;
scs[i]->regmap = regmap;
scs[i]->rcdev = rcdev;
ret = gdsc_init(scs[i]); if (ret) return ret;
data->domains[i] = &scs[i]->pd;
}
/* Add subdomains */ for (i = 0; i < num; i++) { if (!scs[i]) continue; if (scs[i]->parent)
ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); elseif (!IS_ERR_OR_NULL(dev->pm_domain))
ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); elseif (desc->pd_list)
ret = gdsc_add_subdomain_list(desc->pd_list, &scs[i]->pd);
/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring back
 * the device the CPU needs to disable the GX headswitch. There being no sane
 * way to reach in and touch that register from deep inside the GPU driver we
 * need to set up the infrastructure to be able to ensure that the GPU can
 * ensure that the GX is off during this super special case. We do this by
 * defining a GX gdsc with a dummy enable function and a "default" disable
 * function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally but during power down this will ensure that the GX domain
 * is *really* off - this gives us a semi standard way of doing what we need.
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret = 0;

	/* Enable the parent supply, when controlled through the regulator framework. */
	if (sc->rsupply)
		ret = regulator_enable(sc->rsupply);

	/*
	 * Do nothing with the GDSC itself.
	 *
	 * NOTE(review): this copy was truncated here; restored the missing
	 * 'return ret;' and closing brace per the upstream driver.
	 */
	return ret;
}
/*
 * (Extraction residue, not part of the driver source; translated from German
 * and wrapped as a comment so the file stays parseable:) "The information on
 * this web page was compiled carefully and to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the information
 * provided is guaranteed. Note: the syntax highlighting and the measurement
 * are still experimental."
 */