// SPDX-License-Identifier: GPL-2.0+
/*
 * Marvell Armada 37xx SoC Peripheral clocks
 *
 * Copyright (C) 2016 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * However some clocks may use only one or two of these blocks, or use
 * the xtal clock as parent.
 */
/* * This function is always called after the function * armada_3700_pm_dvfs_is_enabled, so no need to check again * if the base is valid.
*/
regmap_read(base, reg, &load_level);
/* * The register and the offset inside this register accessed to * read the current divider depend on the load level
*/
load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
armada_3700_pm_dvfs_update_regs(load_level, ®, &offset);
/* * This function is always called after the function * armada_3700_pm_dvfs_is_enabled, so no need to check again * if the base is valid
*/
regmap_read(base, reg, &load_level);
/* * The register and the offset inside this register accessed to * read the current divider depend on the load level
*/
load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
armada_3700_pm_dvfs_update_regs(load_level, ®, &offset);
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
} else {
val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
val &= pm_cpu->mask_mux;
}
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base); else
div = get_div(pm_cpu->reg_div, pm_cpu->shift_div); return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
staticlong clk_pm_cpu_round_rate(struct clk_hw *hw, unsignedlong rate, unsignedlong *parent_rate)
{ struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); struct regmap *base = pm_cpu->nb_pm_base; unsignedint div = *parent_rate / rate; unsignedint load_level; /* only available when DVFS is enabled */ if (!armada_3700_pm_dvfs_is_enabled(base)) return -EINVAL;
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK; if (val == div) /* * We found a load level matching the target * divider, switch to this load level and * return.
*/ return *parent_rate / div;
}
/* We didn't find any valid divider */ return -EINVAL;
}
/* * Workaround when base CPU frequnecy is 1000 or 1200 MHz * * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz * respectively) to L0 frequency (1/1.2 GHz) requires a significant * amount of time to let VDD stabilize to the appropriate * voltage. This amount of time is large enough that it cannot be * covered by the hardware countdown register. Due to this, the CPU * might start operating at L0 before the voltage is stabilized, * leading to CPU stalls. * * To work around this problem, we prevent switching directly from the * L2/L3 frequencies to the L0 frequency, and instead switch to the L1 * frequency in-between. The sequence therefore becomes: * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz) * 2. Sleep 20ms for stabling VDD voltage * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
*/ staticvoid clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu, unsignedint new_level, unsignedlong rate, struct regmap *base)
{ unsignedint cur_level;
/* * System wants to go to L1 on its own. If we are going from L2/L3, * remember when 20ms will expire. If from L0, set the value so that * next switch to L0 won't have to wait.
*/ if (new_level == ARMADA_37XX_DVFS_LOAD_1) { if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
pm_cpu->l1_expiration = jiffies; else
pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20); return;
}
/* * If we are setting to L2/L3, just invalidate L1 expiration time, * sleeping is not needed.
*/ if (rate < 1000*1000*1000) goto invalidate_l1_exp;
/* * We are going to L0 with rate >= 1GHz. Check whether we have been at * L1 for long enough time. If not, go to L1 for 20ms.
*/ if (pm_cpu->l1_expiration && time_is_before_eq_jiffies(pm_cpu->l1_expiration)) goto invalidate_l1_exp;
regmap_read(base, reg, &val);
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
if (val == div) { /* * We found a load level matching the target * divider, switch to this load level and * return.
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
/* Apply workaround when base CPU frequency is 1000 or 1200 MHz */ if (parent_rate >= 1000*1000*1000)
clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
regmap_update_bits(base, reg, mask, load_level);
return rate;
}
}
/* We didn't find any valid divider */ return -EINVAL;
}
driver_data->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(driver_data->reg)) return PTR_ERR(driver_data->reg);
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) { struct clk_hw **hw = &driver_data->hw_data->hws[i]; if (armada_3700_add_composite_clk(&data[i], driver_data->reg,
&driver_data->lock, dev, hw))
dev_err(dev, "Can't register periph clock %s\n",
data[i].name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
driver_data->hw_data); if (ret) { for (i = 0; i < num_periph; i++)
clk_hw_unregister(driver_data->hw_data->hws[i]); return ret;
}
The information on this website has been compiled carefully and to the
best of our knowledge. However, no guarantee is given as to the
completeness, correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.