/**
 * struct at91_pm_bu - AT91 power management backup unit data structure
 * @suspended: true if suspended to backup mode
 * @reserved: reserved
 * @canary: canary data for memory checking after exit from backup mode
 * @resume: resume API
 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
 *			 of the memory
 */
struct at91_pm_bu {
	int suspended;
	unsigned long reserved;
	phys_addr_t canary;
	phys_addr_t resume;
	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
};
/**
 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
 * @pswbu: power switch BU control registers
 */
struct at91_pm_sfrbu_regs {
	struct {
		u32 key;
		u32 ctrl;
		u32 state;
		u32 softsw;
	} pswbu;
};
/**
 * enum at91_pm_eth_clk - Ethernet clock indexes
 * @AT91_PM_ETH_PCLK: pclk index
 * @AT91_PM_ETH_HCLK: hclk index
 * @AT91_PM_ETH_MAX_CLK: max index
 */
enum at91_pm_eth_clk {
	AT91_PM_ETH_PCLK,
	AT91_PM_ETH_HCLK,
	AT91_PM_ETH_MAX_CLK,
};
/**
 * enum at91_pm_eth - Ethernet controller indexes
 * @AT91_PM_G_ETH: gigabit Ethernet controller index
 * @AT91_PM_E_ETH: megabit Ethernet controller index
 * @AT91_PM_MAX_ETH: max index
 */
enum at91_pm_eth {
	AT91_PM_G_ETH,
	AT91_PM_E_ETH,
	AT91_PM_MAX_ETH,
};
/**
 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
 * @dev: Ethernet device
 * @np: Ethernet device node
 * @clks: Ethernet clocks
 * @modes: power management mode that this quirk applies to
 * @dns_modes: do not suspend modes: stop suspending if Ethernet is configured
 *	       as wakeup source but buggy and no other wakeup source is
 *	       available
 */
struct at91_pm_quirk_eth {
	struct device *dev;
	struct device_node *np;
	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
	u32 modes;
	u32 dns_modes;
};
/*
 * NOTE(review): tail of the Ethernet-quirk validity helper
 * (at91_pm_eth_quirk_is_valid, presumably — its signature is not visible in
 * this chunk). Whitespace was lost in extraction ("returnfalse" etc.), so the
 * code below is kept byte-identical. Visible behavior: returns true only when
 * the interface is present in DT, the current suspend mode is covered by
 * eth->modes, the driver's struct device can be resolved, and the device is
 * configured as a wakeup source. The resolved device reference is released
 * later (see the put_device() comment inline).
 */
/* Interface NA in DT. */ if (!eth->np) returnfalse;
/* No quirks for this interface and current suspend mode. */ if (!(eth->modes & BIT(soc_pm.data.mode))) returnfalse;
if (!eth->dev) { /* Driver not probed. */
pdev = of_find_device_by_node(eth->np); if (!pdev) returnfalse; /* put_device(eth->dev) is called at the end of suspend. */
eth->dev = &pdev->dev;
}
/* No quirks if device isn't a wakeup source. */ if (!device_may_wakeup(eth->dev)) returnfalse;
returntrue;
}
/*
 * NOTE(review): at91_pm_config_quirks() — applies/undoes Ethernet WoL quirks
 * around suspend. This chunk is truncated: the tail of the clk_unconfigure
 * recovery loop (and the function's closing brace) is missing, and whitespace
 * was lost in extraction ("staticint" etc.), so the code is kept
 * byte-identical. On suspend it gates clocks off for each valid quirk
 * interface; on resume it re-enables them and drops the device reference taken
 * by the validity helper. If a dns_modes interface is the only wakeup source,
 * suspend is refused with -EPERM.
 */
staticint at91_pm_config_quirks(bool suspend)
{ struct at91_pm_quirk_eth *eth; int i, j, ret, tmp;
/* * Ethernet IPs who's device_node pointers are stored into * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1 * or both due to a hardware bug. If they receive WoL packets while in * ULP0 or ULP1 IPs could stop working or the whole system could stop * working. We cannot handle this scenario in the ethernet driver itself * as the driver is common to multiple vendors and also we only know * here, in this file, if we suspend to ULP0 or ULP1 mode. Thus handle * these scenarios here, as quirks.
*/ for (i = 0; i < AT91_PM_MAX_ETH; i++) {
eth = &soc_pm.quirks.eth[i];
if (!at91_pm_eth_quirk_is_valid(eth)) continue;
/* * For modes in dns_modes mask the system blocks if quirk is not * applied but if applied the interface doesn't act at WoL * events. Thus take care to avoid suspending if this interface * is the only configured wakeup source.
*/ if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) { int ws_count = 0; #ifdef CONFIG_PM_SLEEP struct wakeup_source *ws;
for_each_wakeup_source(ws) { if (ws->dev == eth->dev) continue;
ws_count++; break;
} #endif
/* * Checking !ws is good for all platforms with issues * even when both G_ETH and E_ETH are available as dns_modes * is populated only on G_ETH interface.
*/ if (!ws_count) {
pr_err("AT91: PM: Ethernet cannot resume from WoL!");
ret = -EPERM;
put_device(eth->dev);
eth->dev = NULL; /* No need to revert clock settings for this eth. */
i--; goto clk_unconfigure;
}
}
if (suspend) {
clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
} else {
ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
eth->clks); if (ret) goto clk_unconfigure; /* * Release the reference to eth->dev taken in * at91_pm_eth_quirk_is_valid().
*/
put_device(eth->dev);
eth->dev = NULL;
}
}
return 0;
clk_unconfigure: /* * In case of resume we reach this point if clk_prepare_enable() failed. * we don't want to revert the previous clk_prepare_enable() for the * other IP.
*/ for (j = i; j >= 0; j--) {
eth = &soc_pm.quirks.eth[j]; if (suspend) { if (!at91_pm_eth_quirk_is_valid(eth)) continue;
/*
 * Verify that all the clocks are correct before entering
 * slow-clock mode.
 *
 * Returns 1 when the clock tree is safe for slow-clock suspend, 0 (with an
 * error message) when USB still uses its PLL or a programmable clock PCK0..3
 * is enabled with a source other than the slow clock.
 */
static int at91_pm_verify_clocks(void)
{
	unsigned long scsr;
	int i;

	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);

	/* USB must not be using PLLB */
	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
		return 0;
	}

	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
	for (i = 0; i < 4; i++) {
		u32 css;

		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
			continue;
		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
		if (css != AT91_PMC_CSS_SLOW) {
			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
			return 0;
		}
	}

	return 1;
}
/*
 * Call this from platform driver suspend() to see how deeply to suspend.
 * For example, some controllers (like OHCI) need one of the PLL clocks
 * in order to act as a wakeup source, and those are not available when
 * going into slow clock mode.
 *
 * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
 * the very same problem (but not using at91 main_clk), and it'd be better
 * to add one generic API rather than lots of platform-specific ones.
 */
int at91_suspend_entering_slow_clock(void)
{
	/* True for ULP0 and any deeper mode (ULP0 fast, ULP1, backup). */
	return (soc_pm.data.mode >= AT91_PM_ULP0);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
/*
 * NOTE(review): tail of a suspend-finish routine — the function header is not
 * visible in this chunk, so code below is kept byte-identical. For backup
 * mode with a DDR PHY: reads ZQ0SR0, stores corrected impedance codes
 * (via modified_fix_code[]) into ddr_phy_calibration[0] for the bootloader,
 * saves the first memory words into securam, then flushes caches and jumps to
 * the SRAM suspend routine. "unsignedint" and collapsed comment lines are
 * extraction artifacts.
 */
if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) { /* * Bootloader will perform DDR recalibration and will try to * restore the ZQ0SR0 with the value saved here. But the * calibration is buggy and restoring some values from ZQ0SR0 * is forbidden and risky thus we need to provide processed * values for these.
*/
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/* * The 1st 8 words of memory might get corrupted in the process * of DDR PHY recalibration; it is saved here in securam and it * will be restored later, after recalibration, by bootloader
*/ for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
soc_pm.bu->ddr_phy_calibration[i] =
*((unsignedint *)soc_pm.memcs + (i - 1));
}
flush_cache_all();
outer_disable();
at91_suspend_sram_fn(&soc_pm.data);
return 0;
}
/**
 * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
 * to automatic/hardware mode.
 *
 * The Backup Unit Power Switch can be managed either by software or hardware.
 * Enabling hardware mode allows the automatic transition of power between
 * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
 * availability of these power sources.
 *
 * If the Backup Unit Power Switch is already in automatic mode, no action is
 * required. If it is in software-controlled mode, it is switched to automatic
 * mode to enhance safety and eliminate the need for toggling between power
 * sources.
 */
static void at91_pm_switch_ba_to_auto(void)
{
	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
	unsigned int val;

	/* Just for safety. */
	if (!soc_pm.data.sfrbu)
		return;

	val = readl(soc_pm.data.sfrbu + offset);

	/* Already on auto/hardware. */
	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
		return;

	/* Clear the software-control bit; the write must carry the key. */
	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
	val |= soc_pm.sfrbu_regs.pswbu.key;
	writel(val, soc_pm.data.sfrbu + offset);
}
/*
 * NOTE(review): at91_pm_suspend() fragment — the function is truncated in
 * this chunk (no closing brace visible; presumably the non-backup cpu_suspend
 * path and irq restore follow). Code kept byte-identical, including the fused
 * "staticvoid" extraction artifact. For backup mode it switches the backup
 * power switch to hardware control, suspends via the SRAM finisher, then
 * re-copies the SRAM routine because SRAM content is lost across backup
 * cycles.
 */
staticvoid at91_pm_suspend(suspend_state_t state)
{ if (soc_pm.data.mode == AT91_PM_BACKUP) {
at91_pm_switch_ba_to_auto();
cpu_suspend(0, at91_suspend_finish);
/* The SRAM is lost between suspend cycles */
at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
&at91_pm_suspend_in_sram,
at91_pm_suspend_in_sram_sz);
/*
 * NOTE(review): at91_pm_enter() fragment — the switch statement and the
 * 'error' label it jumps to are cut off in this chunk; code kept
 * byte-identical ("staticint" is an extraction artifact). Visible behavior:
 * applies suspend-side Ethernet quirks first, then for MEM/STANDBY validates
 * the clock tree before entering a slow-clock (>= ULP0) mode.
 */
/* * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup' * event sources; and reduces DRAM power. But otherwise it's identical to * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks. * * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must * suspend more deeply, the master clock switches to the clk32k and turns off * the main oscillator * * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self refresh
*/ staticint at91_pm_enter(suspend_state_t state)
{ int ret;
ret = at91_pm_config_quirks(true); if (ret) return ret;
switch (state) { case PM_SUSPEND_MEM: case PM_SUSPEND_STANDBY: /* * Ensure that clocks are in a valid state.
*/ if (soc_pm.data.mode >= AT91_PM_ULP0 &&
!at91_pm_verify_clocks()) goto error;
/* * The AT91RM9200 goes into self-refresh mode with this command, and will * terminate self-refresh automatically on the next SDRAM access. * * Self-refresh mode is exited as soon as a memory access is made, but we don't * know for sure when that happens. However, we need to restore the low-power * mode if it was enabled before going idle. Restoring low-power mode while * still in self-refresh is "not recommended", but seems to work.
*/ staticvoid at91rm9200_standby(void)
{ asmvolatile( "b 1f\n\t" ".align 5\n\t" "1: mcr p15, 0, %0, c7, c10, 4\n\t" " str %2, [%1, %3]\n\t" " mcr p15, 0, %0, c7, c0, 4\n\t"
:
: "r" (0), "r" (soc_pm.data.ramc[0]), "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}
/*
 * NOTE(review): at91_ddr_standby() fragment — only the local declarations
 * survive in this chunk; the body (LPR/MDR save, self-refresh entry, idle,
 * restore — TODO confirm against the full file) is missing. Code kept
 * byte-identical, including the fused "staticvoid".
 */
/* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember.
*/ staticvoid at91_ddr_standby(void)
{ /* Those two values allow us to delay self-refresh activation
* to the maximum. */
u32 lpr0, lpr1 = 0;
u32 mdr, saved_mdr0, saved_mdr1 = 0;
u32 saved_lpr0, saved_lpr1 = 0;
/*
 * NOTE(review): at91sam9_sdram_standby() — incomplete in this chunk: the
 * lines that save the current LPR values and compute the self-refresh values
 * (lpr0/lpr1, saved_lpr0/saved_lpr1) are missing between the declarations and
 * the register writes below, so as shown these are used uninitialized. Code
 * kept byte-identical; restore the missing setup from the full file.
 */
/* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember.
*/ staticvoid at91sam9_sdram_standby(void)
{
u32 lpr0, lpr1 = 0;
u32 saved_lpr0, saved_lpr1 = 0;
/* self-refresh mode now */
at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0); if (soc_pm.data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0); if (soc_pm.data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
/*
 * NOTE(review): sama7g5_standby() fragment — truncated in this chunk before
 * the idle/wake and restore steps (presumably cpu_do_idle() then restoring
 * PWRCTL and the CPU ratio — confirm against the full file). Code kept
 * byte-identical. Visible behavior: enable UDDRC self-refresh and divide the
 * CPU clock by 16 to reduce power while idle.
 */
staticvoid sama7g5_standby(void)
{ int pwrtmg, ratio;
pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
/* * Place RAM into self-refresh after a maximum idle clocks. The maximum * idle clocks is configured by bootloader in * UDDRC_PWRMGT.SELFREF_TO_X32.
*/
writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
soc_pm.data.ramc[0] + UDDRC_PWRCTL); /* Divide CPU clock by 16. */
writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
/*
 * at91_dt_ramc() - locate and map the RAM controller(s), and optionally the
 * DDR PHY, from the device tree.
 * @phy_mandatory: fail with -ENODEV if no DDR PHY node is found.
 *
 * Also records the memory-controller type and installs the first available
 * standby idle routine as cpuidle platform data. On any failure every
 * already-mapped RAM controller is unmapped before returning a negative
 * errno.
 */
static __init int at91_dt_ramc(bool phy_mandatory)
{
	struct device_node *np;
	const struct of_device_id *of_id;
	int idx = 0;
	void *standby = NULL;
	const struct ramc_info *ramc;
	int ret;

	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
		soc_pm.data.ramc[idx] = of_iomap(np, 0);
		if (!soc_pm.data.ramc[idx]) {
			pr_err("unable to map ramc[%d] cpu registers\n", idx);
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}

		ramc = of_id->data;
		if (ramc) {
			/* Keep the first standby handler we find. */
			if (!standby)
				standby = ramc->idle;
			soc_pm.data.memctrl = ramc->memctrl;
		}

		idx++;
	}

	if (!idx) {
		pr_err("unable to find compatible ram controller node in dtb\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	/* Lookup for DDR PHY node, if any. */
	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
		soc_pm.data.ramc_phy = of_iomap(np, 0);
		if (!soc_pm.data.ramc_phy) {
			pr_err("unable to map ramc phy cpu registers\n");
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}
	}

	if (phy_mandatory && !soc_pm.data.ramc_phy) {
		pr_err("DDR PHY is mandatory!\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	if (!standby) {
		pr_warn("ramc no standby function available\n");
		return 0;
	}

	at91_cpuidle_device.dev.platform_data = standby;

	return 0;

unmap_ramc:
	while (idx)
		iounmap(soc_pm.data.ramc[--idx]);

	return ret;
}
static void at91rm9200_idle(void)
{
	/*
	 * Disable the processor clock. The processor will be automatically
	 * re-enabled by an interrupt or by a reset.
	 */
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
}
/*
 * Replaces _mode_to_replace with a supported mode that doesn't depend
 * on controller pointed by _map_bitmask
 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
 * PM mode
 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
 * controller represented by _map_bitmask, _mode_to_replace needs to be
 * updated
 * @_mode_to_replace: standby_mode or suspend_mode that need to be
 * updated
 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
 * to avoid having standby_mode and suspend_mode set with the same AT91
 * PM mode
 *
 * Fix: the else branch's complementary mode must differ from
 * AT91_PM_STANDBY; otherwise, when _mode_to_use == AT91_PM_STANDBY and
 * _mode_to_check == AT91_PM_STANDBY, the macro would assign STANDBY to
 * _mode_to_replace — exactly the "same mode for both" case it is documented
 * to avoid. Use AT91_PM_BACKUP as the fallback in that branch.
 */
#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
			     _mode_to_check)				\
	do {								\
		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
			int _mode_to_use, _mode_complementary;		\
			/* Use ULP0 if it doesn't need _map_bitmask. */	\
			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
				_mode_to_use = AT91_PM_ULP0;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			} else {					\
				_mode_to_use = AT91_PM_STANDBY;		\
				_mode_complementary = AT91_PM_BACKUP;	\
			}						\
									\
			if ((_mode_to_check) != _mode_to_use)		\
				(_mode_to_replace) = _mode_to_use;	\
			else						\
				(_mode_to_replace) = _mode_complementary;\
		}							\
	} while (0)
/*
 * Replaces standby and suspend modes with default supported modes:
 * ULP0 and STANDBY.
 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
 * flags
 * @_map: controller specific name; standby and suspend mode need to be
 * replaced in order to not depend on this controller
 */
#define AT91_PM_REPLACE_MODES(_maps, _map)				\
	do {								\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.standby_mode),	\
				     (soc_pm.data.suspend_mode));	\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.suspend_mode),	\
				     (soc_pm.data.standby_mode));	\
	} while (0)
/*
 * NOTE(review): tail of a PM init routine — its header is not visible in this
 * chunk; code kept byte-identical. Registers the suspend ops only when the
 * SRAM suspend routine was successfully allocated/copied, and logs the
 * selected standby/suspend mode names either way.
 */
if (at91_suspend_sram_fn) {
suspend_set_ops(&at91_pm_ops);
pr_info("AT91: PM: standby: %s, suspend: %s\n",
pm_modes[soc_pm.data.standby_mode].pattern,
pm_modes[soc_pm.data.suspend_mode].pattern);
} else {
pr_info("AT91: PM not supported, due to no SRAM allocated\n");
}
}
/*
 * NOTE(review): at91rm9200_pm_init() — truncated in this chunk after the LPR
 * write (the trailing at91_pm_init(...) call and closing brace are missing);
 * code kept byte-identical. Forces STANDBY/ULP0 since the platform supports
 * only those, maps the RAM controller from DT, and clears the SDRAM low-power
 * register because low-power mode cannot coexist with self-refresh on RM9200.
 */
void __init at91rm9200_pm_init(void)
{ int ret;
if (!IS_ENABLED(CONFIG_SOC_AT91RM9200)) return;
/* * Force STANDBY and ULP0 mode to avoid calling * at91_pm_modes_validate() which may increase booting time. * Platform supports anyway only STANDBY and ULP0 modes.
*/
soc_pm.data.standby_mode = AT91_PM_STANDBY;
soc_pm.data.suspend_mode = AT91_PM_ULP0;
ret = at91_dt_ramc(false); if (ret) return;
/* * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
*/
at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
/*
 * NOTE(review): interior of another SoC-specific pm_init function — its
 * header is not visible in this chunk (the "modes"/"iomaps" arrays it passes
 * are declared in the missing part). Code kept byte-identical. Validates and
 * records per-mode IO maps, maps the RAM controller, initializes PM, then
 * configures the gigabit Ethernet WoL quirk for the ULP modes.
 */
/* * Force STANDBY and ULP0 mode to avoid calling * at91_pm_modes_validate() which may increase booting time. * Platform supports anyway only STANDBY and ULP0 modes.
*/
soc_pm.data.standby_mode = AT91_PM_STANDBY;
soc_pm.data.suspend_mode = AT91_PM_ULP0;
at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
ret = at91_dt_ramc(false); if (ret) return;
at91_pm_init(NULL);
/* Quirks applies to ULP0, ULP0 fast and ULP1 modes. */
soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
BIT(AT91_PM_ULP0_FAST) |
BIT(AT91_PM_ULP1); /* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
BIT(AT91_PM_ULP0_FAST);
}
/*
 * NOTE(review): tail of yet another SoC-specific pm_init function — header
 * not visible in this chunk; code kept byte-identical. Same gigabit Ethernet
 * WoL quirk setup as the previous init routine: quirk active in ULP0,
 * ULP0 fast and ULP1; refuse suspend in ULP0/ULP0 fast when GETH is the only
 * wakeup source.
 */
/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
BIT(AT91_PM_ULP0_FAST) |
BIT(AT91_PM_ULP1); /* * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup * source.
*/
soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
BIT(AT91_PM_ULP0_FAST);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.