/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL	/* sentinel terminator for iteration */
};
/*
 * NOTE(review): this span appears to be a corrupted extraction. The header
 * and tick-adjustment comment below belong to pmc_core_adjust_slp_s0_step(),
 * but the body that follows (pmc_core_send_msg / seq_printf loop) looks like
 * the tail of the "pmc_core_pll" debugfs show handler — the lines in between
 * were lost. Confirm against the upstream driver before relying on this.
 */
staticinline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
{ /* * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are * used as a workaround which uses 30.5 usec tick. All other client * programs have the legacy SLP_S0 residency counter that is using the 122 * usec tick.
*/ constint lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
/* NOTE(review): 'err', 'val', 'index', 'map' and 's' are not declared in
 * this fragment — intervening lines are missing. */
err = pmc_core_send_msg(pmc, &mphy_common_reg); if (err) return err;
/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
msleep(10);
val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);
/* Print each named bit in the map as Active/Idle based on the value read */
for (index = 0; map[index].name ; index++) {
seq_printf(s, "%-32s\tState: %s\n",
map[index].name,
map[index].bit_mask & val ? "Active" : "Idle");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
/*
 * Translate a user-supplied contiguous LTR index (as shown by ltr_show)
 * into a (PMC, register-bit) pair and update that PMC's LTR-ignore register.
 *
 * NOTE(review): this fragment is missing code — 'ltr_ign' is used below
 * without a declaration, 'reg' and 'ignore' are never used, and there is
 * no return statement; lines were apparently lost in extraction.
 */
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
{ struct pmc *pmc; conststruct pmc_reg_map *map;
u32 reg; unsignedint pmc_index; int ltr_index;
ltr_index = value; /* For platforms with multiple pmcs, ltr index value given by user * is based on the contiguous indexes from ltr_show output. * pmc index and ltr index needs to be calculated from it.
*/ for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
pmc = pmcdev->pmcs[pmc_index];
if (!pmc) continue;
map = pmc->map; if (ltr_index <= map->ltr_ignore_max) break;
/* Along with IP names, ltr_show map includes CURRENT_PLATFORM * and AGGREGATED_SYSTEM values per PMC. Take these two index * values into account in ltr_index calculation. Also, to start * ltr index from zero for next pmc, subtract it by 1.
*/
ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
}
/* No PMC matched the requested index: reject the request */
if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0) return -EINVAL;
pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
/* ltr_ignore_max is the max index value for LTR ignore register */
/* NOTE(review): the two lines below set ALL ignore bits via GENMASK — this
 * looks spliced in from an "ignore all" helper rather than the per-index
 * read-modify-write this function's parameters imply. */
ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
}
/*
 * NOTE(review): orphaned tail of a function whose opening lines are missing
 * from this extraction (presumably a suspend-preparation helper).
 */
/* * Ignoring ME during suspend is blocking platforms with ADL PCH to get to * deeper S0ix substate.
*/
/* Index 6 presumably selects the ME entry in the LTR-ignore map and the
 * third argument is the 'ignore' flag — TODO confirm against
 * pmc_core_send_ltr_ignore() semantics. */
pmc_core_send_ltr_ignore(pmcdev, 6, 0);
}
/*
 * NOTE(review): orphaned interior of a debugfs "substate requirements"
 * show handler — the function header and local declarations ('s', 'pmc',
 * 'pmc_index', 'lpm_req_regs', 'num_maps', 'sts_offset', 'sts_offset_live',
 * 'maps', 'mp') are missing from this extraction.
 */
/* * When there are multiple PMCs, though the PMC may exist, the * requirement register discovery could have failed so check * before accessing.
*/ if (!lpm_req_regs) continue;
/* Display the header */
pmc_core_substate_req_header_show(s, pmc_index);
/* Loop over maps */ for (mp = 0; mp < num_maps; mp++) {
u32 req_mask = 0;
u32 lpm_status;
u32 lpm_status_live; conststruct pmc_bit_map *map; int mode, i, len = 32;
/* * Capture the requirements and create a mask so that we only * show an element if it's required for at least one of the * enabled low power modes
*/
pmc_for_each_mode(mode, pmcdev)
req_mask |= lpm_req_regs[mp + (mode * num_maps)];
/* Get the last latched status for this map */
lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
/* Get the runtime status for this map */
lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));
/* Loop over elements in this map */
map = maps[mp]; for (i = 0; map[i].name && i < len; i++) {
u32 bit_mask = map[i].bit_mask;
if (!(bit_mask & req_mask)) { /* * Not required for any enabled states * so don't display
*/ continue;
}
/* Display the element name in the first column */
seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
/* Loop over the enabled states and display if required */
pmc_for_each_mode(mode, pmcdev) { bool required = lpm_req_regs[mp + (mode * num_maps)] &
bit_mask;
seq_printf(s, " %9s |", required ? "Required" : " ");
}
/* In Status column, show the last captured state of this agent */
seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
/* In Live status column, show the live state of this agent */
seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");
/* NOTE(review): the latch-mode write below belongs to a different function
 * (an LPM status-latch setter); lines were lost in extraction and 'reg' is
 * undeclared in this scope. */
/* * For LPM mode latching we set the latch enable bit and selected mode * and clear everything else.
*/
reg = LPM_STS_LATCH_MODE | BIT(mode);
guard(mutex)(&pmcdev->lock);
pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
/*
 * pmc_core_pri_verify - validate an LPM priority register value.
 *
 * @lpm_pri:    raw 32-bit priority register (4 bits per mode)
 * @mode_order: output array; mode_order[i] receives the priority level
 *              decoded from nibble i of @lpm_pri
 *
 * Returns true when @lpm_pri is non-zero, every decoded level is below
 * LPM_MAX_NUM_MODES and all levels are unique; false otherwise. On a
 * false return, @mode_order may be partially written.
 */
static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	unsigned int i, j;

	if (!lpm_pri)
		return false;

	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}
/*
 * NOTE(review): orphaned interior of the low-power-modes discovery routine —
 * the function header and declarations ('lpm_en', 'lpm_pri', 'mode_order',
 * 'pri_order', 'mode', 'i', 'p') are missing from this extraction.
 */
/* Use LPM Maps to indicate support for substates */ if (!pmc->map->lpm_num_maps) return;
lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset); /* For MTL, BIT 31 is not an lpm mode but an enable bit. * Lower byte is enough to cover the number of lpm modes for all * platforms and hence mask the upper 3 bytes.
*/
pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);
/* * If lpm_pri value passes verification, then override the default * modes here. Otherwise stick with the default.
*/ if (pmc_core_pri_verify(lpm_pri, mode_order)) /* Get list of modes in priority order */ for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
pri_order[mode_order[mode]] = mode; else
dev_warn(&pmcdev->pdev->dev, "Assuming a default substate order for this platform\n");
/* * Loop through all modes from lowest to highest priority, * and capture all enabled modes in order
*/
i = 0; for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) { int mode = pri_order[p];
if (!(BIT(mode) & lpm_en)) continue;
pmcdev->lpm_en_modes[i++] = mode;
}
}
/*
 * NOTE(review): this fragment mixes the start of get_primary_reg_base()
 * (which resolves the SLP_S0 residency counter base address via ACPI LPIT)
 * with an unrelated debugfs registration for a die C6 counter — the
 * intervening lines were lost in extraction, and 'pmcdev' is undeclared
 * in this scope.
 */
int get_primary_reg_base(struct pmc *pmc)
{
u64 slp_s0_addr;
/* Fall back to the default PMC base when the ACPI LPIT lookup fails */
if (lpit_read_residency_count_address(&slp_s0_addr)) {
pmc->base_addr = PMC_BASE_ADDR_DEFAULT;
if (pmcdev->has_die_c6) {
debugfs_create_file("die_c6_us_show", 0444,
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_die_c6_us_fops);
}
}
static u32 pmc_core_find_guid(struct pmc_info *list, conststruct pmc_reg_map *map)
{ for (; list->map; ++list) if (list->map == map) return list->guid;
return 0;
}
/*
 * This function retrieves low power mode requirement data from PMC Low
 * Power Mode (LPM) table.
 *
 * In telemetry space, the LPM table contains a 4 byte header followed
 * by 8 consecutive mode blocks (one for each LPM mode). Each block
 * has a 4 byte header followed by a set of registers that describe the
 * IP state requirements for the given mode. The IP mapping is platform
 * specific but the same for each block, making for easy analysis.
 * Platforms only use a subset of the space to track the requirements
 * for their IPs. Callers provide the requirement registers they use as
 * a list of indices. Each requirement register is associated with an
 * IP map that's maintained by the caller.
 *
 * Header
 * +----+----------------------------+----------------------------+
 * |  0 |          REVISION          |        ENABLED MODES       |
 * +----+--------------+-------------+-------------+--------------+
 *
 * Low Power Mode 0 Block
 * +----+--------------+-------------+-------------+--------------+
 * |  1 |    SUB ID    |     SIZE    |    MAJOR    |    MINOR     |
 * +----+--------------+-------------+-------------+--------------+
 * |  2 |                  LPM0 Requirements 0                    |
 * +----+---------------------------------------------------------+
 * |    |                          ...                            |
 * +----+---------------------------------------------------------+
 * | 29 |                  LPM0 Requirements 27                   |
 * +----+---------------------------------------------------------+
 *
 * ...
 *
 * Low Power Mode 7 Block
 * +----+--------------+-------------+-------------+--------------+
 * |    |    SUB ID    |     SIZE    |    MAJOR    |    MINOR     |
 * +----+--------------+-------------+-------------+--------------+
 * | 60 |                  LPM7 Requirements 0                    |
 * +----+---------------------------------------------------------+
 * |    |                          ...                            |
 * +----+---------------------------------------------------------+
 * | 87 |                  LPM7 Requirements 27                   |
 * +----+---------------------------------------------------------+
 *
 * NOTE(review): the body below is truncated and spliced — after the local
 * declarations it jumps into per-PMC SSRAM telemetry discovery code from a
 * different function ('pmc_index', 'pmc_ssram_telemetry' and 'map' are not
 * declared here, and the function has no closing brace in this extraction).
 */
staticint pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct pci_dev *pcidev)
{ struct telem_endpoint *ep; const u8 *lpm_indices; int num_maps, mode_offset = 0; int ret, mode; int lpm_size;
u32 guid;
ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry); if (ret) return ret;
map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid); if (!map) return -ENODEV;
pmc = pmcdev->pmcs[pmc_index]; /* Memory for primary PMC has been allocated */ if (!pmc) {
pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL); if (!pmc) return -ENOMEM;
}
/*
 * When supported, ssram init is used to achieve all available PMCs.
 * If ssram init fails, this function uses legacy method to at least get the
 * primary PMC.
 *
 * NOTE(review): the tail of this function ('return ret;' and the closing
 * brace after the error-unwind path) is missing from this extraction.
 */
int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; bool ssram; int ret;
/* Prefer SSRAM-based register discovery when a regmap list is provided */
ssram = pmc_dev_info->regmap_list != NULL; if (ssram) {
pmcdev->regmap_list = pmc_dev_info->regmap_list;
ret = pmc_core_ssram_get_reg_base(pmcdev); /* * EAGAIN error code indicates Intel PMC SSRAM Telemetry driver * has not finished probe and PMC info is not available yet. Try * again later.
*/ if (ret == -EAGAIN) return -EPROBE_DEFER;
if (ret) {
dev_warn(&pmcdev->pdev->dev, "Failed to get PMC info from SSRAM, %d, using legacy init\n", ret);
ssram = false;
}
}
/* Legacy path: use the static register map for the primary PMC only */
if (!ssram) {
pmc->map = pmc_dev_info->map;
ret = get_primary_reg_base(pmc); if (ret) return ret;
}
pmc_core_get_low_power_modes(pmcdev); if (pmc_dev_info->dmu_guid)
pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
if (ssram) {
ret = pmc_core_ssram_get_lpm_reqs(pmcdev, pmc_dev_info->pci_func); if (ret) goto unmap_regbase;
}
return 0;
/* Error unwind: release every mapped PMC register base and the punit
 * telemetry endpoint acquired above */
unmap_regbase: for (unsignedint i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { struct pmc *pmc = pmcdev->pmcs[i];
if (pmc && pmc->regbase)
iounmap(pmc->regbase);
}
if (pmcdev->punit_ep)
pmt_telem_unregister_endpoint(pmcdev->punit_ep);
/*
 * This quirk can be used on those platforms where
 * the platform BIOS enforces 24Mhz crystal to shutdown
 * before PMC can assert SLP_S0#.
 */
static bool xtal_ignore;

/*
 * DMI quirk callback: record that this platform needs the 24MHz-crystal
 * workaround. The @id argument is unused; the match itself is the signal.
 * Returns 0 (continue scanning the DMI table).
 */
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}
/*
 * NOTE(review): orphaned interior of the probe routine — the enclosing
 * function header and locals ('pdev', 'pmcdev', 'ret', 'pmc_dev_info')
 * are missing from this extraction.
 */
/* The last element in msr_map is empty */
pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
/* One residency slot per PKGC MSR; device-managed, freed on unbind */
pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
pmcdev->num_of_pkgc, sizeof(*pmcdev->pkgc_res_cnt),
GFP_KERNEL); if (!pmcdev->pkgc_res_cnt) return -ENOMEM;
ret = devm_mutex_init(&pdev->dev, &pmcdev->lock); if (ret) return ret;
/* A platform-specific init hook takes precedence over the generic path */
if (pmc_dev_info->init)
ret = pmc_dev_info->init(pmcdev, pmc_dev_info); else
ret = generic_core_init(pmcdev, pmc_dev_info);
if (ret) {
platform_set_drvdata(pdev, NULL); return ret;
}
/*
 * NOTE(review): orphaned interior of the suspend handler — the function
 * header and locals ('pmcdev', 'pmc', 'i') are missing from this
 * extraction.
 */
if (ltr_ignore_all_suspend)
pmc_core_ltr_ignore_all(pmcdev);
/* Check if the suspend will actually use S0ix */ if (pm_suspend_via_firmware()) return 0;
/* Save PKGC residency for checking later */ for (i = 0; i < pmcdev->num_of_pkgc; i++) { if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i])) return -EIO;
}
/* Save S0ix residency for checking later */ if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter)) return -EIO;
/*
 * NOTE(review): orphaned interior of the resume-time S0ix failure check —
 * the function header is missing, and the section below is visibly spliced
 * (the body of the PKGC counter loop was lost in extraction).
 */
/* Check if the suspend used S0ix */ if (pm_suspend_via_firmware()) return 0;
if (!pmc_core_is_s0ix_failed(pmcdev)) return 0;
/* Diagnostics are opt-in; bail out quietly otherwise */
if (!warn_on_s0ix_failures) return 0;
if (pmc_core_is_deepest_pkgc_failed(pmcdev)) { /* S0ix failed because of deepest PKGC entry failure */
dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
msr_map[pmcdev->num_of_pkgc - 1].name,
msr_map[pmcdev->num_of_pkgc - 1].name,
pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);
for (i = 0; i < pmcdev->num_of_pkgc; i++) {
u64 pc_cnt;
/* NOTE(review): loop body truncated here; the warning below belongs to
 * the S0ix-failure branch of the same handler. */
/* The real interesting case - S0ix failed - lets ask PMC why. */
dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
pmcdev->s0ix_counter);
if (pmc->map->slps0_dbg_maps)
pmc_core_slps0_display(pmc, dev, NULL);
/* Dump each PMC's low-power-mode STATUS registers for diagnosis */
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { struct pmc *pmc = pmcdev->pmcs[i];
if (!pmc) continue; if (pmc->map->lpm_sts)
pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.