/* * PG0 is HW controlled, so doesn't have a corresponding power well control knob * * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
*/ staticenum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
{ int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
/* Per-power-well hardware access vtable. */
struct i915_power_well_ops {
	/* Register block (driver/BIOS/KVMR/debug request regs) used by the ops. */
	const struct i915_power_well_regs *regs;

	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct intel_display *display,
			struct i915_power_well *power_well);

	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct intel_display *display,
		       struct i915_power_well *power_well);

	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct intel_display *display,
			struct i915_power_well *power_well);

	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct intel_display *display,
			   struct i915_power_well *power_well);
};
for_each_power_well(display, power_well) if (i915_power_well_instance(power_well)->id == power_well_id) return power_well;
/* * It's not feasible to add error checking code to the callers since * this condition really shouldn't happen and it doesn't even make sense * to abort things like display initialization sequences. Just return * the first power well and hope the WARN gets reported so we can fix * our driver.
*/
drm_WARN(display->drm, 1, "Power well %d not defined for this platform\n",
power_well_id); return &display->power.domains.power_wells[0];
}
/**
 * intel_power_well_put - drop a reference on a power well
 * @display: display device instance
 * @power_well: power well to drop the reference on
 *
 * Decrement the use count of @power_well, disabling the well once the
 * count drops to zero. Warns (without aborting) if the count was
 * already zero.
 */
void intel_power_well_put(struct intel_display *display,
			  struct i915_power_well *power_well)
{
	drm_WARN(display->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 i915_power_well_instance(power_well)->name);

	power_well->count--;
	if (power_well->count == 0)
		intel_power_well_disable(display, power_well);
}
/**
 * intel_power_well_refcount - read a power well's current use count
 * @power_well: power well to query
 *
 * Returns: the number of references currently held on @power_well.
 */
int intel_power_well_refcount(struct i915_power_well *power_well)
{
	int refcount = power_well->count;

	return refcount;
}
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
/* Re-initialize resources hosted on a power well after it was enabled. */
static void hsw_power_well_post_enable(struct intel_display *display,
				       u8 irq_pipe_mask, bool has_vga)
{
	/* The well hosts the VGA block; reset its io mem state. */
	if (has_vga)
		intel_vga_reset_io_mem(display);

	/* Re-enable interrupts for the pipes located on this well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
/* * FIXME should we care about the (VBT defined) dig_port->aux_ch * relationship or should this be purely defined by the hardware layout? * Currently if the port doesn't appear in the VBT, or if it's declared * as HDMI-only and routed to a combo PHY, the encoder either won't be * present at all or it will not have an aux_ch assigned.
*/ return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
/* * For some power wells we're not supposed to watch the status bit for * an ack, but rather just wait a fixed amount of time and then * proceed. This is only used on DG2.
*/ if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
usleep_range(600, 1200); return;
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ if (intel_de_wait_for_set(display, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
/* * Bspec doesn't require waiting for PWs to get disabled, but still do * this for paranoia. The known cases where a PW will be forced on: * - a KVMR request on any power well via the KVMR request register * - a DMC request on PW1 and MISC_IO power wells via the BIOS and * DEBUG request registers * Skip the wait in case any of the request bits are set and print a * diagnostic message.
*/
reqs = hsw_power_well_requesters(display, regs, pw_idx);
ret = intel_de_wait_for_clear(display, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx),
reqs ? 0 : 1); if (!ret) return;
/* Refresh requesters in case they popped up during the wait. */ if (!reqs)
reqs = hsw_power_well_requesters(display, regs, pw_idx);
/* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse * state after the enabling. For all other power wells with * fuses we only have to wait for that PW/PG's fuse state * after the enabling.
*/ if (pg == SKL_PG1)
gen9_wait_for_power_well_fuses(display, SKL_PG0);
}
/* * FIXME not sure if we should derive the PHY from the pw_idx, or * from the VBT defined AUX_CH->DDI->PHY mapping.
*/
intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
0, ICL_LANE_ENABLE_AUX);
/* * FIXME not sure if we should derive the PHY from the pw_idx, or * from the VBT defined AUX_CH->DDI->PHY mapping.
*/
intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
ICL_LANE_ENABLE_AUX, 0);
/* * An AUX timeout is expected if the TBT DP tunnel is down, * or need to enable AUX on a legacy TypeC port as part of the TC-cold * exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
icl_tc_cold_exit(display);
/* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to * be enabled.
*/ staticbool hsw_power_well_enabled(struct intel_display *display, struct i915_power_well *power_well)
{ conststruct i915_power_well_regs *regs = power_well->desc->ops->regs; enum i915_power_well_id id = i915_power_well_instance(power_well)->id; int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
val = intel_de_read(display, regs->driver);
/* * On GEN9 big core due to a DMC bug the driver's request bits for PW1 * and the MISC_IO PW will be not restored, so check instead for the * BIOS's own request bits, which are forced-on for these power wells * when exiting DC5/6.
*/ if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
val |= intel_de_read(display, regs->bios);
drm_WARN_ONCE(display->drm,
(intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n");
drm_WARN_ONCE(display->drm,
intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n");
drm_WARN_ONCE(display->drm,
intel_de_read(display, HSW_PWR_WELL_CTL2) &
HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n");
drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
/* * TODO: check for the following to verify the conditions to enter DC9 * state are satisfied: * 1] Check relevant display engine registers to verify if mode set * disable sequence was followed. * 2] Check if display uninitialize sequence is initialized.
*/
}
drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
drm_WARN_ONCE(display->drm,
intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5, "DC5 still not disabled.\n");
/* * TODO: check for the following to verify DC9 state was indeed * entered before programming to disable it: * 1] Check relevant display engine registers to verify if mode * set disable sequence was followed. * 2] Check if display uninitialize sequence is initialized.
*/
}
/*
 * Write @state to DC_STATE_EN, retrying until the DMC-backed register
 * reads back the requested value (or the retry budget is exhausted).
 */
static void gen9_write_dc_state(struct intel_display *display,
				u32 state)
{
	int rewrites = 0;	/* total extra writes issued */
	int rereads = 0;	/* consecutive reads matching @state */
	u32 v;

	intel_de_write(display, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = intel_de_read(display, DC_STATE_EN);

		if (v != state) {
			/* Mismatch: rewrite and restart the stability count. */
			intel_de_write(display, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value was stable for several consecutive reads. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(display->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(display->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}
val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Resetting DC state tracking from %02x to %02x\n",
power_domains->dc_state, val);
power_domains->dc_state = val;
}
/** * gen9_set_dc_state - set target display C power state * @display: display instance * @state: target DC power state * - DC_STATE_DISABLE * - DC_STATE_EN_UPTO_DC5 * - DC_STATE_EN_UPTO_DC6 * - DC_STATE_EN_DC9 * * Signal to DMC firmware/HW the target DC power state passed in @state. * DMC/HW can turn off individual display clocks and power rails when entering * a deeper DC power state (higher in number) and turns these back when exiting * that state to a shallower power state (lower in number). The HW will decide * when to actually enter a given state on an on-demand basis, for instance * depending on the active state of display pipes. The state of display * registers backed by affected power rails are saved/restored as needed. * * Based on the above enabling a deeper DC power state is asynchronous wrt. * enabling it. Disabling a deeper power state is synchronous: for instance * setting %DC_STATE_DISABLE won't complete until all HW resources are turned * back on and register state is restored. This is guaranteed by the MMIO write * to DC_STATE_EN blocking until the state is restored.
*/ void gen9_set_dc_state(struct intel_display *display, u32 state)
{ struct i915_power_domains *power_domains = &display->power.domains; bool dc6_was_enabled, enable_dc6;
u32 mask;
u32 val;
if (!HAS_DISPLAY(display)) return;
if (drm_WARN_ON_ONCE(display->drm,
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
if (!power_domains->initializing)
intel_psr_notify_dc5_dc6(display);
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
val & mask, state);
/* Check if DMC is ignoring our DC state requests */ if ((val & mask) != power_domains->dc_state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6; if (!dc6_was_enabled && enable_dc6)
intel_dmc_update_dc6_allowed_count(display, true);
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
if (!enable_dc6 && dc6_was_enabled)
intel_dmc_update_dc6_allowed_count(display, false);
/* Power wells at this level and above must be disabled for DC5 entry */ if (DISPLAY_VER(display) == 12)
high_pg = ICL_DISP_PW_3; else
high_pg = SKL_DISP_PW_2;
drm_dbg_kms(display->drm, "Enabling DC9\n"); /* * Power sequencer reset is needed on BXT/GLK, because the PPS registers * aren't always on, unlike with South Display Engine on PCH.
*/ if (display->platform.broxton || display->platform.geminilake)
bxt_pps_reset_all(display);
gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
drm_WARN(display->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices, "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
hw_enabled_dbuf_slices,
enabled_dbuf_slices);
}
if (old_state == DC_STATE_EN_UPTO_DC5 ||
old_state == DC_STATE_EN_UPTO_DC6)
intel_dmc_wl_disable(display);
intel_cdclk_get_cdclk(display, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(display->drm,
intel_cdclk_clock_changed(&display->cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(display);
if (display->platform.geminilake || display->platform.broxton)
bxt_verify_dpio_phy_power_wells(display);
if (DISPLAY_VER(display) >= 11) /* * DMC retains HW context only for port A, the other combo * PHY's HW context for port B is lost after DC transitions, * so we need to restore it manually.
*/
intel_combo_phy_init(display);
}
if (wait_for(COND, 100))
drm_err(display->drm, "timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask; /* * We only ever set the power-on and power-gate states, anything * else is unexpected.
*/
drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
state != PUNIT_PWRGT_PWR_GATE(pw_idx)); if (state == ctrl)
enabled = true;
/* * A transient state at this point would mean some unexpected party * is poking at the power controls too.
*/
ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
drm_WARN_ON(display->drm, ctrl != state);
vlv_punit_put(display->drm);
return enabled;
}
staticvoid vlv_init_display_clock_gating(struct intel_display *display)
{ /* * On driver load, a pipe may be active and driving a DSI display. * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display.
*/
intel_de_rmw(display, DSPCLK_GATE_D(display),
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/* * Enable the CRI clock source so we can get at the * display and the reference clock for VGA * hotplug / manual detection. Supposedly DSI also * needs the ref clock up and running. * * CHV DPLL B/C have some issues if VGA mode is enabled.
*/
for_each_pipe(display, pipe) {
u32 val = intel_de_read(display, DPLL(display, pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* * During driver initialization/resume we can avoid restoring the * part of the HW/SW state that will be inited anyway explicitly.
*/ if (display->power.domains.initializing) return;
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) { if (encoder->type == INTEL_OUTPUT_ANALOG)
intel_crt_reset(&encoder->base);
}
/*
 * Power up the DPIO common lane well and de-assert common lane reset.
 * NOTE: the statement order below follows a hardware sequence and must
 * not be changed.
 */
static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(display, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
/* * The BIOS can leave the PHY is some weird state * where it doesn't fully power down some parts. * Disable the asserts until the PHY has been fully * reset (ie. the power well has been disabled at * least once).
*/ if (!display->power.chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
if (intel_power_well_is_enabled(display, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
/* CL1 is on whenever anything is on in either channel */ if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
/* * The DPLLB check accounts for the pipe B + port A usage * with CL2 powered up but all the lanes in the second channel * powered down.
*/ if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
(intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
if (intel_power_well_is_enabled(display, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
/* * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change.
*/ if (intel_de_wait(display, DISPLAY_PHY_STATUS,
phy_status_mask, phy_status, 10))
drm_err(display->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
phy_status, display->power.chv_phy_control);
}
drm_WARN_ON_ONCE(display->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
if (id == VLV_DISP_PW_DPIO_CMN_BC)
phy = DPIO_PHY0; else
phy = DPIO_PHY1;
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */ if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy), 1))
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
} else { /* * Force the non-existing CL2 off. BXT does this * too, so maybe it saves some power even though * CL2 doesn't exist?
*/
tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
}
/* * The BIOS can leave the PHY is some weird state * where it doesn't fully power down some parts. * Disable the asserts until the PHY has been fully * reset (ie. the power well has been disabled at * least once).
*/ if (!display->power.chv_phy_assert[phy]) return;
vlv_dpio_get(display->drm);
val = vlv_dpio_read(display->drm, phy, reg);
vlv_dpio_put(display->drm);
/* * This assumes !override is only used when the port is disabled. * All lanes should power down even without the override when * the port is disabled.
*/ if (!override || mask == 0xf) {
expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; /* * If CH1 common lane is not active anymore * (eg. for pipe B DPLL) the entire channel will * shut down, which causes the common lane registers * to read as 0. That means we can't actually check * the lane power down status bits, but as the entire * register reads as 0 it's a good indication that the * channel is indeed entirely powered down.
*/ if (ch == DPIO_CH1 && val == 0)
expected = 0;
} elseif (mask != 0x0) {
expected = DPIO_ANYDL_POWERDOWN;
} else {
expected = 0;
}
if (ch == DPIO_CH0)
actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
DPIO_ALLDL_POWERDOWN_CH0, val); else
actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
DPIO_ALLDL_POWERDOWN_CH1, val);
drm_WARN(display->drm, actual != expected, "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
!!(actual & DPIO_ALLDL_POWERDOWN),
!!(actual & DPIO_ANYDL_POWERDOWN),
!!(expected & DPIO_ALLDL_POWERDOWN),
!!(expected & DPIO_ANYDL_POWERDOWN),
reg, val);
}
state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); /* * We only ever set the power-on and power-gate states, anything * else is unexpected.
*/
drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
/* * A transient state at this point would mean some unexpected party * is poking at the power controls too.
*/
ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
drm_WARN_ON(display->drm, ctrl << 16 != state);
if (wait_for(COND, 100))
drm_err(display->drm, "timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));
if (block)
low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; else
low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
/* * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us
*/
ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
ret = -EIO; else break;
}
static bool
tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just read
	 * the state from PCODE, so return the software refcount instead to
	 * avoid state mismatch errors. A non-zero refcount converts to true.
	 */
	return intel_power_well_refcount(power_well);
}
/* * The power status flag cannot be used to determine whether aux * power wells have finished powering up. Instead we're * expected to just wait a fixed 600us after raising the request * bit.
*/
usleep_range(600, 1200);
}
if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well enable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
}
}
if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well disable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.