staticconstchar *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{ switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return"DISPLAY_CORE"; case POWER_DOMAIN_PIPE_A: return"PIPE_A"; case POWER_DOMAIN_PIPE_B: return"PIPE_B"; case POWER_DOMAIN_PIPE_C: return"PIPE_C"; case POWER_DOMAIN_PIPE_D: return"PIPE_D"; case POWER_DOMAIN_PIPE_PANEL_FITTER_A: return"PIPE_PANEL_FITTER_A"; case POWER_DOMAIN_PIPE_PANEL_FITTER_B: return"PIPE_PANEL_FITTER_B"; case POWER_DOMAIN_PIPE_PANEL_FITTER_C: return"PIPE_PANEL_FITTER_C"; case POWER_DOMAIN_PIPE_PANEL_FITTER_D: return"PIPE_PANEL_FITTER_D"; case POWER_DOMAIN_TRANSCODER_A: return"TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: return"TRANSCODER_B"; case POWER_DOMAIN_TRANSCODER_C: return"TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_D: return"TRANSCODER_D"; case POWER_DOMAIN_TRANSCODER_EDP: return"TRANSCODER_EDP"; case POWER_DOMAIN_TRANSCODER_DSI_A: return"TRANSCODER_DSI_A"; case POWER_DOMAIN_TRANSCODER_DSI_C: return"TRANSCODER_DSI_C"; case POWER_DOMAIN_TRANSCODER_VDSC_PW2: return"TRANSCODER_VDSC_PW2"; case POWER_DOMAIN_PORT_DDI_LANES_A: return"PORT_DDI_LANES_A"; case POWER_DOMAIN_PORT_DDI_LANES_B: return"PORT_DDI_LANES_B"; case POWER_DOMAIN_PORT_DDI_LANES_C: return"PORT_DDI_LANES_C"; case POWER_DOMAIN_PORT_DDI_LANES_D: return"PORT_DDI_LANES_D"; case POWER_DOMAIN_PORT_DDI_LANES_E: return"PORT_DDI_LANES_E"; case POWER_DOMAIN_PORT_DDI_LANES_F: return"PORT_DDI_LANES_F"; case POWER_DOMAIN_PORT_DDI_LANES_TC1: return"PORT_DDI_LANES_TC1"; case POWER_DOMAIN_PORT_DDI_LANES_TC2: return"PORT_DDI_LANES_TC2"; case POWER_DOMAIN_PORT_DDI_LANES_TC3: return"PORT_DDI_LANES_TC3"; case POWER_DOMAIN_PORT_DDI_LANES_TC4: return"PORT_DDI_LANES_TC4"; case POWER_DOMAIN_PORT_DDI_LANES_TC5: return"PORT_DDI_LANES_TC5"; case POWER_DOMAIN_PORT_DDI_LANES_TC6: return"PORT_DDI_LANES_TC6"; case POWER_DOMAIN_PORT_DDI_IO_A: return"PORT_DDI_IO_A"; case POWER_DOMAIN_PORT_DDI_IO_B: return"PORT_DDI_IO_B"; case POWER_DOMAIN_PORT_DDI_IO_C: return"PORT_DDI_IO_C"; case POWER_DOMAIN_PORT_DDI_IO_D: return"PORT_DDI_IO_D"; case 
POWER_DOMAIN_PORT_DDI_IO_E: return"PORT_DDI_IO_E"; case POWER_DOMAIN_PORT_DDI_IO_F: return"PORT_DDI_IO_F"; case POWER_DOMAIN_PORT_DDI_IO_TC1: return"PORT_DDI_IO_TC1"; case POWER_DOMAIN_PORT_DDI_IO_TC2: return"PORT_DDI_IO_TC2"; case POWER_DOMAIN_PORT_DDI_IO_TC3: return"PORT_DDI_IO_TC3"; case POWER_DOMAIN_PORT_DDI_IO_TC4: return"PORT_DDI_IO_TC4"; case POWER_DOMAIN_PORT_DDI_IO_TC5: return"PORT_DDI_IO_TC5"; case POWER_DOMAIN_PORT_DDI_IO_TC6: return"PORT_DDI_IO_TC6"; case POWER_DOMAIN_PORT_DSI: return"PORT_DSI"; case POWER_DOMAIN_PORT_CRT: return"PORT_CRT"; case POWER_DOMAIN_PORT_OTHER: return"PORT_OTHER"; case POWER_DOMAIN_VGA: return"VGA"; case POWER_DOMAIN_AUDIO_MMIO: return"AUDIO_MMIO"; case POWER_DOMAIN_AUDIO_PLAYBACK: return"AUDIO_PLAYBACK"; case POWER_DOMAIN_AUX_IO_A: return"AUX_IO_A"; case POWER_DOMAIN_AUX_IO_B: return"AUX_IO_B"; case POWER_DOMAIN_AUX_IO_C: return"AUX_IO_C"; case POWER_DOMAIN_AUX_IO_D: return"AUX_IO_D"; case POWER_DOMAIN_AUX_IO_E: return"AUX_IO_E"; case POWER_DOMAIN_AUX_IO_F: return"AUX_IO_F"; case POWER_DOMAIN_AUX_A: return"AUX_A"; case POWER_DOMAIN_AUX_B: return"AUX_B"; case POWER_DOMAIN_AUX_C: return"AUX_C"; case POWER_DOMAIN_AUX_D: return"AUX_D"; case POWER_DOMAIN_AUX_E: return"AUX_E"; case POWER_DOMAIN_AUX_F: return"AUX_F"; case POWER_DOMAIN_AUX_USBC1: return"AUX_USBC1"; case POWER_DOMAIN_AUX_USBC2: return"AUX_USBC2"; case POWER_DOMAIN_AUX_USBC3: return"AUX_USBC3"; case POWER_DOMAIN_AUX_USBC4: return"AUX_USBC4"; case POWER_DOMAIN_AUX_USBC5: return"AUX_USBC5"; case POWER_DOMAIN_AUX_USBC6: return"AUX_USBC6"; case POWER_DOMAIN_AUX_TBT1: return"AUX_TBT1"; case POWER_DOMAIN_AUX_TBT2: return"AUX_TBT2"; case POWER_DOMAIN_AUX_TBT3: return"AUX_TBT3"; case POWER_DOMAIN_AUX_TBT4: return"AUX_TBT4"; case POWER_DOMAIN_AUX_TBT5: return"AUX_TBT5"; case POWER_DOMAIN_AUX_TBT6: return"AUX_TBT6"; case POWER_DOMAIN_GMBUS: return"GMBUS"; case POWER_DOMAIN_INIT: return"INIT"; case POWER_DOMAIN_GT_IRQ: return"GT_IRQ"; case POWER_DOMAIN_DC_OFF: return"DC_OFF"; case 
POWER_DOMAIN_TC_COLD_OFF: return"TC_COLD_OFF"; default:
MISSING_CASE(domain); return"?";
}
}
if (intel_display_rpm_suspended(display)) returnfalse;
is_enabled = true;
for_each_power_domain_well_reverse(display, power_well, domain) { if (intel_power_well_is_always_on(power_well)) continue;
if (!intel_power_well_is_enabled_cached(power_well)) {
is_enabled = false; break;
}
}
return is_enabled;
}
/** * intel_display_power_is_enabled - check for a power domain * @display: display device instance * @domain: power domain to check * * This function can be used to check the hw power domain state. It is mostly * used in hardware state readout functions. Everywhere else code should rely * upon explicit power domain reference counting to ensure that the hardware * block is powered up before accessing it. * * Callers must hold the relevant modesetting locks to ensure that concurrent * threads can't disable the power well while the caller tries to read a few * registers. * * Returns: * True when the power domain is enabled, false otherwise.
*/ bool intel_display_power_is_enabled(struct intel_display *display, enum intel_display_power_domain domain)
{ struct i915_power_domains *power_domains = &display->power.domains; bool ret;
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(display, domain);
mutex_unlock(&power_domains->lock);
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { if (target_dc_state != states[i]) continue;
if (power_domains->allowed_dc_mask & target_dc_state) break;
target_dc_state = states[i + 1];
}
return target_dc_state;
}
/**
 * intel_display_power_set_target_dc_state - set target DC state
 * @display: display device
 * @state: state which needs to be set as target_dc_state
 *
 * This function sets the "DC off" power well's target_dc_state; based
 * upon this target_dc_state, the "DC off" power well will enable the
 * desired DC state.
 */
void intel_display_power_set_target_dc_state(struct intel_display *display,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &display->power.domains;

	mutex_lock(&power_domains->lock);
	/*
	 * NOTE(review): the lock/lookup above were missing from this excerpt:
	 * power_well was read uninitialized and "goto unlock" released a mutex
	 * that was never taken. The DC-off well id is reconstructed — confirm
	 * SKL_DISP_DC_OFF against the full file.
	 */
	power_well = lookup_power_well(display, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(display->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(display, state);

	/* Nothing to do if the target state is already in effect. */
	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(display, power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and disable
	 * the DC off power well to effect the new target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(display, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(display, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_get_current_dc_state - get the currently allowed DC state
 * @display: display device
 *
 * Reads back which DC state the "DC off" power well currently allows,
 * as opposed to the configured target_dc_state.
 */
u32 intel_display_power_get_current_dc_state(struct intel_display *display)
{ struct i915_power_well *power_well; struct i915_power_domains *power_domains = &display->power.domains;
u32 current_dc_state = DC_STATE_DISABLE;
/** * intel_display_power_get - grab a power domain reference * @display: display device instance * @domain: power domain to reference * * This function grabs a power domain reference for @domain and ensures that the * power domain and all its parents are powered up. Therefore users should only * grab a reference to the innermost power domain they need. * * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t intel_display_power_get(struct intel_display *display, enum intel_display_power_domain domain)
{ struct i915_power_domains *power_domains = &display->power.domains; struct ref_tracker *wakeref;
/** * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain * @display: display device instance * @domain: power domain to reference * * This function grabs a power domain reference for @domain and ensures that the * power domain and all its parents are powered up. Therefore users should only * grab a reference to the innermost power domain they need. * * Any power domain reference obtained by this function must have a symmetric * call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display, enum intel_display_power_domain domain)
{ struct i915_power_domains *power_domains = &display->power.domains; struct ref_tracker *wakeref; bool is_enabled;
wakeref = intel_display_rpm_get_if_in_use(display); if (!wakeref) return NULL;
/* * Bail out if all the domain refs pending to be released were grabbed * by subsequent gets or a flush_work.
*/
old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); if (!old_work_wakeref) goto out_verify;
/* * Cancel the work that got queued after this one got dequeued, * since here we released the corresponding async-put reference.
*/
cancel_async_put_work(power_domains, false);
/* Requeue the work if more domains were async put meanwhile. */ if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
bitmap_copy(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
bitmap_zero(power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref),
power_domains->async_put_next_delay);
power_domains->async_put_next_delay = 0;
}
if (old_work_wakeref)
intel_display_rpm_put_raw(display, old_work_wakeref); if (new_work_wakeref)
intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @display: display device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 * @delay_ms: delay of powering down the power domain
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 * The power down is delayed by @delay_ms if this is >= 0, or by a default
 * 100 ms otherwise.
 */
void __intel_display_power_put_async(struct intel_display *display,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref, int delay_ms)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	/*
	 * NOTE(review): work_wakeref is consumed below without any visible
	 * initialization in this excerpt — the raw rpm-get and the
	 * mutex_lock()/mutex_unlock() pair around the queueing logic appear
	 * to have been dropped by the extraction. Confirm against the full
	 * file before relying on this function.
	 */
	struct ref_tracker *work_wakeref;

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		/*
		 * A worker is already queued: record this domain in the
		 * second-pass set and stretch the delay if needed.
		 */
		set_bit(domain, power_domains->async_put_domains[1].bits);
		power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
							  delay_ms);
	} else {
		/* No worker pending: mark the domain and queue one. */
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref),
					     delay_ms);
	}

	/* Drop the raw wakeref if it wasn't handed over to the worker. */
	if (work_wakeref)
		intel_display_rpm_put_raw(display, work_wakeref);

	intel_display_rpm_put(display, wakeref);
}
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @display: display device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	/*
	 * NOTE(review): the excerpt jumped to an undefined "out_verify" label
	 * and never released the mutex; the release of the pending async-put
	 * domains and the label/unlock below are reconstructed — confirm the
	 * helper names against the full file.
	 */
	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_display_rpm_put_raw(display, work_wakeref);
}
/** * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work * @display: display device instance * * Like intel_display_power_flush_work(), but also ensure that the work * handler function is not running any more when this function returns.
*/ staticvoid
intel_display_power_flush_work_sync(struct intel_display *display)
{ struct i915_power_domains *power_domains = &display->power.domains;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @display: display device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct intel_display *display,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(display, domain);
	/* Release the runtime-pm wakeref tracked for this reference. */
	intel_display_rpm_put(display, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @display: display device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled;
 * it should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct intel_display *display,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(display, domain);
	/* No wakeref tracking in this configuration. */
	intel_display_rpm_put_unchecked(display);
}
#endif
/* * DC9 has a separate HW flow from the rest of the DC states, * not depending on the DMC firmware. It's needed by system * suspend/resume, so allow it unconditionally.
*/
mask = display->platform.geminilake || display->platform.broxton ||
DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
if (!display->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
requested_dc = enable_dc;
} elseif (enable_dc == -1) {
requested_dc = max_dc;
} elseif (enable_dc > max_dc && enable_dc <= 4) {
drm_dbg_kms(display->drm, "Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
drm_err(display->drm, "Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
switch (requested_dc) { case 4:
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; break; case 3:
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; break; case 2:
mask |= DC_STATE_EN_UPTO_DC6; break; case 1:
mask |= DC_STATE_EN_UPTO_DC5; break;
}
drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
/** * intel_power_domains_init - initializes the power domain structures * @display: display device instance * * Initializes the power domain structures for @display depending upon the * supported platform.
*/ int intel_power_domains_init(struct intel_display *display)
{ struct i915_power_domains *power_domains = &display->power.domains;
drm_WARN(display->drm, req_slices & ~slice_mask, "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
req_slices, slice_mask);
drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/* * Might be running this in parallel to gen9_dc_off_power_well_enable * being called from intel_dp_detect for instance, * which causes assertion triggered by race condition, * as gen9_assert_dbuf_enabled might preempt this when registers * were already updated, while dev_priv was not.
*/
mutex_lock(&power_domains->lock);
if (DISPLAY_VER(display) >= 14)
intel_pmdemand_program_dbuf(display, slices_mask);
/* * Just power up at least 1 slice, we will * figure out later which slices we have and what we need.
*/
gen9_dbuf_slices_update(display, slices_mask);
}
/* * gen12 platforms that use abox1 and abox2 for pixel data reads still * expect us to program the abox_ctl0 register as well, even though * we don't have to program other instance-0 registers like BW_BUDDY.
*/ if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
/*
 * hsw_assert_cdclk - sanity-check the HSW/BDW CDCLK source (LCPLL)
 * @display: display device instance
 *
 * The LCPLL register should be turned on by the BIOS. For now let's just
 * check its state and print errors in case something is wrong.
 * Don't even try to turn it on.
 */
static void hsw_assert_cdclk(struct intel_display *display)
{
	u32 val = intel_de_read(display, LCPLL_CTL);

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(display->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(display->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}
/* * In theory we can still leave IRQs enabled, as long as only the HPD * interrupts remain enabled. We used to check for that, but since it's * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough.
*/
INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
/*
 * hsw_write_dcomp - write the D_COMP register
 * @display: display device instance
 * @val: value to write
 *
 * On Haswell D_COMP is written through the pcode mailbox; on Broadwell it
 * is a regular MMIO register with a posting read to flush the write.
 */
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
	if (display->platform.haswell) {
		if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
	} else {
		intel_de_write(display, D_COMP_BDW, val);
		intel_de_posting_read(display, D_COMP_BDW);
	}
}
/* * This function implements pieces of two sequences from BSpec: * - Sequence for display software to disable LCPLL * - Sequence for display software to allow package C8+ * The steps implemented here are just the steps that actually touch the LCPLL * register. Callers should take care of disabling all the display engine * functions, doing the mode unset, fixing interrupts, etc.
*/ staticvoid hsw_disable_lcpll(struct intel_display *display, bool switch_to_fclk, bool allow_power_down)
{
u32 val;
assert_can_disable_lcpll(display);
val = intel_de_read(display, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
drm_err(display->drm, "Switching to FCLK failed\n");
val = intel_de_read(display, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
intel_de_write(display, LCPLL_CTL, val);
intel_de_posting_read(display, LCPLL_CTL);
if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(display);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(display, val);
ndelay(100);
if (wait_for((hsw_read_dcomp(display) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
drm_err(display->drm, "D_COMP RCOMP still in progress\n");
/* * Make sure we're not on PC8 state before disabling PC8, otherwise * we'll hang the machine. To prevent PC8 state, just enable force_wake.
*/
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
intel_de_write(display, LCPLL_CTL, val);
intel_de_posting_read(display, LCPLL_CTL);
}
val = hsw_read_dcomp(display);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(display, val);
val = intel_de_read(display, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
intel_de_write(display, LCPLL_CTL, val);
if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(display->drm, "Switching back to LCPLL failed\n");
}
/* * Package states C8 and deeper are really deep PC states that can only be * reached when all the devices on the system allow it, so even if the graphics * device allows PC8+, it doesn't mean the system will actually get to these * states. Our driver only allows PC8+ when going into runtime PM. * * The requirements for PC8+ are that all the outputs are disabled, the power * well is disabled and most interrupts are disabled, and these are also * requirements for runtime PM. When these conditions are met, we manually do * the other conditions: disable the interrupts, clocks and switch LCPLL refclk * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard * hang the machine. * * When we really reach PC8 or deeper states (not just when we allow it) we lose * the state of some registers, so when we come back from PC8+ we need to * restore this state. We don't get into PC8+ if we're not in RC6, so we don't * need to take care of the registers kept by RC6. Notice that this happens even * if we don't put the device in PCI D3 state (which is what currently happens * because of the runtime PM support). * * For more, read "Display Sequences for Package C8" on the hardware * documentation.
*/ staticvoid hsw_enable_pc8(struct intel_display *display)
{
drm_dbg_kms(display->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
gen9_disable_dc_states(display); /* TODO: disable DMC program */
gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
/* The spec doesn't call for removing the reset handshake flag */ /* disable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
/* * BSpec says to keep the MISC IO power well enabled here, only * remove our request for power well 1. * Note that even though the driver's request is removed power well 1 * may stay enabled after this due to DMC's own request on it.
*/
well = lookup_power_well(display, SKL_DISP_PW_1);
intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
/* * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT * or else the reset will hang because there is no PCH to respond. * Move the handshake programming to initialization sequence. * Previously was left up to BIOS.
*/
intel_pch_reset_handshake(display, false);
gen9_disable_dc_states(display); /* TODO: disable DMC program */
gen9_dbuf_disable(display);
intel_cdclk_uninit_hw(display);
/* The spec doesn't call for removing the reset handshake flag */
/* * Disable PW1 (PG1). * Note that even though the driver's request is removed power well 1 * may stay enabled after this due to DMC's own request on it.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(display, SKL_DISP_PW_1);
intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
/* 2. Initialize all combo phys */
intel_combo_phy_init(display);
/* * 3. Enable Power Well 1 (PG1). * The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(display, SKL_DISP_PW_1);
intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
if (DISPLAY_VER(display) == 14)
intel_de_rmw(display, DC_STATE_EN,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
if (DISPLAY_VER(display) == 12 || display->platform.dg2)
gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
gen9_dbuf_enable(display);
/* 6. Setup MBUS. */
icl_mbus_init(display);
/* 7. Program arbiter BW_BUDDY registers */ if (DISPLAY_VER(display) >= 12)
tgl_bw_buddy_init(display);
/* 8. Ensure PHYs have completed calibration and adaptation */ if (display->platform.dg2)
intel_snps_phy_wait_for_calibration(display);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */ if (DISPLAY_VERx100(display) == 1401)
intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(display);
if (DISPLAY_VER(display) == 14)
intel_de_rmw(display, DC_STATE_EN, 0,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
/* * 4. Disable Power Well 1 (PG1). * The AUX IO power wells are toggled on demand, so they are already * disabled at this point.
*/
mutex_lock(&power_domains->lock);
well = lookup_power_well(display, SKL_DISP_PW_1);
intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
/* * DISPLAY_PHY_CONTROL can get corrupted if read. As a * workaround never ever read DISPLAY_PHY_CONTROL, and * instead maintain a shadow copy ourselves. Use the actual * power well state and lane status to reconstruct the * expected initial value.
*/
display->power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
/* * If all lanes are disabled we leave the override disabled * with all power down bits cleared to match the state we * would use after disabling the port. Otherwise enable the * override and set the lane powerdown bits accding to the * current lane status.
*/ if (intel_power_well_is_enabled(display, cmn_bc)) {
u32 status = intel_de_read(display, DPLL(display, PIPE_A)); unsignedint mask;
mask = status & DPLL_PORTB_READY_MASK; if (mask == 0xf)
mask = 0x0; else
display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
/* If the display might be already active skip this */ if (intel_power_well_is_enabled(display, cmn) &&
intel_power_well_is_enabled(display, disp2d) &&
intel_de_read(display, DPIO_CTL) & DPIO_CMNRST) return;
drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
/* * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: * Need to assert and de-assert PHY SB reset by gating the * common lane power, then un-gating it. * Simply ungating isn't enough to reset the PHY enough to get * ports and lanes running.
*/
intel_power_well_disable(display, cmn);
}
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @display: display device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	/*
	 * NOTE(review): @resume is unused here and no platform-specific
	 * display-core init sequence is visible in this excerpt; it appears
	 * to have been elided (as does the matching "initializing = true"
	 * for the clear below) — confirm against the full file.
	 */

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(display, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!display->params.disable_power_well) {
		drm_WARN_ON(display->drm, power_domains->disable_wakeref);
		display->power.domains.disable_wakeref = intel_display_power_get(display,
										 POWER_DOMAIN_INIT);
	}

	/* Sync software reference counts with the current HW well state. */
	intel_power_domains_sync_hw(display);

	power_domains->initializing = false;
}
/** * intel_power_domains_driver_remove - deinitialize hw power domain state * @display: display device instance * * De-initializes the display power domain HW state. It also ensures that the * device stays powered up so that the driver can be reloaded. * * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw().
*/ void intel_power_domains_driver_remove(struct intel_display *display)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */ if (!display->params.disable_power_well)
intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work_sync(display);
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_display_rpm_put(display, wakeref);
}
/** * intel_power_domains_sanitize_state - sanitize power domains state * @display: display device instance * * Sanitize the power domains state during driver loading and system resume. * The function will disable all display power wells that BIOS has enabled * without a user for it (any user for a power well has taken a reference * on it by the time this function is called, after the state of all the * pipe, encoder, etc. HW resources have been sanitized).
*/ void intel_power_domains_sanitize_state(struct intel_display *display)
{ struct i915_power_domains *power_domains = &display->power.domains; struct i915_power_well *power_well;
drm_dbg_kms(display->drm, "BIOS left unused %s power well enabled, disabling it\n",
intel_power_well_name(power_well));
intel_power_well_disable(display, power_well);
}
mutex_unlock(&power_domains->lock);
}
/** * intel_power_domains_enable - enable toggling of display power wells * @display: display device instance * * Enable the ondemand enabling/disabling of the display power wells. Note that * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled * only at specific points of the display modeset sequence, thus they are not * affected by the intel_power_domains_enable()/disable() calls. The purpose * of these function is to keep the rest of power wells enabled until the end * of display HW readout (which will acquire the power references reflecting * the current HW state).
*/ void intel_power_domains_enable(struct intel_display *display)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
/** * intel_power_domains_disable - disable toggling of display power wells * @display: display device instance * * Disable the ondemand enabling/disabling of the display power wells. See * intel_power_domains_enable() for which power wells this call controls.
*/ void intel_power_domains_disable(struct intel_display *display)
{ struct i915_power_domains *power_domains = &display->power.domains;
/** * intel_power_domains_suspend - suspend power domain state * @display: display device instance * @s2idle: specifies whether we go to idle, or deeper sleep * * This function prepares the hardware power domain state before entering * system suspend. * * It must be called with power domains already disabled (after a call to * intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/ void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
/* * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 * support don't manually deinit the power domains. This also means the * DMC firmware will stay active, it will power down any HW * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive.
*/ if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
intel_dmc_has_payload(display)) {
intel_display_power_flush_work(display);
intel_power_domains_verify_state(display); return;
}
/* * Even if power well support was disabled we still want to disable * power wells if power domains must be deinitialized for suspend.
*/ if (!display->params.disable_power_well)
intel_display_power_put(display, POWER_DOMAIN_INIT,
fetch_and_zero(&display->power.domains.disable_wakeref));
/** * intel_power_domains_resume - resume power domain state * @display: display device instance * * This function resume the hardware power domain state during system resume. *
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.22 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.