Anforderungen  |   Konzepte  |   Entwurf  |   Entwicklung  |   Qualitätssicherung  |   Lebenszyklus  |   Steuerung
 
 
 
 


Source:  intel_display.c   Language: C

 
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Eric Anholt <eric@anholt.net>
 */


#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_flipq.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pfit.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
         const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_device *drm)
{
 /* Possible HPLL VCO frequencies in MHz, indexed by the fuse field */
 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

 /*
  * Obtain SKU information: the CCK fuse register encodes which HPLL
  * VCO frequency this part runs at. NOTE(review): assumes the masked
  * field can never exceed 3 (i.e. CCK_FUSE_HPLL_FREQ_MASK covers two
  * bits) -- confirm against the register definition.
  */
 hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
  CCK_FUSE_HPLL_FREQ_MASK;

 /* table is in MHz, return kHz */
 return vco_freq[hpll_freq] * 1000;
}

/*
 * Read a CCK clock control register and derive the resulting clock
 * frequency (same unit as @ref_freq) from its divider field.
 */
int vlv_get_cck_clock(struct drm_device *drm,
        const char *name, u32 reg, int ref_freq)
{
 u32 val;
 int divider;

 val = vlv_cck_read(drm, reg);
 divider = val & CCK_FREQUENCY_VALUES;

 /*
  * The status field is expected to mirror the programmed divider;
  * a mismatch means a frequency change is still in flight.
  */
 drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
   (divider << CCK_FREQUENCY_STATUS_SHIFT),
   "%s change in progress\n", name);

 /* clock = 2 * ref_freq / (divider + 1), rounded to nearest */
 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO as the reference
 * frequency, reading (and lazily caching) it on first use. The whole
 * sequence runs under the CCK sideband lock.
 */
int vlv_get_cck_clock_hpll(struct drm_device *drm,
      const char *name, u32 reg)
{
 struct drm_i915_private *dev_priv = to_i915(drm);
 int hpll;

 vlv_cck_get(drm);

 /* 0 means "not read yet"; cache the fused VCO frequency */
 if (dev_priv->hpll_freq == 0)
  dev_priv->hpll_freq = vlv_get_hpll_vco(drm);

 hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);

 vlv_cck_put(drm);

 return hpll;
}

/*
 * Refresh the cached CZ clock frequency. Only meaningful on
 * Valleyview/Cherryview; a no-op on every other platform.
 */
void intel_update_czclk(struct intel_display *display)
{
 struct drm_i915_private *dev_priv = to_i915(display->drm);

 if (!display->platform.valleyview && !display->platform.cherryview)
  return;

 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
            CCK_CZ_CLOCK_CONTROL);

 drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

/*
 * True when every active plane is either an HDR-capable plane or the
 * cursor, i.e. no SDR-only plane is in use on this CRTC.
 */
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
 return (crtc_state->active_planes &
  ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
 /* Toggle both DUPS clock-gating-disable bits together for this pipe */
 intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
       DUPS1_GATING_DIS | DUPS2_GATING_DIS,
       enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
         bool enable)
{
 /* Disable (enable=true) or re-enable DPFR clock gating on this pipe */
 intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
       DPFR_GATING_DIS,
       enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
         bool enable)
{
 /* Disable (enable=true) or re-enable cursor clock gating on this pipe */
 intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
       CURSOR_GATING_DIS,
       enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
 return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
 return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
 return is_trans_port_sync_master(crtc_state) ||
  is_trans_port_sync_slave(crtc_state);
}

/* The lowest pipe in the joined-pipes mask acts as the primary. */
static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
 return ffs(crtc_state->joiner_pipes) - 1;
}

/*
 * The following helper functions, despite being named for bigjoiner,
 * are applicable to both bigjoiner and uncompressed joiner configurations.
 */

static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
{
 /* Two or more joined pipes means a (big/uncompressed) joiner config. */
 return hweight8(crtc_state->joiner_pipes) > 1;
}

/*
 * Mask of pipes acting as bigjoiner primaries, or 0 when this config
 * doesn't use a joiner. The 0b01010101 pattern, shifted up to the
 * primary pipe, selects every other pipe starting at the primary.
 */
static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
 if (!is_bigjoiner(crtc_state))
  return 0;

 return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
}

static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
 if (!is_bigjoiner(crtc_state))
  return 0;

 return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
}

/* True when this CRTC's pipe is a bigjoiner primary in this config. */
bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 if (!is_bigjoiner(crtc_state))
  return false;

 return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
}

/* True when this CRTC's pipe is a bigjoiner secondary in this config. */
bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 if (!is_bigjoiner(crtc_state))
  return false;

 return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
}

/*
 * Pipes to treat as "primary" for a modeset: the joiner primaries when
 * a joiner is in use, otherwise just this CRTC's own pipe.
 */
u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 if (!is_bigjoiner(crtc_state))
  return BIT(crtc->pipe);

 return bigjoiner_primary_pipes(crtc_state);
}

/* Pipes to treat as "secondary" for a modeset (empty without a joiner). */
u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
 u8 secondary_pipes = bigjoiner_secondary_pipes(crtc_state);

 return secondary_pipes;
}

bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
{
 /* Ultrajoiner configurations join at least four pipes. */
 int num_pipes = intel_crtc_num_joined_pipes(crtc_state);

 return num_pipes >= 4;
}

/*
 * Mask of pipes acting as ultrajoiner primaries, or 0 outside an
 * ultrajoiner config. The 0b00010001 pattern, shifted up to the primary
 * pipe, selects every fourth pipe starting at the primary.
 */
static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
{
 if (!intel_crtc_is_ultrajoiner(crtc_state))
  return 0;

 return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state));
}

/* True when this CRTC's pipe is an ultrajoiner primary in this config. */
bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 return intel_crtc_is_ultrajoiner(crtc_state) &&
        BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
}

/*
 * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or
 * any other logic, so lets just add helper function to
 * at least hide this hassle..
 */

/*
 * Mask of pipes on which the ultrajoiner enable bit must be set, or 0
 * outside an ultrajoiner config. The 0b01110111 pattern, shifted up to
 * the primary pipe, selects all joined pipes except every fourth one.
 */
static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
{
 if (!intel_crtc_is_ultrajoiner(crtc_state))
  return 0;

 return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
}

/* True when this CRTC's pipe needs the ultrajoiner enable bit set. */
bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 return intel_crtc_is_ultrajoiner(crtc_state) &&
        BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
 /* Secondary pipes are all joined pipes except the primary one. */
 if (!crtc_state->joiner_pipes)
  return 0;

 return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
}

/* True when a joiner is in use and this CRTC is not its primary pipe. */
bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 return crtc_state->joiner_pipes &&
  crtc->pipe != joiner_primary_pipe(crtc_state);
}

/* True when a joiner is in use and this CRTC is its primary pipe. */
bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 return crtc_state->joiner_pipes &&
  crtc->pipe == joiner_primary_pipe(crtc_state);
}

int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
 return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}

/* Mask of this CRTC's own pipe plus all pipes joined with it. */
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

/*
 * Resolve the primary CRTC for this state: the joiner primary's CRTC if
 * this state belongs to a joiner secondary, otherwise the CRTC itself.
 */
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 if (intel_crtc_is_joiner_secondary(crtc_state))
  return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
 else
  return to_intel_crtc(crtc_state->uapi.crtc);
}

/*
 * Wait until the pipe has actually shut down after disabling it.
 * Gen4+ exposes a state bit in TRANSCONF to poll; older hardware is
 * handled by waiting for the scanline counter to stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
 struct intel_display *display = to_intel_display(old_crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

 if (DISPLAY_VER(display) >= 4) {
  enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

  /* Wait for the Pipe State to go off (100 ms timeout) */
  if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
         TRANSCONF_STATE_ENABLE, 100))
   drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
 } else {
  intel_wait_for_pipe_scanline_stopped(crtc);
 }
}

/*
 * Warn if the transcoder's actual enabled state differs from @state.
 * If the transcoder's power well is down, the hardware state is read
 * as "disabled" without touching the register.
 */
void assert_transcoder(struct intel_display *display,
         enum transcoder cpu_transcoder, bool state)
{
 bool cur_state;
 enum intel_display_power_domain power_domain;
 intel_wakeref_t wakeref;

 /* we keep both pipes enabled on 830 */
 if (display->platform.i830)
  state = true;

 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
 wakeref = intel_display_power_get_if_enabled(display, power_domain);
 if (wakeref) {
  u32 val = intel_de_read(display,
     TRANSCONF(display, cpu_transcoder));
  cur_state = !!(val & TRANSCONF_ENABLE);

  intel_display_power_put(display, power_domain, wakeref);
 } else {
  /* power well off -> transcoder can't be enabled */
  cur_state = false;
 }

 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
     "transcoder %s assertion failure (expected %s, current %s)\n",
     transcoder_name(cpu_transcoder), str_on_off(state),
     str_on_off(cur_state));
}

/*
 * Warn if the plane's hardware enabled state differs from @state,
 * as reported by the plane's own get_hw_state() hook.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
 struct intel_display *display = to_intel_display(plane->base.dev);
 enum pipe pipe;
 bool cur_state;

 cur_state = plane->get_hw_state(plane, &pipe);

 INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
     "%s assertion failure (expected %s, current %s)\n",
     plane->base.name, str_on_off(state),
     str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Warn if any plane on @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);
 struct intel_plane *plane;

 for_each_intel_plane_on_crtc(display->drm, crtc, plane)
  assert_plane_disabled(plane);
}

/*
 * Enable the CPU transcoder/pipe for the new CRTC state: sanity-check
 * preconditions (planes off, PLLs running), apply platform workarounds,
 * set TRANSCONF_ENABLE, and wait for the pipe to actually start when
 * the vblank counter can't be trusted yet.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
 struct intel_display *display = to_intel_display(new_crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 enum pipe pipe = crtc->pipe;
 u32 val;

 drm_dbg_kms(display->drm, "enabling pipe %c\n", pipe_name(pipe));

 assert_planes_disabled(crtc);

 /*
  * A pipe without a PLL won't actually be able to drive bits from
  * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
  * need the check.
  */
 if (HAS_GMCH(display)) {
  if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
   assert_dsi_pll_enabled(display);
  else
   assert_pll_enabled(display, pipe);
 } else {
  if (new_crtc_state->has_pch_encoder) {
   /* if driving the PCH, we need FDI enabled */
   assert_fdi_rx_pll_enabled(display,
        intel_crtc_pch_transcoder(crtc));
   assert_fdi_tx_pll_enabled(display,
        (enum pipe) cpu_transcoder);
  }
  /* FIXME: assert CPU port conditions for SNB+ */
 }

 /* Wa_22012358565:adl-p */
 if (DISPLAY_VER(display) == 13)
  intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
        0, PIPE_ARB_USE_PROG_SLOTS);

 if (DISPLAY_VER(display) >= 14) {
  u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
  u32 set = 0;

  /* the jitter workaround bit is only set on display ver 14 */
  if (DISPLAY_VER(display) == 14)
   set |= DP_FEC_BS_JITTER_WA;

  intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
        clear, set);
 }

 val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
 if (val & TRANSCONF_ENABLE) {
  /* we keep both pipes enabled on 830 */
  drm_WARN_ON(display->drm, !display->platform.i830);
  return;
 }

 /* Wa_1409098942:adlp+ */
 if (DISPLAY_VER(display) >= 13 &&
     new_crtc_state->dsc.compression_enable) {
  val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
  val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
          TRANSCONF_PIXEL_COUNT_SCALING_X4);
 }

 intel_de_write(display, TRANSCONF(display, cpu_transcoder),
         val | TRANSCONF_ENABLE);
 intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));

 /*
  * Until the pipe starts PIPEDSL reads will return a stale value,
  * which causes an apparent vblank timestamp jump when PIPEDSL
  * resets to its proper value. That also messes up the frame count
  * when it's derived from the timestamps. So let's wait for the
  * pipe to start properly before we call drm_crtc_vblank_on()
  */
 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
  intel_wait_for_pipe_scanline_moving(crtc);
}

/*
 * Disable the CPU transcoder/pipe for the old CRTC state, then wait
 * for the pipe to really stop. On i830 the pipe is deliberately left
 * enabled (only double-wide / workaround bits are cleared).
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
 struct intel_display *display = to_intel_display(old_crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 enum pipe pipe = crtc->pipe;
 u32 val;

 drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));

 /*
  * Make sure planes won't keep trying to pump pixels to us,
  * or we might hang the display.
  */
 assert_planes_disabled(crtc);

 val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
 if ((val & TRANSCONF_ENABLE) == 0)
  return;

 /*
  * Double wide has implications for planes
  * so best keep it disabled when not needed.
  */
 if (old_crtc_state->double_wide)
  val &= ~TRANSCONF_DOUBLE_WIDE;

 /* Don't disable pipe or pipe PLLs if needed */
 if (!display->platform.i830)
  val &= ~TRANSCONF_ENABLE;

 /* Wa_1409098942:adlp+ */
 if (DISPLAY_VER(display) >= 13 &&
     old_crtc_state->dsc.compression_enable)
  val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

 intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);

 if (DISPLAY_VER(display) >= 12)
  intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
        FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

 /* only wait when we actually cleared the enable bit (not i830) */
 if ((val & TRANSCONF_ENABLE) == 0)
  intel_wait_for_pipe_off(old_crtc_state);
}

/*
 * Maximum framebuffer stride supported for the given format/modifier,
 * taken from the primary plane of the first CRTC. Returns 0 when the
 * device has no display (or no CRTCs).
 */
u32 intel_plane_fb_max_stride(struct drm_device *drm,
         u32 pixel_format, u64 modifier)
{
 struct intel_display *display = to_intel_display(drm);
 struct intel_crtc *crtc;
 struct intel_plane *plane;

 if (!HAS_DISPLAY(display))
  return 0;

 /*
  * We assume the primary plane for pipe A has
  * the highest stride limits of them all,
  * if in case pipe A is disabled, use the first pipe from pipe_mask.
  */
 crtc = intel_first_crtc(display);
 if (!crtc)
  return 0;

 plane = to_intel_plane(crtc->base.primary);

 return plane->max_stride(plane, pixel_format, modifier,
     DRM_MODE_ROTATE_0);
}

/*
 * Update a plane's visibility and keep the CRTC's uapi plane_mask in
 * sync with it.
 */
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
        struct intel_plane_state *plane_state,
        bool visible)
{
 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 u32 plane_bit = drm_plane_mask(&plane->base);

 plane_state->uapi.visible = visible;

 if (!visible)
  crtc_state->uapi.plane_mask &= ~plane_bit;
 else
  crtc_state->uapi.plane_mask |= plane_bit;
}

/*
 * Rebuild enabled_planes/active_planes from the uapi plane_mask.
 */
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct drm_plane *plane;

 /*
  * Active_planes aliases if multiple "primary" or cursor planes
  * have been used on the same (or wrong) pipe. plane_mask uses
  * unique ids, hence we can use that to reconstruct active_planes.
  */
 crtc_state->enabled_planes = 0;
 crtc_state->active_planes = 0;

 drm_for_each_plane_mask(plane, display->drm,
    crtc_state->uapi.plane_mask) {
  crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
  crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
 }
}

/*
 * Disable a plane outside of the atomic commit machinery (e.g. during
 * initial hardware takeover), keeping the software state bitmasks and
 * the various platform workarounds (IPS, cxsr, gen2 underruns) in step.
 * The ordering of the steps below is significant.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
      struct intel_plane *plane)
{
 struct intel_display *display = to_intel_display(crtc);
 struct intel_crtc_state *crtc_state =
  to_intel_crtc_state(crtc->base.state);
 struct intel_plane_state *plane_state =
  to_intel_plane_state(plane->base.state);

 drm_dbg_kms(display->drm,
      "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
      plane->base.base.id, plane->base.name,
      crtc->base.base.id, crtc->base.name);

 /* update software state before touching the hardware */
 intel_plane_set_invisible(crtc_state, plane_state);
 intel_set_plane_visible(crtc_state, plane_state, false);
 intel_plane_fixup_bitmasks(crtc_state);

 skl_wm_plane_disable_noatomic(crtc, plane);

 /* IPS must be off once only the cursor (at most) remains active */
 if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
     hsw_ips_disable(crtc_state)) {
  crtc_state->ips_enabled = false;
  intel_plane_initial_vblank_wait(crtc);
 }

 /*
  * Vblank time updates from the shadow to live plane control register
  * are blocked if the memory self-refresh mode is active at that
  * moment. So to make sure the plane gets truly disabled, disable
  * first the self-refresh mode. The self-refresh enable bit in turn
  * will be checked/applied by the HW only at the next frame start
  * event which is after the vblank start event, so we need to have a
  * wait-for-vblank between disabling the plane and the pipe.
  */
 if (HAS_GMCH(display) &&
     intel_set_memory_cxsr(display, false))
  intel_plane_initial_vblank_wait(crtc);

 /*
  * Gen2 reports pipe underruns whenever all planes are disabled.
  * So disable underrun reporting before all the planes get disabled.
  */
 if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
  intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);

 intel_plane_disable_arm(NULL, plane, crtc_state);
 intel_plane_initial_vblank_wait(crtc);
}

/*
 * Y offset of the plane's first color plane relative to its fence,
 * derived by adjusting (0,0) against the stored aligned offset.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
 int x = 0, y = 0;

 intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
       plane_state->view.color_plane[0].offset, 0);

 return y;
}

/*
 * Program the PIPE_CHICKEN register with the ICL+ workaround bits
 * appropriate for this platform.
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum pipe pipe = crtc->pipe;
 u32 tmp;

 tmp = intel_de_read(display, PIPE_CHICKEN(pipe));

 /*
  * Display WA #1153: icl
  * enable hardware to bypass the alpha math
  * and rounding for per-pixel values 00 and 0xff
  */
 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;

 /*
  * Display WA # 1605353570: icl
  * Set the pixel rounding bit to 1 for allowing
  * passthrough of Frame buffer pixels unmodified
  * across pipe
  */
 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

 /*
  * Underrun recovery must always be disabled on display 13+.
  * DG2 chicken bit meaning is inverted compared to other platforms.
  */
 if (display->platform.dg2)
  tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
 else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
  tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

 /* Wa_14010547955:dg2 */
 if (display->platform.dg2)
  tmp |= DG2_RENDER_CCSTAG_4_3_EN;

 intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}

/*
 * Check whether any CRTC still has a commit whose cleanup (and thus fb
 * unpin) hasn't completed. On finding one, wait a vblank on that CRTC
 * to give the cleanup a chance to run, then report true.
 */
bool intel_has_pending_fb_unpin(struct intel_display *display)
{
 struct drm_crtc *crtc;
 bool cleanup_done;

 drm_for_each_crtc(crtc, display->drm) {
  struct drm_crtc_commit *commit;
  spin_lock(&crtc->commit_lock);
  commit = list_first_entry_or_null(&crtc->commit_list,
        struct drm_crtc_commit, commit_entry);
  /* no commit at all counts as "cleanup done" */
  cleanup_done = commit ?
   try_wait_for_completion(&commit->cleanup_done) : true;
  spin_unlock(&crtc->commit_lock);

  if (cleanup_done)
   continue;

  intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

  return true;
 }

 return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */

/*
 * Find the single encoder driven by this CRTC in the new atomic state,
 * resolving joiner secondaries to their primary CRTC first. Warns if
 * the CRTC feeds anything other than exactly one encoder.
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
      const struct intel_crtc_state *crtc_state)
{
 const struct drm_connector_state *connector_state;
 const struct drm_connector *connector;
 struct intel_encoder *encoder = NULL;
 struct intel_crtc *primary_crtc;
 int num_encoders = 0;
 int i;

 primary_crtc = intel_primary_crtc(crtc_state);

 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
  if (connector_state->crtc != &primary_crtc->base)
   continue;

  encoder = to_intel_encoder(connector_state->best_encoder);
  num_encoders++;
 }

 drm_WARN(state->base.dev, num_encoders != 1,
   "%d encoders for pipe %c\n",
   num_encoders, pipe_name(primary_crtc->pipe));

 return encoder;
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
 /*
  * Switch off the legacy overlay if this CRTC has one. Userspace is
  * left to switch the overlay on again; in most cases it has to
  * recompute where to put it anyway.
  */
 if (!crtc->overlay)
  return;

 (void) intel_overlay_switch_off(crtc->overlay);
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 /* WA Display #0827: Gen9:all, only relevant with NV12 planes in use */
 return crtc_state->nv12_planes && DISPLAY_VER(display) == 9;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 /* Wa_2006604312:icl,ehl - applies whenever any scaler is in use */
 if (DISPLAY_VER(display) != 11)
  return false;

 return crtc_state->scaler_state.scaler_users > 0;
}

/*
 * Whether the cursor clock gating workaround applies: display ver 11,
 * HDR-only plane mode, and the cursor plane active.
 */
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 /* Wa_1604331009:icl,jsl,ehl */
 if (is_hdr_mode(crtc_state) &&
     crtc_state->active_planes & BIT(PLANE_CURSOR) &&
     DISPLAY_VER(display) == 11)
  return true;

 return false;
}

/*
 * Apply/remove the plane stretch-max workaround needed with VT-d when
 * async flips are used (see needs_async_flip_vtd_wa()).
 */
static void intel_async_flip_vtd_wa(struct intel_display *display,
        enum pipe pipe, bool enable)
{
 if (DISPLAY_VER(display) == 9) {
  /*
   * "Plane N stretch max must be programmed to 11b (x1)
   *  when Async flips are enabled on that plane."
   */
  intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
        SKL_PLANE1_STRETCH_MAX_MASK,
        enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
 } else {
  /* Also needed on HSW/BDW albeit undocumented */
  intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
        HSW_PRI_STRETCH_MAX_MASK,
        enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
 }
}

/*
 * Whether the async-flip + VT-d stretch-max workaround applies:
 * async flip requested, VT-d active, and HSW/BDW/gen9 hardware.
 */
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

 return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
  (DISPLAY_VER(display) == 9 || display->platform.broadwell ||
   display->platform.haswell);
}

/*
 * Call the audio_enable() hook of every encoder attached to @crtc in
 * the new atomic state (hook is optional per encoder).
 */
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
     struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 const struct drm_connector_state *conn_state;
 struct drm_connector *conn;
 int i;

 for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
  struct intel_encoder *encoder =
   to_intel_encoder(conn_state->best_encoder);

  if (conn_state->crtc != &crtc->base)
   continue;

  if (encoder->audio_enable)
   encoder->audio_enable(encoder, crtc_state, conn_state);
 }
}

/*
 * Call the audio_disable() hook of every encoder that was attached to
 * @crtc in the old atomic state (hook is optional per encoder).
 */
static void intel_encoders_audio_disable(struct intel_atomic_state *state,
      struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct drm_connector_state *old_conn_state;
 struct drm_connector *conn;
 int i;

 for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
  struct intel_encoder *encoder =
   to_intel_encoder(old_conn_state->best_encoder);

  if (old_conn_state->crtc != &crtc->base)
   continue;

  if (encoder->audio_disable)
   encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
 }
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
 ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
  (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
 ((old_crtc_state)->feature && \
  (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

/*
 * Planes are being (re-)enabled: the new state is active with active
 * planes that were either off before or are part of a full modeset.
 */
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
       const struct intel_crtc_state *new_crtc_state)
{
 if (!new_crtc_state->hw.active)
  return false;

 return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

/*
 * Planes are being disabled: the old state was active with active
 * planes that are either going away or hit by a full modeset.
 */
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
        const struct intel_crtc_state *new_crtc_state)
{
 if (!old_crtc_state->hw.active)
  return false;

 return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

/* Any VRR timing parameter differs between old and new state. */
static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
          const struct intel_crtc_state *new_crtc_state)
{
 return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
  old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
  old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
  old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
  old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full ||
  old_crtc_state->vrr.vsync_start != new_crtc_state->vrr.vsync_start ||
  old_crtc_state->vrr.vsync_end != new_crtc_state->vrr.vsync_end;
}

/* Any CMRR (content matched refresh rate) M/N value changed. */
static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
    const struct intel_crtc_state *new_crtc_state)
{
 return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
  old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

/*
 * VRR needs to be (re-)enabled: either it's turning on, or it stays on
 * while its M/N, LRR or timing parameters are changing.
 */
static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);

 if (!new_crtc_state->hw.active)
  return false;

 return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
  (new_crtc_state->vrr.enable &&
   (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
    vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/*
 * VRR needs to be disabled first: either it's turning off, or it was
 * on and must be toggled because M/N, LRR or timing params change.
 */
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
         struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);

 if (!old_crtc_state->hw.active)
  return false;

 return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
  (old_crtc_state->vrr.enable &&
   (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
    vrr_params_changed(old_crtc_state, new_crtc_state)));
}

/*
 * Audio needs enabling: it's turning on, or it stays on but the ELD
 * (monitor audio capabilities) changed.
 */
static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
      const struct intel_crtc_state *new_crtc_state)
{
 if (!new_crtc_state->hw.active)
  return false;

 return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
  (new_crtc_state->has_audio &&
   memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

/*
 * Audio needs disabling first: it's turning off, or it stays on but
 * the ELD changed (requiring an off/on cycle).
 */
static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
       const struct intel_crtc_state *new_crtc_state)
{
 if (!old_crtc_state->hw.active)
  return false;

 return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
  (old_crtc_state->has_audio &&
   memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling

/*
 * Per-crtc work that must run after the plane updates of an atomic
 * commit: frontbuffer flip notification, post-update watermark/FBC
 * handling, tearing down workarounds that are no longer needed,
 * audio enabling, and ALPM/PSR post-update handling.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 intel_frontbuffer_flip(display, new_crtc_state->fb_bits);

 if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
  intel_update_watermarks(display);

 intel_fbc_post_update(state, crtc);

 /*
  * Each workaround below is only deactivated on a true
  * needed -> not-needed transition (the mirror of the
  * activations done in intel_pre_plane_update()).
  */
 if (needs_async_flip_vtd_wa(old_crtc_state) &&
     !needs_async_flip_vtd_wa(new_crtc_state))
  intel_async_flip_vtd_wa(display, pipe, false);

 if (needs_nv12_wa(old_crtc_state) &&
     !needs_nv12_wa(new_crtc_state))
  skl_wa_827(display, pipe, false);

 if (needs_scalerclk_wa(old_crtc_state) &&
     !needs_scalerclk_wa(new_crtc_state))
  icl_wa_scalerclkgating(display, pipe, false);

 if (needs_cursorclk_wa(old_crtc_state) &&
     !needs_cursorclk_wa(new_crtc_state))
  icl_wa_cursorclkgating(display, pipe, false);

 if (intel_crtc_needs_color_update(new_crtc_state))
  intel_color_post_update(new_crtc_state);

 /* Audio is enabled here; disabling happens in intel_pre_plane_update(). */
 if (audio_enabling(old_crtc_state, new_crtc_state))
  intel_encoders_audio_enable(state, crtc);

 intel_alpm_post_plane_update(state, crtc);

 intel_psr_post_plane_update(state, crtc);
}

/*
 * Post-plane-update work that must be deferred until after the state
 * readout of the commit has completed (ordering requirement, see the
 * comments below).
 */
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);

 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
 hsw_ips_post_update(state, crtc);

 /*
  * Activate DRRS after state readout to avoid
  * dp_m_n vs. dp_m2_n2 confusion on BDW+.
  */
 intel_drrs_activate(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
     struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 u8 update_planes = crtc_state->update_planes;
 const struct intel_plane_state __maybe_unused *plane_state;
 struct intel_plane *plane;
 int idx;

 /* Arm flip-done events on every plane of this crtc being updated. */
 for_each_new_intel_plane_in_state(state, plane, plane_state, idx) {
  if (plane->pipe != crtc->pipe)
   continue;
  if (!(update_planes & BIT(plane->id)))
   continue;

  plane->enable_flip_done(plane);
 }
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
      struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 u8 update_planes = crtc_state->update_planes;
 const struct intel_plane_state __maybe_unused *plane_state;
 struct intel_plane *plane;
 int idx;

 /* Disarm flip-done events on every plane of this crtc being updated. */
 for_each_new_intel_plane_in_state(state, plane, plane_state, idx) {
  if (plane->pipe != crtc->pipe)
   continue;
  if (!(update_planes & BIT(plane->id)))
   continue;

  plane->disable_flip_done(plane);
 }
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
          struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
           ~new_crtc_state->async_flip_planes;
 const struct intel_plane_state *old_plane_state;
 struct intel_plane *plane;
 bool need_vbl_wait = false;
 int i;

 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
  if (plane->need_async_flip_toggle_wa &&
      plane->pipe == crtc->pipe &&
      disable_async_flip_planes & BIT(plane->id)) {
   /*
 * Apart from the async flip bit we want to
 * preserve the old state for the plane.
 */

   intel_plane_async_flip(NULL, plane,
            old_crtc_state, old_plane_state, false);
   need_vbl_wait = true;
  }
 }

 if (need_vbl_wait)
  intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Per-crtc work that must run before the plane updates of an atomic
 * commit: PSR/ALPM pre-update, VRR/audio/DRRS teardown, activation of
 * various display workarounds, cxsr handling, intermediate watermark
 * programming and underrun-reporting adjustments. The statement order
 * below is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 intel_alpm_pre_plane_update(state, crtc);
 intel_psr_pre_plane_update(state, crtc);

 /* VRR teardown also rolls back the timings derived from it. */
 if (intel_crtc_vrr_disabling(state, crtc)) {
  intel_vrr_disable(old_crtc_state);
  intel_crtc_update_active_timings(old_crtc_state, false);
 }

 /* Audio is re-enabled, if needed, in intel_post_plane_update(). */
 if (audio_disabling(old_crtc_state, new_crtc_state))
  intel_encoders_audio_disable(state, crtc);

 intel_drrs_deactivate(old_crtc_state);

 if (hsw_ips_pre_update(state, crtc))
  intel_crtc_wait_for_next_vblank(crtc);

 if (intel_fbc_pre_update(state, crtc))
  intel_crtc_wait_for_next_vblank(crtc);

 /*
  * Each workaround below is activated on a true
  * not-needed -> needed transition; the mirrored
  * deactivation happens in intel_post_plane_update().
  */
 if (!needs_async_flip_vtd_wa(old_crtc_state) &&
     needs_async_flip_vtd_wa(new_crtc_state))
  intel_async_flip_vtd_wa(display, pipe, true);

 /* Display WA 827 */
 if (!needs_nv12_wa(old_crtc_state) &&
     needs_nv12_wa(new_crtc_state))
  skl_wa_827(display, pipe, true);

 /* Wa_2006604312:icl,ehl */
 if (!needs_scalerclk_wa(old_crtc_state) &&
     needs_scalerclk_wa(new_crtc_state))
  icl_wa_scalerclkgating(display, pipe, true);

 /* Wa_1604331009:icl,jsl,ehl */
 if (!needs_cursorclk_wa(old_crtc_state) &&
     needs_cursorclk_wa(new_crtc_state))
  icl_wa_cursorclkgating(display, pipe, true);

 /*
  * Vblank time updates from the shadow to live plane control register
  * are blocked if the memory self-refresh mode is active at that
  * moment. So to make sure the plane gets truly disabled, disable
  * first the self-refresh mode. The self-refresh enable bit in turn
  * will be checked/applied by the HW only at the next frame start
  * event which is after the vblank start event, so we need to have a
  * wait-for-vblank between disabling the plane and the pipe.
  */
 if (HAS_GMCH(display) && old_crtc_state->hw.active &&
     new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
  intel_crtc_wait_for_next_vblank(crtc);

 /*
  * IVB workaround: must disable low power watermarks for at least
  * one frame before enabling scaling.  LP watermarks can be re-enabled
  * when scaling is disabled.
  *
  * WaCxSRDisabledForSpriteScaling:ivb
  */
 if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
     new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
  intel_crtc_wait_for_next_vblank(crtc);

 /*
  * If we're doing a modeset we don't need to do any
  * pre-vblank watermark programming here.
  */
 if (!intel_crtc_needs_modeset(new_crtc_state)) {
  /*
   * For platforms that support atomic watermarks, program the
   * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
   * will be the intermediate values that are safe for both pre- and
   * post- vblank; when vblank happens, the 'active' values will be set
   * to the final 'target' values and we'll do this again to get the
   * optimal watermarks.  For gen9+ platforms, the values we program here
   * will be the final target values which will get automatically latched
   * at vblank time; no further programming will be necessary.
   *
   * If a platform hasn't been transitioned to atomic watermarks yet,
   * we'll continue to update watermarks the old way, if flags tell
   * us to.
   */
  if (!intel_initial_watermarks(state, crtc))
   if (new_crtc_state->update_wm_pre)
    intel_update_watermarks(display);
 }

 /*
  * Gen2 reports pipe underruns whenever all planes are disabled.
  * So disable underrun reporting before all the planes get disabled.
  *
  * We do this after .initial_watermarks() so that we have a
  * chance of catching underruns with the intermediate watermarks
  * vs. the old plane configuration.
  */
 if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
  intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

 /*
  * WA for platforms where async address update enable bit
  * is double buffered and only latched at start of vblank.
  */
 if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
  intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
          struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 unsigned int update_mask = new_crtc_state->update_planes;
 const struct intel_plane_state *old_plane_state;
 struct intel_plane *plane;
 unsigned fb_bits = 0;
 int i;

 intel_crtc_dpms_overlay_disable(crtc);

 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
  if (crtc->pipe != plane->pipe ||
      !(update_mask & BIT(plane->id)))
   continue;

  intel_plane_disable_arm(NULL, plane, new_crtc_state);

  if (old_plane_state->uapi.visible)
   fb_bits |= plane->frontbuffer_bit;
 }

 intel_frontbuffer_flip(display, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
 struct intel_display *display = to_intel_display(state);
 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
 struct intel_crtc *crtc;
 int i;

 /*
  * Make sure the DPLL state is up-to-date for fastset TypeC ports
  * after non-blocking commits.
  * TODO: Update the DPLL state for all cases in the
  * encoder->update_prepare() hook.
  */
 if (!display->dpll.mgr)
  return;

 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  if (intel_crtc_needs_modeset(new_crtc_state))
   continue;

  /* Carry the old DPLL selection over for fastsets. */
  new_crtc_state->intel_dpll = old_crtc_state->intel_dpll;
  new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
 }
}

/* Call the ->pre_pll_enable() hook of every encoder attached to @crtc. */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 const struct drm_connector_state *conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_new_connector_in_state(&state->base, connector, conn_state, idx) {
  struct intel_encoder *encoder;

  if (conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(conn_state->best_encoder);
  if (encoder->pre_pll_enable)
   encoder->pre_pll_enable(state, encoder, crtc_state, conn_state);
 }
}

/* Call the ->pre_enable() hook of every encoder attached to @crtc. */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
          struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 const struct drm_connector_state *conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_new_connector_in_state(&state->base, connector, conn_state, idx) {
  struct intel_encoder *encoder;

  if (conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(conn_state->best_encoder);
  if (encoder->pre_enable)
   encoder->pre_enable(state, encoder, crtc_state, conn_state);
 }
}

/*
 * Call the ->enable() hook of every encoder attached to @crtc and
 * notify the OpRegion that each encoder is now active.
 */
static void intel_encoders_enable(struct intel_atomic_state *state,
      struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 const struct drm_connector_state *conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_new_connector_in_state(&state->base, connector, conn_state, idx) {
  struct intel_encoder *encoder;

  if (conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(conn_state->best_encoder);
  if (encoder->enable)
   encoder->enable(state, encoder, crtc_state, conn_state);
  /* OpRegion notification happens even without an ->enable() hook. */
  intel_opregion_notify_encoder(encoder, true);
 }
}

/*
 * Notify the OpRegion that each encoder attached to @crtc is going down,
 * then call its ->disable() hook.
 */
static void intel_encoders_disable(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct drm_connector_state *old_conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_old_connector_in_state(&state->base, connector, old_conn_state, idx) {
  struct intel_encoder *encoder;

  if (old_conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(old_conn_state->best_encoder);
  /* OpRegion notification happens even without a ->disable() hook. */
  intel_opregion_notify_encoder(encoder, false);
  if (encoder->disable)
   encoder->disable(state, encoder, old_crtc_state, old_conn_state);
 }
}

/* Call the ->post_disable() hook of every encoder attached to @crtc. */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
     struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct drm_connector_state *old_conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_old_connector_in_state(&state->base, connector, old_conn_state, idx) {
  struct intel_encoder *encoder;

  if (old_conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(old_conn_state->best_encoder);
  if (encoder->post_disable)
   encoder->post_disable(state, encoder, old_crtc_state, old_conn_state);
 }
}

/* Call the ->post_pll_disable() hook of every encoder attached to @crtc. */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
         struct intel_crtc *crtc)
{
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 const struct drm_connector_state *old_conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_old_connector_in_state(&state->base, connector, old_conn_state, idx) {
  struct intel_encoder *encoder;

  if (old_conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(old_conn_state->best_encoder);
  if (encoder->post_pll_disable)
   encoder->post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
 }
}

/* Call the ->update_pipe() hook of every encoder attached to @crtc. */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
           struct intel_crtc *crtc)
{
 const struct intel_crtc_state *crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 const struct drm_connector_state *conn_state;
 struct drm_connector *connector;
 int idx;

 for_each_new_connector_in_state(&state->base, connector, conn_state, idx) {
  struct intel_encoder *encoder;

  if (conn_state->crtc != &crtc->base)
   continue;

  encoder = to_intel_encoder(conn_state->best_encoder);
  if (encoder->update_pipe)
   encoder->update_pipe(state, encoder, crtc_state, conn_state);
 }
}

/*
 * Program the CPU transcoder for ILK-style (PCH based) platforms:
 * M/N values, transcoder timings and pipeconf.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

 if (crtc_state->has_pch_encoder) {
  /* PCH encoders use the FDI M/N values. */
  intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
            &crtc_state->fdi_m_n);
 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
  /* CPU DP encoders use the DP M/N (and M2/N2) values. */
  intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
            &crtc_state->dp_m_n);
  intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
            &crtc_state->dp_m2_n2);
 }

 intel_set_transcoder_timings(crtc_state);

 ilk_set_pipeconf(crtc_state);
}

/*
 * Full modeset enable sequence for ILK-style (PCH based) platforms.
 * The ordering of the steps below follows hardware requirements and
 * must not be changed casually.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 if (drm_WARN_ON(display->drm, crtc->active))
  return;

 /*
  * Sometimes spurious CPU pipe underruns happen during FDI
  * training, at least with VGA+HDMI cloning. Suppress them.
  *
  * On ILK we get an occasional spurious CPU pipe underruns
  * between eDP port A enable and vdd enable. Also PCH port
  * enable seems to result in the occasional CPU pipe underrun.
  *
  * Spurious PCH underruns also occur during PCH enabling.
  */
 intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
 intel_set_pch_fifo_underrun_reporting(display, pipe, false);

 ilk_configure_cpu_transcoder(new_crtc_state);

 intel_set_pipe_src_size(new_crtc_state);

 crtc->active = true;

 intel_encoders_pre_enable(state, crtc);

 if (new_crtc_state->has_pch_encoder) {
  ilk_pch_pre_enable(state, crtc);
 } else {
  /* Without a PCH encoder the FDI link must be fully off. */
  assert_fdi_tx_disabled(display, pipe);
  assert_fdi_rx_disabled(display, pipe);
 }

 ilk_pfit_enable(new_crtc_state);

 /*
  * On ILK+ LUT must be loaded before the pipe is running but with
  * clocks enabled
  */
 intel_color_modeset(new_crtc_state);

 intel_initial_watermarks(state, crtc);
 intel_enable_transcoder(new_crtc_state);

 if (new_crtc_state->has_pch_encoder)
  ilk_pch_enable(state, crtc);

 intel_crtc_vblank_on(new_crtc_state);

 intel_encoders_enable(state, crtc);

 if (HAS_PCH_CPT(display))
  intel_wait_for_pipe_scanline_moving(crtc);

 /*
  * Must wait for vblank to avoid spurious PCH FIFO underruns.
  * And a second vblank wait is needed at least on ILK with
  * some interlaced HDMI modes. Let's do the double wait always
  * in case there are more corner cases we don't know about.
  */
 if (new_crtc_state->has_pch_encoder) {
  intel_crtc_wait_for_next_vblank(crtc);
  intel_crtc_wait_for_next_vblank(crtc);
 }
 /* Re-arm underrun reporting now that the pipe is stable. */
 intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
 intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}

/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled;
}

static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
 struct intel_display *display = to_intel_display(crtc);
 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
 u32 val = enable ? mask : 0;

 /* Set (or clear) all three clock-gating-disable bits together. */
 intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe), mask, val);
}

static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

 intel_de_write(display, WM_LINETIME(crtc->pipe),
         HSW_LINETIME(crtc_state->linetime) |
         HSW_IPS_LINETIME(crtc_state->ips_linetime));
}

static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);

 intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
       HSW_FRAME_START_DELAY_MASK,
       HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}

/*
 * Program the CPU transcoder for HSW+ platforms: M/N values,
 * transcoder (and VRR) timings, pixel multiplier, frame start delay
 * and the transcoder configuration register.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

 if (crtc_state->has_pch_encoder) {
  /* PCH encoders use the FDI M/N values. */
  intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
            &crtc_state->fdi_m_n);
 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
  /* DP encoders use the DP M/N (and M2/N2) values. */
  intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
            &crtc_state->dp_m_n);
  intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
            &crtc_state->dp_m2_n2);
 }

 intel_set_transcoder_timings(crtc_state);
 if (HAS_VRR(display))
  intel_vrr_set_transcoder_timings(crtc_state);

 /* The eDP transcoder has no pixel multiplier register. */
 if (cpu_transcoder != TRANSCODER_EDP)
  intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
          crtc_state->pixel_multiplier - 1);

 hsw_set_frame_start_delay(crtc_state);

 hsw_set_transconf(crtc_state);
}

/*
 * Full modeset enable sequence for HSW+ platforms. Handles joined
 * pipes: several loops iterate over all pipes taking part in this
 * crtc's modeset enable. The ordering of the steps below follows
 * hardware requirements and must not be changed casually.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 struct intel_crtc *pipe_crtc;
 int i;

 if (drm_WARN_ON(display->drm, crtc->active))
  return;
 /* Hand each participating pipe over from DMC control. */
 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
  const struct intel_crtc_state *new_pipe_crtc_state =
   intel_atomic_get_new_crtc_state(state, pipe_crtc);

  intel_dmc_enable_pipe(new_pipe_crtc_state);
 }

 intel_encoders_pre_pll_enable(state, crtc);

 if (new_crtc_state->intel_dpll)
  intel_dpll_enable(new_crtc_state);

 intel_encoders_pre_enable(state, crtc);

 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
  const struct intel_crtc_state *pipe_crtc_state =
   intel_atomic_get_new_crtc_state(state, pipe_crtc);

  intel_dsc_enable(pipe_crtc_state);

  if (HAS_UNCOMPRESSED_JOINER(display))
   intel_uncompressed_joiner_enable(pipe_crtc_state);

  intel_set_pipe_src_size(pipe_crtc_state);

  if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
   bdw_set_pipe_misc(NULL, pipe_crtc_state);
 }

 if (!transcoder_is_dsi(cpu_transcoder))
  hsw_configure_cpu_transcoder(new_crtc_state);

 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
  const struct intel_crtc_state *pipe_crtc_state =
   intel_atomic_get_new_crtc_state(state, pipe_crtc);

  pipe_crtc->active = true;

  if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
   glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);

  if (DISPLAY_VER(display) >= 9)
   skl_pfit_enable(pipe_crtc_state);
  else
   ilk_pfit_enable(pipe_crtc_state);

  /*
   * On ILK+ LUT must be loaded before the pipe is running but with
   * clocks enabled
   */
  intel_color_modeset(pipe_crtc_state);

  hsw_set_linetime_wm(pipe_crtc_state);

  if (DISPLAY_VER(display) >= 11)
   icl_set_pipe_chicken(pipe_crtc_state);

  intel_initial_watermarks(state, pipe_crtc);
 }

 intel_encoders_enable(state, crtc);

 for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
  const struct intel_crtc_state *pipe_crtc_state =
   intel_atomic_get_new_crtc_state(state, pipe_crtc);
  enum pipe hsw_workaround_pipe;

  /* WA #1180 teardown: needs a vblank before re-enabling gating. */
  if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
   intel_crtc_wait_for_next_vblank(pipe_crtc);
   glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
  }

  /*
   * If we change the relative order between pipe/planes
   * enabling, we need to change the workaround.
   */
  hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
  if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) {
   struct intel_crtc *wa_crtc =
    intel_crtc_for_pipe(display, hsw_workaround_pipe);

   intel_crtc_wait_for_next_vblank(wa_crtc);
   intel_crtc_wait_for_next_vblank(wa_crtc);
  }
 }
}

/*
 * Full modeset disable sequence for ILK-style (PCH based) platforms,
 * mirroring ilk_crtc_enable(). The ordering of the steps below follows
 * hardware requirements and must not be changed casually.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 /*
  * Sometimes spurious CPU pipe underruns happen when the
  * pipe is already disabled, but FDI RX/TX is still enabled.
  * Happens at least with VGA+HDMI cloning. Suppress them.
  */
 intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
 intel_set_pch_fifo_underrun_reporting(display, pipe, false);

 intel_encoders_disable(state, crtc);

 intel_crtc_vblank_off(old_crtc_state);

 intel_disable_transcoder(old_crtc_state);

 ilk_pfit_disable(old_crtc_state);

 if (old_crtc_state->has_pch_encoder)
  ilk_pch_disable(state, crtc);

 intel_encoders_post_disable(state, crtc);

 if (old_crtc_state->has_pch_encoder)
  ilk_pch_post_disable(state, crtc);

 /* Re-arm underrun reporting once everything is torn down. */
 intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
 intel_set_pch_fifo_underrun_reporting(display, pipe, true);
}

/*
 * Full modeset disable sequence for HSW+ platforms, mirroring
 * hsw_crtc_enable(). Handles joined pipes via the final loop.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 const struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 struct intel_crtc *pipe_crtc;
 int i;

 /*
  * FIXME collapse everything to one hook.
  * Need care with mst->ddi interactions.
  */
 intel_encoders_disable(state, crtc);
 intel_encoders_post_disable(state, crtc);

 intel_dpll_disable(old_crtc_state);

 intel_encoders_post_pll_disable(state, crtc);

 /* Hand each participating pipe back to DMC control. */
 for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
  const struct intel_crtc_state *old_pipe_crtc_state =
   intel_atomic_get_old_crtc_state(state, pipe_crtc);

  intel_dmc_disable_pipe(old_pipe_crtc_state);
 }
}

/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
 if (phy == PHY_NONE)
  return false;
 else if (display->platform.alderlake_s)
  return phy <= PHY_E;
 else if (display->platform.dg1 || display->platform.rocketlake)
  return phy <= PHY_D;
 else if (display->platform.jasperlake || display->platform.elkhartlake)
  return phy <= PHY_C;
 else if (display->platform.alderlake_p || IS_DISPLAY_VER(display, 11, 12))
  return phy <= PHY_B;
 else
  /*
 * DG2 outputs labelled as "combo PHY" in the bspec use
 * SNPS PHYs with completely different programming,
 * hence we always return false here.
 */

  return false;
}

/* Prefer intel_encoder_is_tc() */
bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
 /*
  * Discrete GPU phy's are not attached to FIA's to support TC
  * subsystem Legacy or non-legacy, and only support native DP/HDMI
  */
 if (display->platform.dgfx)
  return false;

 /* Per-platform TC PHY range. */
 if (DISPLAY_VER(display) >= 13)
  return phy >= PHY_F && phy <= PHY_I;

 if (display->platform.tigerlake)
  return phy >= PHY_D && phy <= PHY_I;

 if (display->platform.icelake)
  return phy >= PHY_C && phy <= PHY_F;

 return false;
}

/* Prefer intel_encoder_is_snps() */
bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
 /*
 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
 * (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
 */

 return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}

/* Prefer intel_encoder_to_phy() */
enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{
 if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD)
  return PHY_D + port - PORT_D_XELPD;
 else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1)
  return PHY_F + port - PORT_TC1;
 else if (display->platform.alderlake_s && port >= PORT_TC1)
  return PHY_B + port - PORT_TC1;
 else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1)
  return PHY_C + port - PORT_TC1;
 else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
   port == PORT_D)
  return PHY_A;

 return PHY_A + port - PORT_A;
}

/* Prefer intel_encoder_to_tc() */
enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
 if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
  return TC_PORT_NONE;

 if (DISPLAY_VER(display) >= 12)
  return TC_PORT_1 + port - PORT_TC1;
 else
  return TC_PORT_1 + port - PORT_C;
}

/* Map @encoder's port to its PHY. */
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
 return intel_port_to_phy(to_intel_display(encoder), encoder->port);
}

/* True if @encoder sits on a combo PHY. */
bool intel_encoder_is_combo(struct intel_encoder *encoder)
{
 return intel_phy_is_combo(to_intel_display(encoder),
      intel_encoder_to_phy(encoder));
}

/* True if @encoder sits on a Synopsys (SNPS) PHY. */
bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
 return intel_phy_is_snps(to_intel_display(encoder),
     intel_encoder_to_phy(encoder));
}

/* True if @encoder sits on a Type-C PHY. */
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
 return intel_phy_is_tc(to_intel_display(encoder),
          intel_encoder_to_phy(encoder));
}

/* Map @encoder's port to its TC port (TC_PORT_NONE if not a TC port). */
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
 return intel_port_to_tc(to_intel_display(encoder), encoder->port);
}

/* Power domain of @dig_port's AUX channel (TBT vs. legacy variant). */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
 struct intel_display *display = to_intel_display(dig_port);

 return intel_tc_port_in_tbt_alt_mode(dig_port) ?
  intel_display_power_tbt_aux_domain(display, dig_port->aux_ch) :
  intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}

static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
       struct intel_power_domain_mask *mask)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 struct drm_encoder *encoder;
 enum pipe pipe = crtc->pipe;

 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

 if (!crtc_state->hw.active)
  return;

 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
 if (crtc_state->pch_pfit.enabled ||
     crtc_state->pch_pfit.force_thru)
  set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

 drm_for_each_encoder_mask(encoder, display->drm,
      crtc_state->uapi.encoder_mask) {
  struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

  set_bit(intel_encoder->power_domain, mask->bits);
 }

 if (HAS_DDI(display) && crtc_state->has_audio)
  set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

 if (crtc_state->intel_dpll)
  set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

 if (crtc_state->dsc.compression_enable)
  set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
       struct intel_power_domain_mask *old_domains)
{
 struct intel_display *display = to_intel_display(crtc_state);
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum intel_display_power_domain domain;
 struct intel_power_domain_mask domains, new_domains;

 get_crtc_power_domains(crtc_state, &domains);

 bitmap_andnot(new_domains.bits,
        domains.bits,
        crtc->enabled_power_domains.mask.bits,
        POWER_DOMAIN_NUM);
 bitmap_andnot(old_domains->bits,
        crtc->enabled_power_domains.mask.bits,
        domains.bits,
        POWER_DOMAIN_NUM);

 for_each_power_domain(domain, &new_domains)
  intel_display_power_get_in_set(display,
            &crtc->enabled_power_domains,
            domain);
}

/* Release every power domain reference recorded in @domains. */
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
       struct intel_power_domain_mask *domains)
{
 intel_display_power_put_mask_in_set(to_intel_display(crtc),
         &crtc->enabled_power_domains,
         domains);
}

/*
 * Program the CPU transcoder for pre-ILK (i9xx/VLV/CHV) platforms:
 * DP M/N values, transcoder timings and pipeconf.
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

 if (intel_crtc_has_dp_encoder(crtc_state)) {
  /* DP encoders need the M/N (and M2/N2) values programmed. */
  intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
            &crtc_state->dp_m_n);
  intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
            &crtc_state->dp_m2_n2);
 }

 intel_set_transcoder_timings(crtc_state);

 i9xx_set_pipeconf(crtc_state);
}

/*
 * Full modeset enable sequence for VLV/CHV. The ordering of the steps
 * below follows hardware requirements and must not be changed casually.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
       struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 if (drm_WARN_ON(display->drm, crtc->active))
  return;

 i9xx_configure_cpu_transcoder(new_crtc_state);

 intel_set_pipe_src_size(new_crtc_state);

 intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);

 /* CHV pipe B: select legacy blending and a zeroed background canvas. */
 if (display->platform.cherryview && pipe == PIPE_B) {
  intel_de_write(display, CHV_BLEND(display, pipe),
          CHV_BLEND_LEGACY);
  intel_de_write(display, CHV_CANVAS(display, pipe), 0);
 }

 crtc->active = true;

 intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

 intel_encoders_pre_pll_enable(state, crtc);

 if (display->platform.cherryview)
  chv_enable_pll(new_crtc_state);
 else
  vlv_enable_pll(new_crtc_state);

 intel_encoders_pre_enable(state, crtc);

 i9xx_pfit_enable(new_crtc_state);

 intel_color_modeset(new_crtc_state);

 intel_initial_watermarks(state, crtc);
 intel_enable_transcoder(new_crtc_state);

 intel_crtc_vblank_on(new_crtc_state);

 intel_encoders_enable(state, crtc);
}

/*
 * Full modeset enable sequence for pre-ILK (non-VLV/CHV) platforms.
 * The ordering of the steps below follows hardware requirements and
 * must not be changed casually.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
        struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);
 const struct intel_crtc_state *new_crtc_state =
  intel_atomic_get_new_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 if (drm_WARN_ON(display->drm, crtc->active))
  return;

 i9xx_configure_cpu_transcoder(new_crtc_state);

 intel_set_pipe_src_size(new_crtc_state);

 crtc->active = true;

 /* Gen2 underrun reporting is handled separately (see below). */
 if (DISPLAY_VER(display) != 2)
  intel_set_cpu_fifo_underrun_reporting(display, pipe, true);

 intel_encoders_pre_enable(state, crtc);

 i9xx_enable_pll(new_crtc_state);

 i9xx_pfit_enable(new_crtc_state);

 intel_color_modeset(new_crtc_state);

 if (!intel_initial_watermarks(state, crtc))
  intel_update_watermarks(display);
 intel_enable_transcoder(new_crtc_state);

 intel_crtc_vblank_on(new_crtc_state);

 intel_encoders_enable(state, crtc);

 /* prevents spurious underruns */
 if (DISPLAY_VER(display) == 2)
  intel_crtc_wait_for_next_vblank(crtc);
}

/*
 * Full crtc disable sequence for i9xx/VLV/CHV platforms: tear down in
 * reverse order of enable (encoders, vblank, transcoder, pfit, PLL),
 * then apply the platform-specific power-saving / underrun quirks.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
         struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(state);
 struct intel_crtc_state *old_crtc_state =
  intel_atomic_get_old_crtc_state(state, crtc);
 enum pipe pipe = crtc->pipe;

 /*
  * On gen2 planes are double buffered but the pipe isn't, so we must
  * wait for planes to fully turn off before disabling the pipe.
  */
 if (DISPLAY_VER(display) == 2)
  intel_crtc_wait_for_next_vblank(crtc);

 intel_encoders_disable(state, crtc);

 intel_crtc_vblank_off(old_crtc_state);

 intel_disable_transcoder(old_crtc_state);

 i9xx_pfit_disable(old_crtc_state);

 intel_encoders_post_disable(state, crtc);

 /* DSI keeps its PLL running; otherwise pick the platform PLL disable. */
 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
  if (display->platform.cherryview)
   chv_disable_pll(display, pipe);
  else if (display->platform.valleyview)
   vlv_disable_pll(display, pipe);
  else
   i9xx_disable_pll(old_crtc_state);
 }

 intel_encoders_post_pll_disable(state, crtc);

 /* Gen2 never had underrun reporting enabled (see i9xx_crtc_enable). */
 if (DISPLAY_VER(display) != 2)
  intel_set_cpu_fifo_underrun_reporting(display, pipe, false);

 if (!display->funcs.wm->initial_watermarks)
  intel_update_watermarks(display);

 /* clock the pipe down to 640x480@60 to potentially save power */
 if (display->platform.i830)
  i830_enable_pipe(display, pipe);
}

/*
 * Release a drm encoder: unregister it from the drm core, then free
 * the containing intel_encoder allocation. Cleanup must happen before
 * the free, since drm_encoder_cleanup() still touches the encoder.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
 drm_encoder_cleanup(encoder);
 kfree(to_intel_encoder(encoder));
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
 struct intel_display *display = to_intel_display(crtc);

--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=92 H=97 G=94

¤ Dauer der Verarbeitung: 0.12 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.






                                                                                                                                                                                                                                                                                                                                                                                                     


Neuigkeiten

     Aktuelles
     Motto des Tages

Software

     Produkte
     Quellcodebibliothek

Aktivitäten

     Artikel über Sicherheit
     Anleitung zur Aktivierung von SSL

Muße

     Gedichte
     Musik
     Bilder

Jenseits des Üblichen ....

Besucherstatistik

Besucherstatistik

Monitoring

Montastic status badge