Source-code library statistics page — products/Sources/formal languages/C/Linux/drivers/gpu/drm/amd/display/dc/hwss/dcn10/ (open-source operating system, version 6.17.9). File dated 2025-10-24, size 128 kB; image not shown.

Source: dcn10_hwseq.c — Language: C

 
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include <linux/delay.h>
#include "dm_services.h"
#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hwseq.h"
#include "dcn10/dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10/dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "reg_helper.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_hubbub.h"
#include "dcn10/dcn10_cm_common.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h"
#include "dce/dmub_psr.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"
#include "dc_state_priv.h"

#define DC_LOGGER \
 dc_logger
#define DC_LOGGER_INIT(logger) \
 struct dal_logger *dc_logger = logger

#define CTX \
 hws->ctx
#define REG(reg)\
 hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
 hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
 print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2

/*
 * Print a DCHUB refclk cycle count as microseconds with three
 * fractional digits, converting via the current reference clock.
 */
static void print_microsec(struct dc_context *dc_ctx,
      struct dc_log_buffer_ctx *log_ctx,
      uint32_t ref_cycle)
{
 static const unsigned int frac = 1000;
 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
 const uint32_t us_scaled = (ref_cycle * frac) / ref_clk_mhz;

 DTN_INFO(" %11d.%03d", us_scaled / frac, us_scaled % frac);
}

/*
 * Delay until we passed busy-until-point to which we can
 * do necessary locking/programming on consecutive full updates.
 * The pipe is considered busy until the vupdate window of the frame
 * armed by dcn10_set_wait_for_update_needed_for_pipe() has passed.
 */

void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
{
 struct crtc_position position;
 struct dc_stream_state *stream = pipe_ctx->stream;
 unsigned int vpos, frame_count;
 uint32_t vupdate_start, vupdate_end, vblank_start;
 unsigned int lines_to_vupdate, us_to_vupdate;
 unsigned int us_per_line, us_vupdate;

 /* Nothing to wait on without a stream, TG and stream encoder. */
 if (!pipe_ctx->stream ||
  !pipe_ctx->stream_res.tg ||
  !pipe_ctx->stream_res.stream_enc)
  return;

 /* Only the first pipe of an ODM combine performs the wait. */
 if (pipe_ctx->prev_odm_pipe &&
    pipe_ctx->stream)
  return;

 if (!pipe_ctx->wait_is_required)
  return;

 struct timing_generator *tg = pipe_ctx->stream_res.tg;

 if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
  return;

 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
      &vupdate_end);

 dc->hwss.get_position(&pipe_ctx, 1, &position);
 vpos = position.vertical_count;

 frame_count = tg->funcs->get_frame_count(tg);

 /* Armed wait is more than two frames old: the update has latched
  * by now, so there is nothing left to wait for.
  */
 if (frame_count - pipe_ctx->wait_frame_count > 2)
  return;

 vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;

 /* Lines remaining until vupdate; wrap across v_total when the
  * current position is already past the vupdate start.
  */
 if (vpos >= vupdate_start && vupdate_start >= vblank_start)
  lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
 else
  lines_to_vupdate = vupdate_start - vpos;

 /* us per line = h_total / pix_clk_hz * 1e6, with pix_clk in 100Hz units. */
 us_per_line =
  stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
 us_to_vupdate = lines_to_vupdate * us_per_line;

 if (vupdate_end < vupdate_start)
  vupdate_end += stream->timing.v_total;

 /* More than a full frame away from vupdate: no sleep needed. */
 if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
  us_to_vupdate = 0;

 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;

 if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
  //surface updates come in at high irql
  pipe_ctx->wait_is_required = true;
  return;
 }

 fsleep(us_to_vupdate + us_vupdate);

 //clear
 pipe_ctx->next_vupdate = 0;
 pipe_ctx->wait_frame_count = 0;
 pipe_ctx->wait_is_required = false;
}

/*
 * On pipe unlock and programming, indicate pipe will be busy
 * until some frame and line (vupdate); this is required for consecutive
 * full updates, which need to wait for the previous update
 * to latch before trying to program the next one.
 */

void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
 uint32_t vupdate_start, vupdate_end;
 struct crtc_position position;
 unsigned int vpos, cur_frame;

 /* Nothing to arm without a stream, TG and stream encoder. */
 if (!pipe_ctx->stream ||
  !pipe_ctx->stream_res.tg ||
  !pipe_ctx->stream_res.stream_enc)
  return;

 dc->hwss.get_position(&pipe_ctx, 1, &position);
 vpos = position.vertical_count;

 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
      &vupdate_end);

 struct timing_generator *tg = pipe_ctx->stream_res.tg;

 struct optc *optc1 = DCN10TG_FROM_TG(tg);

 /* max_frame_count is needed below for frame-counter wraparound. */
 ASSERT(optc1->max_frame_count != 0);

 if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
  return;

 pipe_ctx->next_vupdate = vupdate_start;

 cur_frame = tg->funcs->get_frame_count(tg);

 /* Before vupdate: latch happens this frame. Past it: next frame,
  * accounting for hardware frame-counter wraparound.
  */
 if (vpos < vupdate_start) {
  pipe_ctx->wait_frame_count = cur_frame;
 } else {
  if (cur_frame + 1 > optc1->max_frame_count)
   pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
  else
   pipe_ctx->wait_frame_count = cur_frame + 1;
 }

 pipe_ctx->wait_is_required = true;
}

/*
 * (Un)lock every eligible pipe in the given state. Only the top pipe
 * of each tree is locked to avoid redundant (un)locking; disabled,
 * stream-less, plane-less and SubVP phantom pipes are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
 struct dc_state *context,
 bool lock)
{
 int i;

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
  struct timing_generator *tg = pipe_ctx->stream_res.tg;

  if (pipe_ctx->top_pipe ||
      !pipe_ctx->stream ||
      (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
      !tg->funcs->is_tg_enabled(tg) ||
      dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
   continue;

  dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
 }
}

/* Log the MPC and DPP CRC result registers, when this ASIC has them. */
static void log_mpc_crc(struct dc *dc,
 struct dc_log_buffer_ctx *log_ctx)
{
 struct dc_context *dc_ctx = dc->ctx;
 struct dce_hwseq *hws = dc->hwseq;

 /* REG() yields 0 when the register is absent on this ASIC. */
 if (REG(MPC_CRC_RESULT_GB))
  DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
  REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
  DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
  REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}

static void dcn10_log_hubbub_state(struct dc *dc,
       struct dc_log_buffer_ctx *log_ctx)
{
 struct dc_context *dc_ctx = dc->ctx;
 struct dcn_hubbub_wm wm;
 int i;

 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
   " sr_enter sr_exit dram_clk_change\n");

 for (i = 0; i < 4; i++) {
  struct dcn_hubbub_wm_set *s;

  s = &wm.sets[i];
  DTN_INFO("WM_Set[%d]:", s->wm_set);
  DTN_INFO_MICRO_SEC(s->data_urgent);
  DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
  DTN_INFO_MICRO_SEC(s->sr_enter);
  DTN_INFO_MICRO_SEC(s->sr_exit);
  DTN_INFO_MICRO_SEC(s->dram_clk_change);
  DTN_INFO("\n");
 }

 DTN_INFO("\n");
}

/*
 * Dump per-pipe HUBP state: surface configuration, 3DLUT fast-load
 * (FL) registers, and the RQ/DLG/TTU request/deadline programming.
 * Pipes with blank_en set are skipped in every section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
 struct dc_context *dc_ctx = dc->ctx;
 struct resource_pool *pool = dc->res_pool;
 int i;

 /* Surface state, one row per unblanked pipe. */
 DTN_INFO(
  "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
 for (i = 0; i < pool->pipe_count; i++) {
  struct hubp *hubp = pool->hubps[i];
  struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

  hubp->funcs->hubp_read_state(hubp);

  if (!s->blank_en) {
   DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
     hubp->inst,
     s->pixel_format,
     s->inuse_addr_hi,
     s->viewport_width,
     s->viewport_height,
     s->rotation_angle,
     s->h_mirror_en,
     s->sw_mode,
     s->dcc_en,
     s->blank_en,
     s->clock_en,
     s->ttu_disable,
     s->underflow_status);
   DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
   DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
   DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
   DTN_INFO("\n");
  }
 }

 /* 3DLUT fast-load register state. */
 DTN_INFO("\n=======HUBP FL======\n");
 DTN_INFO(
  "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
 for (i = 0; i < pool->pipe_count; i++) {
  struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
  struct dcn_fl_regs_st *fl_regs = &s->fl_regs;

  if (!s->blank_en) {
   DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
     pool->hubps[i]->inst,
     fl_regs->lut_enable,
     fl_regs->lut_done,
     fl_regs->lut_addr_mode,
     fl_regs->lut_width,
     fl_regs->lut_tmz,
     fl_regs->lut_crossbar_sel_r,
     fl_regs->lut_crossbar_sel_g,
     fl_regs->lut_crossbar_sel_b,
     fl_regs->lut_addr_hi,
     fl_regs->lut_addr_lo,
     fl_regs->refcyc_3dlut_group,
     fl_regs->lut_fl_bias,
     fl_regs->lut_fl_scale,
     fl_regs->lut_fl_mode,
     fl_regs->lut_fl_format);
   DTN_INFO("\n");
  }
 }

 /* Request generator (RQ) programming for luma and chroma. */
 DTN_INFO("\n=========RQ========\n");
 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
  " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
  " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
 for (i = 0; i < pool->pipe_count; i++) {
  struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
  struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

  if (!s->blank_en)
   DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
    pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
    rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
    rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
    rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
    rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
    rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
    rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
    rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
    rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
 }

 /* Display latency group (DLG) deadline programming. */
 DTN_INFO("========DLG========\n");
 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
   " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
   " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
   " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
   " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
   " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
   " x_rp_dlay x_rr_sfl rc_td_grp\n");

 for (i = 0; i < pool->pipe_count; i++) {
  struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
  struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

  if (!s->blank_en)
   DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
    " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
    " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n",
    pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
    dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
    dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
    dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
    dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
    dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
    dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
    dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
    dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
    dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
    dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
    dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
    dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
    dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
    dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
    dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
    dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
    dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
 }

 /* Time-to-underflow (TTU) QoS programming. */
 DTN_INFO("========TTU========\n");
 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
   " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
   " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
 for (i = 0; i < pool->pipe_count; i++) {
  struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
  struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

  if (!s->blank_en)
   DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
    pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
    ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
    ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
    ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
    ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
    ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
    ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
 }
 DTN_INFO("\n");
}

/*
 * Log the color pipeline state: per-DPP input/degamma/regamma LUT
 * modes and gamut remap matrix, DPP and MPC color capabilities, and
 * the MPC RMCM 3DLUT/shaper register state.
 */
static void dcn10_log_color_state(struct dc *dc,
      struct dc_log_buffer_ctx *log_ctx)
{
 struct dc_context *dc_ctx = dc->ctx;
 struct resource_pool *pool = dc->res_pool;
 bool is_gamut_remap_available = false;
 int i;

 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
   " GAMUT adjust "
   "C11 C12 C13 C14 "
   "C21 C22 C23 C24 "
   "C31 C32 C33 C34 \n");
 for (i = 0; i < pool->pipe_count; i++) {
  struct dpp *dpp = pool->dpps[i];
  struct dcn_dpp_state s = {0};

  dpp->funcs->dpp_read_state(dpp, &s);
  /* Gamut remap readback is optional per DPP implementation. */
  if (dpp->funcs->dpp_get_gamut_remap) {
   dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
   is_gamut_remap_available = true;
  }

  if (!s.is_enabled)
   continue;

  DTN_INFO("[%2d]: %11xh %11s %9s %9s",
    dpp->inst,
    s.igam_input_format,
    (s.igam_lut_mode == 0) ? "BypassFixed" :
     ((s.igam_lut_mode == 1) ? "BypassFloat" :
     ((s.igam_lut_mode == 2) ? "RAM" :
     ((s.igam_lut_mode == 3) ? "RAM" :
         "Unknown"))),
    (s.dgam_lut_mode == 0) ? "Bypass" :
     ((s.dgam_lut_mode == 1) ? "sRGB" :
     ((s.dgam_lut_mode == 2) ? "Ycc" :
     ((s.dgam_lut_mode == 3) ? "RAM" :
     ((s.dgam_lut_mode == 4) ? "RAM" :
         "Unknown")))),
    (s.rgam_lut_mode == 0) ? "Bypass" :
     ((s.rgam_lut_mode == 1) ? "sRGB" :
     ((s.rgam_lut_mode == 2) ? "Ycc" :
     ((s.rgam_lut_mode == 3) ? "RAM" :
     ((s.rgam_lut_mode == 4) ? "RAM" :
         "Unknown")))));
  if (is_gamut_remap_available)
   DTN_INFO(" %12s "
     "%010lld %010lld %010lld %010lld "
     "%010lld %010lld %010lld %010lld "
     "%010lld %010lld %010lld %010lld",
     (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
     ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
     s.gamut_remap.temperature_matrix[0].value,
     s.gamut_remap.temperature_matrix[1].value,
     s.gamut_remap.temperature_matrix[2].value,
     s.gamut_remap.temperature_matrix[3].value,
     s.gamut_remap.temperature_matrix[4].value,
     s.gamut_remap.temperature_matrix[5].value,
     s.gamut_remap.temperature_matrix[6].value,
     s.gamut_remap.temperature_matrix[7].value,
     s.gamut_remap.temperature_matrix[8].value,
     s.gamut_remap.temperature_matrix[9].value,
     s.gamut_remap.temperature_matrix[10].value,
     s.gamut_remap.temperature_matrix[11].value);

  DTN_INFO("\n");
 }
 DTN_INFO("\n");
 DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
   " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
   " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
   " blnd_lut:%d oscs:%d\n\n",
   dc->caps.color.dpp.input_lut_shared,
   dc->caps.color.dpp.icsc,
   dc->caps.color.dpp.dgam_ram,
   dc->caps.color.dpp.dgam_rom_caps.srgb,
   dc->caps.color.dpp.dgam_rom_caps.bt2020,
   dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
   dc->caps.color.dpp.dgam_rom_caps.pq,
   dc->caps.color.dpp.dgam_rom_caps.hlg,
   dc->caps.color.dpp.post_csc,
   dc->caps.color.dpp.gamma_corr,
   dc->caps.color.dpp.dgam_rom_for_yuv,
   dc->caps.color.dpp.hw_3d_lut,
   dc->caps.color.dpp.ogam_ram,
   dc->caps.color.dpp.ocsc);

 /* MPCC blending tree state; opp_id == 0xf means the MPCC is unused. */
 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
 for (i = 0; i < pool->mpcc_count; i++) {
  struct mpcc_state s = {0};

  pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
  if (s.opp_id != 0xf)
   DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
    i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
    s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
    s.idle);
 }
 DTN_INFO("\n");
 DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
   dc->caps.color.mpc.gamut_remap,
   dc->caps.color.mpc.num_3dluts,
   dc->caps.color.mpc.ogam_ram,
   dc->caps.color.mpc.ocsc);
 DTN_INFO("===== MPC RMCM 3DLUT =====\n");
 DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
 for (i = 0; i < pool->mpcc_count; i++) {
  struct mpcc_state s = {0};

  pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
  if (s.opp_id != 0xf)
   DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
    i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
    s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
    s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
    s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
    s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
    s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
 }
 DTN_INFO("\n");
 DTN_INFO("===== MPC RMCM Shaper =====\n");
 DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
 for (i = 0; i < pool->mpcc_count; i++) {
  struct mpcc_state s = {0};

  pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
  if (s.opp_id != 0xf)
   DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
    i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
    s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
    s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
    s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
    s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
 }
}

/*
 * Top-level hardware state dump: HUBBUB watermarks, HUBP state, color
 * pipeline, OTG timings, DSC, stream/link encoders, HPO DP encoders,
 * calculated clocks and CRC registers, written to the DTN log buffer.
 */
void dcn10_log_hw_state(struct dc *dc,
   struct dc_log_buffer_ctx *log_ctx)
{
 struct dc_context *dc_ctx = dc->ctx;
 struct resource_pool *pool = dc->res_pool;
 int i;

 DTN_INFO_BEGIN();

 dcn10_log_hubbub_state(dc, log_ctx);

 dcn10_log_hubp_states(dc, log_ctx);

 /* Prefer the DCN-generation-specific color logger when provided. */
 if (dc->hwss.log_color_state)
  dc->hwss.log_color_state(dc, log_ctx);
 else
  dcn10_log_color_state(dc, log_ctx);

 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

 for (i = 0; i < pool->timing_generator_count; i++) {
  struct timing_generator *tg = pool->timing_generators[i];
  struct dcn_otg_state s = {0};
  /* Read shared OTG state registers for all DCNx */
  if (tg->funcs->read_otg_state)
   tg->funcs->read_otg_state(tg, &s);

  /*
 * For DCN2 and greater, a register on the OPP is used to
 * determine if the CRTC is blanked instead of the OTG. So use
 * dpg_is_blanked() if exists, otherwise fallback on otg.
 *
 * TODO: Implement DCN-specific read_otg_state hooks.
 */

  if (pool->opps[i]->funcs->dpg_is_blanked)
   s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
  else
   s.blank_enabled = tg->funcs->is_blanked(tg);

  //only print if OTG master is enabled
  if ((s.otg_enabled & 1) == 0)
   continue;

  DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
    tg->inst,
    s.v_blank_start,
    s.v_blank_end,
    s.v_sync_a_start,
    s.v_sync_a_end,
    s.v_sync_a_pol,
    s.v_total_max,
    s.v_total_min,
    s.v_total_max_sel,
    s.v_total_min_sel,
    s.h_blank_start,
    s.h_blank_end,
    s.h_sync_a_start,
    s.h_sync_a_end,
    s.h_sync_a_pol,
    s.h_total,
    s.v_total,
    s.underflow_occurred_status,
    s.blank_enabled);

  // Clear underflow for debug purposes
  // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
  // This function is called only from Windows or Diags test environment, hence it's safe to clear
  // it from here without affecting the original intent.
  tg->funcs->clear_optc_underflow(tg);
 }
 DTN_INFO("\n");

 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
 // TODO: Update golden log header to reflect this name change
 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
 for (i = 0; i < pool->res_cap->num_dsc; i++) {
  struct display_stream_compressor *dsc = pool->dscs[i];
  struct dcn_dsc_state s = {0};

  dsc->funcs->dsc_read_state(dsc, &s);
  DTN_INFO("[%d]: %-9d %-12d %-10d\n",
  dsc->inst,
   s.dsc_clock_en,
   s.dsc_slice_width,
   s.dsc_bits_per_pixel);
  DTN_INFO("\n");
 }
 DTN_INFO("\n");

 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
   " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
 for (i = 0; i < pool->stream_enc_count; i++) {
  struct stream_encoder *enc = pool->stream_enc[i];
  struct enc_state s = {0};

  if (enc->funcs->enc_read_state) {
   enc->funcs->enc_read_state(enc, &s);
   DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
    enc->id,
    s.dsc_mode,
    s.sec_gsp_pps_line_num,
    s.vbid6_line_reference,
    s.vbid6_line_num,
    s.sec_gsp_pps_enable,
    s.sec_stream_enable);
   DTN_INFO("\n");
  }
 }
 DTN_INFO("\n");

 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
 for (i = 0; i < dc->link_count; i++) {
  struct link_encoder *lenc = dc->links[i]->link_enc;

  struct link_enc_state s = {0};

  if (lenc && lenc->funcs->read_state) {
   lenc->funcs->read_state(lenc, &s);
   DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
    i,
    s.dphy_fec_en,
    s.dphy_fec_ready_shadow,
    s.dphy_fec_active_status,
    s.dp_link_training_complete);
   DTN_INFO("\n");
  }
 }
 DTN_INFO("\n");

 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
  "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
   dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
   dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

 log_mpc_crc(dc, log_ctx);

 {
  /* DP HPO (128b/132b) stream encoder state, if any exist. */
  if (pool->hpo_dp_stream_enc_count > 0) {
   DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
   for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
    struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
    struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

    if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
     hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

     DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
       hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
       hpo_dp_se_state.stream_enc_enabled,
       hpo_dp_se_state.otg_inst,
       (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
         ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
         (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
       (hpo_dp_se_state.component_depth == 0) ? 6 :
         ((hpo_dp_se_state.component_depth == 1) ? 8 :
         (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
       hpo_dp_se_state.vid_stream_enabled,
       hpo_dp_se_state.sdp_enabled,
       hpo_dp_se_state.compressed_format,
       hpo_dp_se_state.mapped_to_link_enc);
    }
   }

   DTN_INFO("\n");
  }

  /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
  if (pool->hpo_dp_link_enc_count) {
   DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

   for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
    struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
    struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

    if (hpo_dp_link_enc->funcs->read_state) {
     hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
     DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
       hpo_dp_link_enc->inst,
       hpo_dp_le_state.link_enc_enabled,
       (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
         (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
         (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
       hpo_dp_le_state.lane_count,
       hpo_dp_le_state.stream_src[0],
       hpo_dp_le_state.slot_count[0],
       hpo_dp_le_state.vc_rate_x[0],
       hpo_dp_le_state.vc_rate_y[0]);
     DTN_INFO("\n");
    }
   }

   DTN_INFO("\n");
  }
 }

 DTN_INFO_END();
}

/*
 * Report whether an underflow occurred on this pipe, checking the
 * OTG first and then the HUBP. The sticky status bit that fired is
 * cleared before returning.
 */
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
 struct timing_generator *tg = pipe_ctx->stream_res.tg;
 struct hubp *hubp = pipe_ctx->plane_res.hubp;
 bool underflow = false;

 if (tg->funcs->is_optc_underflow_occurred(tg)) {
  tg->funcs->clear_optc_underflow(tg);
  underflow = true;
 } else if (hubp->funcs->hubp_get_underflow_status(hubp)) {
  hubp->funcs->hubp_clear_underflow(hubp);
  underflow = true;
 }

 return underflow;
}

/*
 * dcn10_enable_power_gating_plane - enable/disable plane power gating.
 *
 * @hws: dce_hwseq reference.
 * @enable: true to allow the PGFSM to gate the HUBP/DPP domains,
 *          false to force every domain on.
 *
 * When gating is disabled, each DCHUBP and DPP power domain is kept
 * powered via its *_POWER_FORCEON bit.
 *
 * Fix: the force_on initializer was missing its terminating
 * semicolon, which made this translation unit fail to compile.
 */
void dcn10_enable_power_gating_plane(
 struct dce_hwseq *hws,
 bool enable)
{
 bool force_on = true; /* disable power gating */

 if (enable)
  force_on = false;

 /* DCHUBP0/1/2/3 */
 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

 /* DPP0/1/2/3 */
 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}

void dcn10_disable_vga(
 struct dce_hwseq *hws)
{
 unsigned int in_vga1_mode = 0;
 unsigned int in_vga2_mode = 0;
 unsigned int in_vga3_mode = 0;
 unsigned int in_vga4_mode = 0;

 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
   in_vga3_mode == 0 && in_vga4_mode == 0)
  return;

 REG_WRITE(D1VGA_CONTROL, 0);
 REG_WRITE(D2VGA_CONTROL, 0);
 REG_WRITE(D3VGA_CONTROL, 0);
 REG_WRITE(D4VGA_CONTROL, 0);

 /* HW Engineer's Notes:
 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
 *
 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
 *  VGA_TEST_ENABLE, to leave it in the same state as before.
 */

 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}

/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP domain on (ungate it), false to
 *            power-gate it.
 *
 * Power up or power-gate the specific DPP instance, then wait for the
 * PGFSM to report the requested power state.
 */

void dcn10_dpp_pg_control(
  struct dce_hwseq *hws,
  unsigned int dpp_inst,
  bool power_on)
{
 uint32_t power_gate = power_on ? 0 : 1;
 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

 if (hws->ctx->dc->debug.disable_dpp_power_gate)
  return;
 /* Skip on ASICs without the DPP power-gating domain registers. */
 if (REG(DOMAIN1_PG_CONFIG) == 0)
  return;

 switch (dpp_inst) {
 case 0: /* DPP0 */
  REG_UPDATE(DOMAIN1_PG_CONFIG,
    DOMAIN1_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN1_PG_STATUS,
    DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 1: /* DPP1 */
  REG_UPDATE(DOMAIN3_PG_CONFIG,
    DOMAIN3_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN3_PG_STATUS,
    DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 2: /* DPP2 */
  REG_UPDATE(DOMAIN5_PG_CONFIG,
    DOMAIN5_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN5_PG_STATUS,
    DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 3: /* DPP3 */
  REG_UPDATE(DOMAIN7_PG_CONFIG,
    DOMAIN7_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN7_PG_STATUS,
    DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 default:
  BREAK_TO_DEBUGGER();
  break;
 }
}

/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP domain on (ungate it), false to
 *            power-gate it.
 *
 * Power up or power-gate the specific HUBP instance, then wait for
 * the PGFSM to report the requested power state.
 */

void dcn10_hubp_pg_control(
  struct dce_hwseq *hws,
  unsigned int hubp_inst,
  bool power_on)
{
 uint32_t power_gate = power_on ? 0 : 1;
 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

 if (hws->ctx->dc->debug.disable_hubp_power_gate)
  return;
 /* Skip on ASICs without the HUBP power-gating domain registers. */
 if (REG(DOMAIN0_PG_CONFIG) == 0)
  return;

 switch (hubp_inst) {
 case 0: /* DCHUBP0 */
  REG_UPDATE(DOMAIN0_PG_CONFIG,
    DOMAIN0_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN0_PG_STATUS,
    DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 1: /* DCHUBP1 */
  REG_UPDATE(DOMAIN2_PG_CONFIG,
    DOMAIN2_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN2_PG_STATUS,
    DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 2: /* DCHUBP2 */
  REG_UPDATE(DOMAIN4_PG_CONFIG,
    DOMAIN4_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN4_PG_STATUS,
    DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 case 3: /* DCHUBP3 */
  REG_UPDATE(DOMAIN6_PG_CONFIG,
    DOMAIN6_POWER_GATE, power_gate);

  REG_WAIT(DOMAIN6_PG_STATUS,
    DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
    1, 1000);
  break;
 default:
  BREAK_TO_DEBUGGER();
  break;
 }
}

/* Un-gate the DPP and HUBP belonging to @plane_id and enable the DPP root
 * clock, opening the IP power-gate request window only for the duration of
 * the two PG calls.
 */
static void power_on_plane_resources(
 struct dce_hwseq *hws,
 int plane_id)
{
 DC_LOGGER_INIT(hws->ctx->logger);

 if (hws->funcs.dpp_root_clock_control)
  hws->funcs.dpp_root_clock_control(hws, plane_id, true);

 /* nothing more to do when IP request control is not mapped */
 if (!REG(DC_IP_REQUEST_CNTL))
  return;

 /* open the power-gate request window */
 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 1);

 if (hws->funcs.dpp_pg_control)
  hws->funcs.dpp_pg_control(hws, plane_id, true);
 if (hws->funcs.hubp_pg_control)
  hws->funcs.hubp_pg_control(hws, plane_id, true);

 /* close the window again */
 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 0);

 DC_LOG_DEBUG(
   "Un-gated front end for pipe %d\n", plane_id);
}

/* Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it through the IP request window.  No-op unless the
 * workaround was previously applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
 struct dce_hwseq *hws = dc->hwseq;
 struct hubp *hubp0 = dc->res_pool->hubps[0];

 if (!hws->wa_state.DEGVIDCN10_253_applied)
  return;

 hubp0->funcs->set_blank(hubp0, true);

 /* re-gate HUBP0 inside an IP request window */
 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 1);
 hws->funcs.hubp_pg_control(hws, 0, false);
 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 0);

 hws->wa_state.DEGVIDCN10_253_applied = false;
}

static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
 struct dce_hwseq *hws = dc->hwseq;
 struct hubp *hubp = dc->res_pool->hubps[0];
 int i;

 if (dc->debug.disable_stutter)
  return;

 if (!hws->wa.DEGVIDCN10_253)
  return;

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  if (!dc->res_pool->hubps[i]->power_gated)
   return;
 }

 /* all pipe power gated, apply work around to enable stutter. */

 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 1);

 hws->funcs.hubp_pg_control(hws, 0, true);
 REG_SET(DC_IP_REQUEST_CNTL, 0,
   IP_REQUEST_EN, 0);

 hubp->funcs->set_hubp_blank_en(hubp, false);
 hws->wa_state.DEGVIDCN10_253_applied = true;
}

/* Run the VBIOS command-table golden init for DCN: global pipe init followed
 * by a per-pipe disable.  Afterwards, undo the self-refresh force-enable the
 * command table may have set (see WA comment below).
 */
void dcn10_bios_golden_init(struct dc *dc)
{
 struct dce_hwseq *hws = dc->hwseq;
 struct dc_bios *bp = dc->ctx->dc_bios;
 int i;
 bool allow_self_fresh_force_enable = true;

 /* some ASICs replace this whole sequence with an S0i3-specific WA */
 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
  return;

 /* snapshot self-refresh force-enable state before the command table runs */
 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
  allow_self_fresh_force_enable =
    dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


 /* WA for making DF sleep when idle after resume from S0i3.
  * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
  * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
  * before calling command table and it changed to 1 after,
  * it should be set back to 0.
  */


 /* initialize dcn global */
 bp->funcs->enable_disp_power_gating(bp,
   CONTROLLER_ID_D0, ASIC_PIPE_INIT);

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  /* initialize dcn per pipe */
  bp->funcs->enable_disp_power_gating(bp,
    CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
 }

 /* restore the pre-command-table state (see WA comment above) */
 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
  if (allow_self_fresh_force_enable == false &&
    dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
   dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
          !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}

static void false_optc_underflow_wa(
  struct dc *dc,
  const struct dc_stream_state *stream,
  struct timing_generator *tg)
{
 int i;
 bool underflow;

 if (!dc->hwseq->wa.false_optc_underflow)
  return;

 underflow = tg->funcs->is_optc_underflow_occurred(tg);

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

  if (old_pipe_ctx->stream != stream)
   continue;

  dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
 }

 if (tg->funcs->set_blank_data_double_buffer)
  tg->funcs->set_blank_data_double_buffer(tg, true);

 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
  tg->funcs->clear_optc_underflow(tg);
}

/* Return the largest vready_offset among @pipe and every pipe it is linked
 * with, walking both blending directions (top/bottom) and both ODM
 * directions (prev/next), so the whole group programs a common VREADY.
 */
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
 struct pipe_ctx *cur;
 int max_offset = pipe->pipe_dlg_param.vready_offset;

 for (cur = pipe->bottom_pipe; cur; cur = cur->bottom_pipe)
  if (cur->pipe_dlg_param.vready_offset > max_offset)
   max_offset = cur->pipe_dlg_param.vready_offset;

 for (cur = pipe->top_pipe; cur; cur = cur->top_pipe)
  if (cur->pipe_dlg_param.vready_offset > max_offset)
   max_offset = cur->pipe_dlg_param.vready_offset;

 for (cur = pipe->next_odm_pipe; cur; cur = cur->next_odm_pipe)
  if (cur->pipe_dlg_param.vready_offset > max_offset)
   max_offset = cur->pipe_dlg_param.vready_offset;

 for (cur = pipe->prev_odm_pipe; cur; cur = cur->prev_odm_pipe)
  if (cur->pipe_dlg_param.vready_offset > max_offset)
   max_offset = cur->pipe_dlg_param.vready_offset;

 return max_offset;
}

/* Program and start the OTG/timing generator for @pipe_ctx's stream:
 * enable the OPTC clock, program the pixel clock PLL, program timing,
 * set the blank color, blank the OTG if needed, then enable the CRTC.
 * Returns DC_OK, or DC_ERROR_UNEXPECTED if PLL programming or CRTC
 * enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
  struct pipe_ctx *pipe_ctx,
  struct dc_state *context,
  struct dc *dc)
{
 struct dc_stream_state *stream = pipe_ctx->stream;
 enum dc_color_space color_space;
 struct tg_color black_color = {0};

 /* by upper caller loop, pipe0 is parent pipe and be called first.
  * back end is set up by for pipe0. Other children pipe share back end
  * with pipe 0. No program is needed.
  */
 if (pipe_ctx->top_pipe != NULL)
  return DC_OK;

 /* TODO check if timing_changed, disable stream if timing changed */

 /* HW program guide assume display already disable
  * by unplug sequence. OTG assume stop.
  */
 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

 /* program the pixel clock PLL; bail if the clock source rejects it */
 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
   pipe_ctx->clock_source,
   &pipe_ctx->stream_res.pix_clk_params,
   dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
   &pipe_ctx->pll_settings)) {
  BREAK_TO_DEBUGGER();
  return DC_ERROR_UNEXPECTED;
 }

 /* track OTG's symclk reference for HDMI so PHY clock bookkeeping stays
  * consistent with the link's TX state
  */
 if (dc_is_hdmi_tmds_signal(stream->signal)) {
  stream->link->phy_state.symclk_ref_cnts.otg = 1;
  if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
   stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
  else
   stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
 }

 /* VREADY is shared across the whole blending/ODM group */
 pipe_ctx->stream_res.tg->funcs->program_timing(
   pipe_ctx->stream_res.tg,
   &stream->timing,
   calculate_vready_offset_for_group(pipe_ctx),
   pipe_ctx->pipe_dlg_param.vstartup_start,
   pipe_ctx->pipe_dlg_param.vupdate_offset,
   pipe_ctx->pipe_dlg_param.vupdate_width,
   pipe_ctx->pipe_dlg_param.pstate_keepout,
   pipe_ctx->stream->signal,
   true);

#if 0 /* move to after enable_crtc */
 /* TODO: OPP FMT, ABM. etc. should be done here. */
 /* or FPGA now. instance 0 only. TODO: move to opp.c */

 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
    pipe_ctx->stream_res.opp,
    &stream->bit_depth_params,
    &stream->clamping);
#endif
 /* program otg blank color */
 color_space = stream->output_color_space;
 color_space_to_black_color(dc, color_space, &black_color);

 /*
  * The way 420 is packed, 2 channels carry Y component, 1 channel
  * alternate between Cb and Cr, so both channels need the pixel
  * value for Y
  */
 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
  black_color.color_r_cr = black_color.color_g_y;

 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
  pipe_ctx->stream_res.tg->funcs->set_blank_color(
    pipe_ctx->stream_res.tg,
    &black_color);

 /* blank the OTG before enabling the CRTC so no garbage scans out */
 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
   !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
  pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
  hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
  false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
 }

 /* VTG is  within DCHUB command block. DCFCLK is always on */
 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
  BREAK_TO_DEBUGGER();
  return DC_ERROR_UNEXPECTED;
 }

 /* TODO program crtc source select for non-virtual signal*/
 /* TODO program FMT */
 /* TODO setup link_enc */
 /* TODO set stream attributes */
 /* TODO program audio */
 /* TODO enable stream if timing changed */
 /* TODO unblank stream if DP */

 return DC_OK;
}

/* Tear down the back end (DPMS/audio/OTG) for @pipe_ctx.  The OTG itself is
 * only disabled for the parent (top) pipe, since children share its back end.
 */
static void dcn10_reset_back_end_for_pipe(
  struct dc *dc,
  struct pipe_ctx *pipe_ctx,
  struct dc_state *context)
{
 int i;
 struct dc_link *link;
 DC_LOGGER_INIT(dc->ctx->logger);
 /* nothing to tear down if no stream encoder was ever attached */
 if (pipe_ctx->stream_res.stream_enc == NULL) {
  pipe_ctx->stream = NULL;
  return;
 }

 link = pipe_ctx->stream->link;
 /* DPMS may already disable or */
 /* dpms_off status is incorrect due to fastboot
  * feature. When system resume from S4 with second
  * screen only, the dpms_off would be true but
  * VBIOS lit up eDP, so check link status too.
  */
 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
  dc->link_srv->set_dpms_off(pipe_ctx);
 else if (pipe_ctx->stream_res.audio)
  dc->hwss.disable_audio_stream(pipe_ctx);

 if (pipe_ctx->stream_res.audio) {
  /*disable az_endpoint*/
  pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

  /*free audio*/
  if (dc->caps.dynamic_audio == true) {
   /*we have to dynamic arbitrate the audio endpoints*/
   /*we free the resource, need reset is_audio_acquired*/
   update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
     pipe_ctx->stream_res.audio, false);
   pipe_ctx->stream_res.audio = NULL;
  }
 }

 /* by upper caller loop, parent pipe: pipe0, will be reset last.
  * back end share by all pipes and will be disable only when disable
  * parent pipe.
  */
 if (pipe_ctx->top_pipe == NULL) {

  if (pipe_ctx->stream_res.abm)
   dc->hwss.set_abm_immediate_disable(pipe_ctx);

  pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

  pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
  /* clear any pending DRR adjustment so it can't land on a dead OTG */
  set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
  if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
   pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
 }

 /* only clear/log the pipe if it is tracked in the current state */
 for (i = 0; i < dc->res_pool->pipe_count; i++)
  if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
   break;

 if (i == dc->res_pool->pipe_count)
  return;

 pipe_ctx->stream = NULL;
 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
     pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}

/* Forced HUBP/HUBBUB recovery sequence, gated by debug.recovery_enabled.
 * Blanks every HUBP, soft-resets HUBBUB, toggles HUBP_DISABLE, releases the
 * soft reset, then unblanks.  Returns true if the sequence was executed,
 * false if recovery is disabled.
 */
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
{
 struct hubp *hubp ;
 unsigned int i;

 if (!dc->debug.recovery_enabled)
  return false;
 /*
DCHUBP_CNTL:HUBP_BLANK_EN=1
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
DCHUBP_CNTL:HUBP_DISABLE=1
DCHUBP_CNTL:HUBP_DISABLE=0
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
DCSURF_PRIMARY_SURFACE_ADDRESS
DCHUBP_CNTL:HUBP_BLANK_EN=0
*/


 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *pipe_ctx =
   &dc->current_state->res_ctx.pipe_ctx[i];
  if (pipe_ctx != NULL) {
   hubp = pipe_ctx->plane_res.hubp;
   /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
   if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
    hubp->funcs->set_hubp_blank_en(hubp, true);
  }
 }
 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
 hubbub1_soft_reset(dc->res_pool->hubbub, true);

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *pipe_ctx =
   &dc->current_state->res_ctx.pipe_ctx[i];
  if (pipe_ctx != NULL) {
   hubp = pipe_ctx->plane_res.hubp;
   /*DCHUBP_CNTL:HUBP_DISABLE=1*/
   if (hubp != NULL && hubp->funcs->hubp_disable_control)
    hubp->funcs->hubp_disable_control(hubp, true);
  }
 }
 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *pipe_ctx =
   &dc->current_state->res_ctx.pipe_ctx[i];
  if (pipe_ctx != NULL) {
   hubp = pipe_ctx->plane_res.hubp;
   /*DCHUBP_CNTL:HUBP_DISABLE=0*/
   /* NOTE(review): 'true' is passed although the sequence step says
    * HUBP_DISABLE=0 — confirm against the intended HW sequence before
    * changing; this matches the code as shipped.
    */
   if (hubp != NULL && hubp->funcs->hubp_disable_control)
    hubp->funcs->hubp_disable_control(hubp, true);
  }
 }
 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
 hubbub1_soft_reset(dc->res_pool->hubbub, false);
 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *pipe_ctx =
   &dc->current_state->res_ctx.pipe_ctx[i];
  if (pipe_ctx != NULL) {
   hubp = pipe_ctx->plane_res.hubp;
   /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
   /* NOTE(review): 'true' is passed although the sequence step says
    * HUBP_BLANK_EN=0 — confirm against the intended HW sequence.
    */
   if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
    hubp->funcs->set_hubp_blank_en(hubp, true);
  }
 }
 return true;

}

/* Sanity check that HUBBUB reports p-state change is allowed.  On failure,
 * optionally dump HW state, trace pipe state, break into the debugger, then
 * attempt the forced recovery sequence and re-check once.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
 struct hubbub *hubbub = dc->res_pool->hubbub;
 static bool should_log_hw_state; /* prevent hw state log by default */

 if (!hubbub->funcs->verify_allow_pstate_change_high)
  return;

 if (hubbub->funcs->verify_allow_pstate_change_high(hubbub))
  return;

 {
  int i = 0;

  if (should_log_hw_state)
   dcn10_log_hw_state(dc, NULL);

  TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
  BREAK_TO_DEBUGGER();

  /* try recovery, then verify once more */
  if (dcn10_hw_wa_force_recovery(dc) &&
    !hubbub->funcs->verify_allow_pstate_change_high(hubbub))
   BREAK_TO_DEBUGGER();
 }
}

/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc,
  struct dc_state *state,
  struct pipe_ctx *pipe_ctx)
{
 struct dce_hwseq *hws = dc->hwseq;
 struct hubp *hubp = pipe_ctx->plane_res.hubp;
 int dpp_id = pipe_ctx->plane_res.dpp->inst;
 struct mpc *mpc = dc->res_pool->mpc;
 struct mpc_tree *mpc_tree_params;
 struct mpcc *mpcc_to_remove = NULL;
 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

 mpc_tree_params = &(opp->mpc_tree_params);
 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

 /*Already reset*/
 if (mpcc_to_remove == NULL)
  return;

 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
 // so don't wait for MPCC_IDLE in the programming sequence
 if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
  opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

 dc->optimized_required = true;

 if (hubp->funcs->hubp_disconnect)
  hubp->funcs->hubp_disconnect(hubp);

 if (dc->debug.sanity_checks)
  hws->funcs.verify_allow_pstate_change_high(dc);
}

/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again and gates the
 * DPP root clock.
 */

void dcn10_plane_atomic_power_down(struct dc *dc,
  struct dpp *dpp,
  struct hubp *hubp)
{
 struct dce_hwseq *hws = dc->hwseq;
 DC_LOGGER_INIT(dc->ctx->logger);

 if (REG(DC_IP_REQUEST_CNTL)) {
  /* open the power-gate request window */
  REG_SET(DC_IP_REQUEST_CNTL, 0,
    IP_REQUEST_EN, 1);

  if (hws->funcs.dpp_pg_control)
   hws->funcs.dpp_pg_control(hws, dpp->inst, false);

  if (hws->funcs.hubp_pg_control)
   hws->funcs.hubp_pg_control(hws, hubp->inst, false);

  /* leave both blocks in a clean state before gating */
  hubp->funcs->hubp_reset(hubp);
  dpp->funcs->dpp_reset(dpp);

  /* close the power-gate request window again */
  REG_SET(DC_IP_REQUEST_CNTL, 0,
    IP_REQUEST_EN, 0);
  DC_LOG_DEBUG(
    "Power gated front end %d\n", hubp->inst);
 }

 if (hws->funcs.dpp_root_clock_control)
  hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}

/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 */

void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
 struct dce_hwseq *hws = dc->hwseq;
 struct hubp *hubp = pipe_ctx->plane_res.hubp;
 struct dpp *dpp = pipe_ctx->plane_res.dpp;
 int opp_id = hubp->opp_id;

 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

 hubp->funcs->hubp_clk_cntl(hubp, false);

 dpp->funcs->dpp_dppclk_control(dpp, falsefalse);

 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
  pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
    pipe_ctx->stream_res.opp,
    false);

 hubp->power_gated = true;
 dc->optimized_required = false/* We're powering off, no need to optimize */

 hws->funcs.plane_atomic_power_down(dc,
   pipe_ctx->plane_res.dpp,
   pipe_ctx->plane_res.hubp);

 pipe_ctx->stream = NULL;
 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
 pipe_ctx->top_pipe = NULL;
 pipe_ctx->bottom_pipe = NULL;
 pipe_ctx->plane_state = NULL;
}

void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
 struct dce_hwseq *hws = dc->hwseq;
 DC_LOGGER_INIT(dc->ctx->logger);

 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
  return;

 hws->funcs.plane_atomic_disable(dc, pipe_ctx);

 apply_DEGVIDCN10_253_wa(dc);

 DC_LOG_DC("Power down front end %d\n",
     pipe_ctx->pipe_idx);
}

/* Bring all pipes to a known state at init: blank enabled OTGs, reset DET
 * allocations and MPC muxes, disconnect/disable front ends, and power gate
 * unused DSCs.  Pipes carrying a seamless-boot stream are left untouched.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
 int i;
 struct dce_hwseq *hws = dc->hwseq;
 struct hubbub *hubbub = dc->res_pool->hubbub;
 bool can_apply_seamless_boot = false;
 bool tg_enabled[MAX_PIPES] = {false};

 for (i = 0; i < context->stream_count; i++) {
  if (context->streams[i]->apply_seamless_boot_optimization) {
   can_apply_seamless_boot = true;
   break;
  }
 }

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct timing_generator *tg = dc->res_pool->timing_generators[i];
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

  /* There is assumption that pipe_ctx is not mapping irregularly
   * to non-preferred front end. If pipe_ctx->stream is not NULL,
   * we will use the pipe, so don't disable
   */
  if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   continue;

  /* Blank controller using driver code instead of
   * command table.
   */
  if (tg->funcs->is_tg_enabled(tg)) {
   if (hws->funcs.init_blank != NULL) {
    hws->funcs.init_blank(dc, tg);
    tg->funcs->lock(tg);
   } else {
    tg->funcs->lock(tg);
    tg->funcs->set_blank(tg, true);
    hwss_wait_for_blank_complete(tg);
   }
  }
 }

 /* Reset det size */
 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
  struct hubp *hubp = dc->res_pool->hubps[i];

  /* Do not need to reset for seamless boot */
  if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   continue;

  if (hubbub && hubp) {
   if (hubbub->funcs->program_det_size)
    hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
   if (hubbub->funcs->program_det_segments)
    hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
  }
 }

 /* num_opp will be equal to number of mpcc */
 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

  /* Cannot reset the MPC mux if seamless boot */
  if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   continue;

  dc->res_pool->mpc->funcs->mpc_init_single_inst(
    dc->res_pool->mpc, i);
 }

 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  struct timing_generator *tg = dc->res_pool->timing_generators[i];
  struct hubp *hubp = dc->res_pool->hubps[i];
  struct dpp *dpp = dc->res_pool->dpps[i];
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

  /* There is assumption that pipe_ctx is not mapping irregularly
   * to non-preferred front end. If pipe_ctx->stream is not NULL,
   * we will use the pipe, so don't disable
   */
  if (can_apply_seamless_boot &&
   pipe_ctx->stream != NULL &&
   pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
    pipe_ctx->stream_res.tg)) {
   // Enable double buffering for OTG_BLANK no matter if
   // seamless boot is enabled or not to suppress global sync
   // signals when OTG blanked. This is to prevent pipe from
   // requesting data while in PSR.
   tg->funcs->tg_init(tg);
   hubp->power_gated = true;
   tg_enabled[i] = true;
   continue;
  }

  /* Disable on the current state so the new one isn't cleared. */
  pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

  hubp->funcs->hubp_reset(hubp);
  dpp->funcs->dpp_reset(dpp);

  /* build a minimal pipe_ctx so disconnect/disable can run on it */
  pipe_ctx->stream_res.tg = tg;
  pipe_ctx->pipe_idx = i;

  pipe_ctx->plane_res.hubp = hubp;
  pipe_ctx->plane_res.dpp = dpp;
  pipe_ctx->plane_res.mpcc_inst = dpp->inst;
  hubp->mpcc_id = dpp->inst;
  hubp->opp_id = OPP_ID_INVALID;
  hubp->power_gated = false;

  dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
  dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
  dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
  pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

  hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

  if (tg->funcs->is_tg_enabled(tg))
   tg->funcs->unlock(tg);

  dc->hwss.disable_plane(dc, context, pipe_ctx);

  pipe_ctx->stream_res.tg = NULL;
  pipe_ctx->plane_res.hubp = NULL;

  if (tg->funcs->is_tg_enabled(tg)) {
   if (tg->funcs->init_odm)
    tg->funcs->init_odm(tg);
  }

  tg->funcs->tg_init(tg);
 }

 /* Clean up MPC tree */
 for (i = 0; i < dc->res_pool->pipe_count; i++) {
  if (tg_enabled[i]) {
   if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
    if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
     int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

     /* drop trees whose bottom MPCC belongs to a disabled TG */
     if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
      dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
    }
   }
  }
 }

 /* Power gate DSCs */
 if (hws->funcs.dsc_pg_control != NULL) {
  uint32_t num_opps = 0;
  uint32_t opp_id_src0 = OPP_ID_INVALID;
  uint32_t opp_id_src1 = OPP_ID_INVALID;

  // Step 1: To find out which OPTC is running & OPTC DSC is ON
  // We can't use res_pool->res_cap->num_timing_generator to check
  // Because it records display pipes default setting built in driver,
  // not display pipes of the current chip.
  // Some ASICs would be fused display pipes less than the default setting.
  // In dcnxx_resource_construct function, driver would obtain real information.
  for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
   uint32_t optc_dsc_state = 0;
   struct timing_generator *tg = dc->res_pool->timing_generators[i];

   if (tg->funcs->is_tg_enabled(tg)) {
    if (tg->funcs->get_dsc_status)
     tg->funcs->get_dsc_status(tg, &optc_dsc_state);
    // Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
    // non-zero value is DSC enabled
    if (optc_dsc_state != 0) {
     tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
     break;
    }
   }
  }

  // Step 2: To power down DSC but skip DSC  of running OPTC
  for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
   struct dcn_dsc_state s  = {0};

   dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

   if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
    s.dsc_clock_en && s.dsc_fw_en)
    continue;

   hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
  }
 }
}

/* One-time hardware init for DCN10-class ASICs: clocks, DCCG, VGA disable,
 * BIOS golden init, reference clock discovery, link/audio/backlight init,
 * pipe init (unless seamless boot defers it), and clock gating enable.
 */
void dcn10_init_hw(struct dc *dc)
{
 int i;
 struct abm *abm = dc->res_pool->abm;
 struct dmcu *dmcu = dc->res_pool->dmcu;
 struct dce_hwseq *hws = dc->hwseq;
 struct dc_bios *dcb = dc->ctx->dc_bios;
 struct resource_pool *res_pool = dc->res_pool;
 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
 uint32_t user_level = MAX_BACKLIGHT_LEVEL;
 bool   is_optimized_init_done = false;

 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
  dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

 /* Align bw context with hw config when system resume. */
 if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
  dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
  dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
 }

 // Initialize the dccg
 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
  dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

 /* only tear down VGA when VBIOS did not already switch modes */
 if (!dcb->funcs->is_accelerated_mode(dcb))
  hws->funcs.disable_vga(dc->hwseq);

 if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
  hws->funcs.bios_golden_init(dc);


 if (dc->ctx->dc_bios->fw_info_valid) {
  res_pool->ref_clocks.xtalin_clock_inKhz =
    dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

  if (res_pool->dccg && res_pool->hubbub) {

   (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
     dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
     &res_pool->ref_clocks.dccg_ref_clock_inKhz);

   (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
     res_pool->ref_clocks.dccg_ref_clock_inKhz,
     &res_pool->ref_clocks.dchub_ref_clock_inKhz);
  } else {
   // Not all ASICs have DCCG sw component
   res_pool->ref_clocks.dccg_ref_clock_inKhz =
     res_pool->ref_clocks.xtalin_clock_inKhz;
   res_pool->ref_clocks.dchub_ref_clock_inKhz =
     res_pool->ref_clocks.xtalin_clock_inKhz;
  }
 } else
  ASSERT_CRITICAL(false);

 for (i = 0; i < dc->link_count; i++) {
  /* Power up AND update implementation according to the
   * required signal (which may be different from the
   * default signal on connector).
   */
  struct dc_link *link = dc->links[i];

  if (!is_optimized_init_done)
   link->link_enc->funcs->hw_init(link->link_enc);

  /* Check for enabled DIG to identify enabled display */
  if (link->link_enc->funcs->is_dig_enabled &&
   link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
   link->link_status.link_active = true;
   if (link->link_enc->funcs->fec_is_active &&
     link->link_enc->funcs->fec_is_active(link->link_enc))
    link->fec_state = dc_link_fec_enabled;
  }
 }

 /* we want to turn off all dp displays before doing detection */
 dc->link_srv->blank_all_dp_displays(dc);

 if (hws->funcs.enable_power_gating_plane)
  hws->funcs.enable_power_gating_plane(dc->hwseq, true);

 /* If taking control over from VBIOS, we may want to optimize our first
  * mode set, so we need to skip powering down pipes until we know which
  * pipes we want to use.
  * Otherwise, if taking control is not possible, we need to power
  * everything down.
  */
 if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
  if (!is_optimized_init_done) {
   hws->funcs.init_pipes(dc, dc->current_state);
   if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
    dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
      !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
  }
 }

 if (!is_optimized_init_done) {

  for (i = 0; i < res_pool->audio_count; i++) {
   struct audio *audio = res_pool->audios[i];

   audio->funcs->hw_init(audio);
  }

  /* pick up backlight level restored by the panel control hw init */
  for (i = 0; i < dc->link_count; i++) {
   struct dc_link *link = dc->links[i];

   if (link->panel_cntl) {
    backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
    user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
   }
  }

  if (abm != NULL)
   abm->funcs->abm_init(abm, backlight, user_level);

  if (dmcu != NULL && !dmcu->auto_load_dmcu)
   dmcu->funcs->dmcu_init(dmcu);
 }

 if (abm != NULL && dmcu != NULL)
  abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
 if (!is_optimized_init_done)
  REG_WRITE(DIO_MEM_PWR_CTRL, 0);

 if (!dc->debug.disable_clock_gate) {
  /* enable all DCN clock gating */
  REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

  REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

  REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
 }

 if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
  dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}

/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */

void dcn10_power_down_on_boot(struct dc *dc)
{
 struct dc_link *edp_links[MAX_NUM_EDP];
 struct dc_link *edp_link = NULL;
 int edp_num;
 int i = 0;

 dc_get_edp_links(dc, edp_links, &edp_num);
 if (edp_num)
  edp_link = edp_links[0];

 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
   edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
   dc->hwseq->funcs.edp_backlight_control &&
   dc->hwseq->funcs.power_down &&
   dc->hwss.edp_power_control) {
  dc->hwseq->funcs.edp_backlight_control(edp_link, false);
  dc->hwseq->funcs.power_down(dc);
  dc->hwss.edp_power_control(edp_link, false);
 } else {
  for (i = 0; i < dc->link_count; i++) {
   struct dc_link *link = dc->links[i];

   if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
     link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
     dc->hwseq->funcs.power_down) {
    dc->hwseq->funcs.power_down(dc);
    break;
   }

  }
 }

 /*
 * Call update_clocks with empty context
 * to send DISPLAY_OFF
 * Otherwise DISPLAY_OFF may not be asserted
 */

 if (dc->clk_mgr->funcs->set_low_power_state)
  dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}

void dcn10_reset_hw_ctx_wrap(
  struct dc *dc,
  struct dc_state *context)
{
 int i;
 struct dce_hwseq *hws = dc->hwseq;

 /* Reset Back End*/
 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
  struct pipe_ctx *pipe_ctx_old =
   &dc->current_state->res_ctx.pipe_ctx[i];
  struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

  if (!pipe_ctx_old->stream)
   continue;

  if (pipe_ctx_old->top_pipe)
   continue;

  if (!pipe_ctx->stream ||
    pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
   struct clock_source *old_clk = pipe_ctx_old->clock_source;

   dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
   if (hws->funcs.enable_stream_gating)
    hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
   if (old_clk)
    old_clk->funcs->cs_power_down(old_clk);
  }
 }
}

/* For side-by-side / top-and-bottom stereo on a secondary split pipe, swap
 * in the right-eye address and save the left-eye address into @addr;
 * returns true so the caller can restore it after programming.  For a mono
 * surface on a 3D stream, mirror the left addresses into the right slots
 * and return false.
 */
static bool patch_address_for_sbs_tb_stereo(
  struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 bool sec_split = pipe_ctx->top_pipe &&
   pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
 bool sbs_or_tab =
   pipe_ctx->stream->timing.timing_3d_format ==
    TIMING_3D_FORMAT_SIDE_BY_SIDE ||
   pipe_ctx->stream->timing.timing_3d_format ==
    TIMING_3D_FORMAT_TOP_AND_BOTTOM;

 if (sec_split && sbs_or_tab &&
   plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
  /* present the right eye on this pipe; remember the left address */
  *addr = plane_state->address.grph_stereo.left_addr;
  plane_state->address.grph_stereo.left_addr =
   plane_state->address.grph_stereo.right_addr;
  return true;
 }

 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
   plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
  /* mono surface on a 3D stream: duplicate left into right */
  plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
  plane_state->address.grph_stereo.right_addr =
   plane_state->address.grph_stereo.left_addr;
  plane_state->address.grph_stereo.right_meta_addr =
   plane_state->address.grph_stereo.left_meta_addr;
 }

 return false;
}

/* Program the plane's surface address (with SBS/TAB stereo patching) into
 * HUBP and update the plane's requested/current address status.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 PHYSICAL_ADDRESS_LOC saved_left_addr;
 bool stereo_patched;

 if (!plane_state)
  return;

 stereo_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &saved_left_addr);

 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
   pipe_ctx->plane_res.hubp,
   &plane_state->address,
   plane_state->flip_immediate);

 plane_state->status.requested_address = plane_state->address;

 /* immediate flips take effect without waiting for vsync */
 if (plane_state->flip_immediate)
  plane_state->status.current_address = plane_state->address;

 /* undo the stereo swap so plane state is left untouched */
 if (stereo_patched)
  plane_state->address.grph_stereo.left_addr = saved_left_addr;
}

bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
   const struct dc_plane_state *plane_state)
{
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=96 H=91 G=93

¤ Dauer der Verarbeitung: 0.23 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.