/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD *
*/
/*
 * On pipe unlock and programming, indicate pipe will be busy
 * until some frame and line (vupdate); this is required for consecutive
 * full updates, which need to wait for the previous update to latch
 * before programming the next one.
 *
 * NOTE(review): everything past the early-return guard references
 * identifiers ('i', 'old_pipe_ctx', 'context', 'tg', 'pool', 's') that are
 * never declared in this function -- the body appears spliced together from
 * the pipe interdependent-lock loop and the OTG/DSC hardware-state logger.
 * Restore from the upstream sequencer before relying on it.
 */
void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
/* Fixed fused token: was "unsignedint vpos" (would not compile). */
uint32_t vupdate_start, vupdate_end; struct crtc_position position; unsigned int vpos, cur_frame;
/* Nothing to do without a stream, timing generator and stream encoder. */
if (!pipe_ctx->stream ||
!pipe_ctx->stream_res.tg ||
!pipe_ctx->stream_res.stream_enc) return;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/* * Only lock the top pipe's tg to prevent redundant * (un)locking. Also skip if pipe is disabled.
*/ if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg) ||
dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) continue;
for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; struct dcn_otg_state s = {0}; /* Read shared OTG state registers for all DCNx */ if (tg->funcs->read_otg_state)
tg->funcs->read_otg_state(tg, &s);
/* * For DCN2 and greater, a register on the OPP is used to * determine if the CRTC is blanked instead of the OTG. So use * dpg_is_blanked() if exists, otherwise fallback on otg. * * TODO: Implement DCN-specific read_otg_state hooks.
*/ if (pool->opps[i]->funcs->dpg_is_blanked)
s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]); else
s.blank_enabled = tg->funcs->is_blanked(tg);
//only print if OTG master is enabled if ((s.otg_enabled & 1) == 0) continue;
// Clear underflow for debug purposes // We want to keep underflow sticky bit on for the longevity tests outside of test environment. // This function is called only from Windows or Diags test environment, hence it's safe to clear // it from here without affecting the original intent.
tg->funcs->clear_optc_underflow(tg);
}
DTN_INFO("\n");
// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel // TODO: Update golden log header to reflect this name change
DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); for (i = 0; i < pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = pool->dscs[i]; struct dcn_dsc_state s = {0};
/* HW Engineer's Notes: * During switch from vga->extended, if we set the VGA_TEST_ENABLE and * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly. * * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset * VGA_TEST_ENABLE, to leave it in the same state as before.
*/
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP up (pwr_status = PGFSM_POWER_ON),
 *            false to power it down.
 *
 * Enable or disable the power gate of the specific DPP instance.
 *
 * NOTE(review): the code after the early returns references 'dc', 'i',
 * 'allow_self_fresh_force_enable' and 'bp', none of which are declared in
 * this function -- it appears spliced from the BIOS power-gating init /
 * self-refresh workaround path. Restore from upstream before relying on it.
 */
void dcn10_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) /* fixed fused "unsignedint" */
{
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* Bail out when power gating is disabled via debug option or not supported. */
if (hws->ctx->dc->debug.disable_dpp_power_gate) return; if (REG(DOMAIN1_PG_CONFIG) == 0) return;
if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc)) return;
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
/* WA for making DF sleep when idle after resume from S0i3. * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 * before calling command table and it changed to 1 after, * it should be set back to 0.
*/
/* initialize dcn global */
bp->funcs->enable_disp_power_gating(bp,
CONTROLLER_ID_D0, ASIC_PIPE_INIT);
for (i = 0; i < dc->res_pool->pipe_count; i++) { /* initialize dcn per pipe */
bp->funcs->enable_disp_power_gating(bp,
CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
}
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) if (allow_self_fresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
staticvoid false_optc_underflow_wa( struct dc *dc, conststruct dc_stream_state *stream, struct timing_generator *tg)
{ int i; bool underflow;
/* by upper caller loop, pipe0 is parent pipe and be called first. * back end is set up by for pipe0. Other children pipe share back end * with pipe 0. No program is needed.
*/ if (pipe_ctx->top_pipe != NULL) return DC_OK;
/* TODO check if timing_changed, disable stream if timing changed */
/* HW program guide assume display already disable * by unplug sequence. OTG assume stop.
*/
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
&stream->bit_depth_params,
&stream->clamping); #endif /* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
/* * The way 420 is packed, 2 channels carry Y component, 1 channel * alternate between Cb and Cr, so both channels need the pixel * value for Y
*/ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
black_color.color_r_cr = black_color.color_g_y;
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
&black_color);
/* VTG is within DCHUB command block. DCFCLK is always on */ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED;
}
/* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ /* TODO setup link_enc */ /* TODO set stream attributes */ /* TODO program audio */ /* TODO enable stream if timing changed */ /* TODO unblank stream if DP */
return DC_OK;
}
staticvoid dcn10_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context)
{ int i; struct dc_link *link;
DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL; return;
}
link = pipe_ctx->stream->link; /* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature. When system resume from S4 with second * screen only, the dpms_off would be true but * VBIOS lit up eDP, so check link status too.
*/ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
dc->link_srv->set_dpms_off(pipe_ctx); elseif (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
/*free audio*/ if (dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ /*we free the resource, need reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
}
/* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable * parent pipe.
*/ if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
dc->hwss.set_abm_immediate_disable(pipe_ctx);
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
} /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
hubbub1_soft_reset(dc->res_pool->hubbub, true);
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_DISABLE=1*/ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
} for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_DISABLE=0*/ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
} /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
hubbub1_soft_reset(dc->res_pool->hubbub, false); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp; /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
} returntrue;
}
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{ struct hubbub *hubbub = dc->res_pool->hubbub; staticbool should_log_hw_state; /* prevent hw state log by default */
if (!hubbub->funcs->verify_allow_pstate_change_high) return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { int i = 0;
if (should_log_hw_state)
dcn10_log_hw_state(dc, NULL);
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
BREAK_TO_DEBUGGER(); if (dcn10_hw_wa_force_recovery(dc)) { /*check again*/ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
BREAK_TO_DEBUGGER();
}
}
}
/* trigger HW to start disconnect plane from stream on the next vsync */ void dcn10_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{ struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params; struct mpcc *mpcc_to_remove = NULL; struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
/*
 * NOTE(review): 'mpcc_to_remove' is initialized to NULL and never
 * assigned before this check, so the function always returns here and
 * everything below is dead code. The MPCC lookup (upstream walks the
 * MPC tree via get_mpcc_for_dpp using 'dpp_id' and 'mpc_tree_params',
 * which are also left unused/uninitialized here) appears to have been
 * lost -- restore from upstream before use.
 */
/*Already reset*/ if (mpcc_to_remove == NULL) return;
/*
 * NOTE(review): the line below has an if-statement swallowed into a
 * '//' comment, so the SUBVP_PHANTOM condition never compiles/applies.
 */
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, // so don't wait for MPCC_IDLE in the programming sequence if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
/* Force optimization pass so the disconnect is cleaned up later. */
dc->optimized_required = true;
if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
/* Optional sanity check that p-state changes are still allowed. */
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
}
/** * dcn10_plane_atomic_power_down - Power down plane components. * * @dc: dc struct reference. used for grab hwseq. * @dpp: dpp struct reference. * @hubp: hubp struct reference. * * Keep in mind that this operation requires a power gate configuration; * however, requests for switch power gate are precisely controlled to avoid * problems. For this reason, power gate request is usually disabled. This * function first needs to enable the power gate request before disabling DPP * and HUBP. Finally, it disables the power gate request again.
*/ void dcn10_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp)
{ struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
/* Only attempt power gating when the IP request register exists. */
if (REG(DC_IP_REQUEST_CNTL)) {
/* Open the power-gate request window before touching DPP/HUBP gates. */
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
/*
 * NOTE(review): from here to the end of this span the code references
 * 'i', 'context', 'can_apply_seamless_boot', 'hubbub' and 'tg_enabled',
 * none of which are declared in this function -- the remainder appears
 * to be spliced from dcn10_init_pipes(), and the power-gate request is
 * never closed nor the dpp/hubp reset performed. Restore from upstream.
 */
for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) {
can_apply_seamless_boot = true; break;
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* There is assumption that pipe_ctx is not mapping irregularly * to non-preferred front end. If pipe_ctx->stream is not NULL, * we will use the pipe, so don't disable
*/ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
/* Blank controller using driver code instead of * command table.
*/ if (tg->funcs->is_tg_enabled(tg)) { if (hws->funcs.init_blank != NULL) {
hws->funcs.init_blank(dc, tg);
tg->funcs->lock(tg);
} else {
tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
}
}
/* Reset det size */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = dc->res_pool->hubps[i];
/* Do not need to reset for seamless boot */ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
if (hubbub && hubp) { if (hubbub->funcs->program_det_size)
hubbub->funcs->program_det_size(hubbub, hubp->inst, 0); if (hubbub->funcs->program_det_segments)
hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
}
}
/*
 * NOTE(review): this OPP loop body appears truncated -- the next 'for'
 * opens without the MPC-mux reset or a closing brace for this loop.
 */
/* num_opp will be equal to number of mpcc */ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Cannot reset the MPC mux if seamless boot */ if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; struct hubp *hubp = dc->res_pool->hubps[i]; struct dpp *dpp = dc->res_pool->dpps[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* There is assumption that pipe_ctx is not mapping irregularly * to non-preferred front end. If pipe_ctx->stream is not NULL, * we will use the pipe, so don't disable
*/ if (can_apply_seamless_boot &&
pipe_ctx->stream != NULL &&
pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
pipe_ctx->stream_res.tg)) { // Enable double buffering for OTG_BLANK no matter if // seamless boot is enabled or not to suppress global sync // signals when OTG blanked. This is to prevent pipe from // requesting data while in PSR.
tg->funcs->tg_init(tg);
hubp->power_gated = true;
tg_enabled[i] = true; continue;
}
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (tg->funcs->is_tg_enabled(tg)) { if (tg->funcs->init_odm)
tg->funcs->init_odm(tg);
}
tg->funcs->tg_init(tg);
}
/*
 * NOTE(review): the lines from here to the end of this span are orphaned
 * fragments with no enclosing function visible: an MPC-tree cleanup loop
 * that opens four nested blocks and never uses/closes them, a DSC
 * power-down scan, a dangling '} else' ref-clock fallback (its matching
 * 'if' on get_dchub_ref_freq is missing), and the link/audio section of a
 * hardware-init routine referencing undeclared 'res_pool', 'dcb', 'hws'
 * and 'is_optimized_init_done'. Braces are unbalanced throughout; restore
 * the original functions from upstream before relying on any of this.
 */
/* Clean up MPC tree */ for (i = 0; i < dc->res_pool->pipe_count; i++) { if (tg_enabled[i]) { if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
// Step 1: To find out which OPTC is running & OPTC DSC is ON // We can't use res_pool->res_cap->num_timing_generator to check // Because it records display pipes default setting built in driver, // not display pipes of the current chip. // Some ASICs would be fused display pipes less than the default setting. // In dcnxx_resource_construct function, driver would obatin real information. for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg)) { if (tg->funcs->get_dsc_status)
tg->funcs->get_dsc_status(tg, &optc_dsc_state); // Only one OPTC with DSC is ON, so if we got one result, we would exit this block. // non-zero value is DSC enabled if (optc_dsc_state != 0) {
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); break;
}
}
}
// Step 2: To power down DSC but skip DSC of running OPTC for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { struct dcn_dsc_state s = {0};
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else { // Not all ASICs have DCCG sw component
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
for (i = 0; i < dc->link_count; i++) { /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector).
*/ struct dc_link *link = dc->links[i];
if (!is_optimized_init_done)
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */ if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true; if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
}
}
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. * Otherwise, if taking control is not possible, we need to power * everything down.
*/ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { if (!is_optimized_init_done) {
hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
}
if (!is_optimized_init_done) {
for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/ if (!is_optimized_init_done)
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
/* In headless boot cases, DIG may be turned * on which causes HW/SW discrepancies. * To avoid this, power down hardware on boot * if DIG is turned on
*/ void dcn10_power_down_on_boot(struct dc *dc)
{ struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link = NULL; int edp_num; int i = 0;
/* Pick the first detected eDP link, if any. */
dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num)
edp_link = edp_links[0];
/* If eDP's DIG is on and all required hooks exist, power it down cleanly. */
if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwseq->funcs.edp_backlight_control &&
dc->hwseq->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwseq->funcs.edp_backlight_control(edp_link, false);
dc->hwseq->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
/*
 * NOTE(review): this function is truncated -- the per-link DIG
 * power-down in this else-branch is missing, 'link' is unused, and
 * the else/for/function braces are never closed before the next
 * definition starts. Restore from upstream before use.
 */
/* * Call update_clocks with empty context * to send DISPLAY_OFF * Otherwise DISPLAY_OFF may not be asserted
*/ if (dc->clk_mgr->funcs->set_low_power_state)
dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context)
{ int i; struct dce_hwseq *hws = dc->hwseq;
/* Reset Back End*/ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { struct pipe_ctx *pipe_ctx_old =
&dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/*
 * NOTE(review): extraneous non-code text appended here by the extraction
 * process (a German website disclaimer); the original tail of
 * dcn10_reset_hw_ctx_wrap() appears to have been lost. Translation of the
 * removed text: "The information on this website was compiled carefully to
 * the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */