/*
 * dcn401_initialize_min_clocks() - seed the current state's clock values with
 * the lowest entries of the clock manager's clock table so the first mode set
 * starts from minimum frequencies.
 *
 * NOTE(review): this listing is truncated/spliced — the closing brace of this
 * function is not visible here; the next lines belong to a different function.
 */
void dcn401_initialize_min_clocks(struct dc *dc)
{ struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
/* Deep-sleep DCFCLK uses a fixed init value; other clocks use table entry 0 (MHz -> kHz). */
clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; if (dc->debug.disable_boot_optimizations) {
clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
} else { /* Even though DPG_EN = 1 for the connected display, it still requires the * correct timing so we cannot set DISPCLK to min freq or it could cause * audio corruption. Read current DISPCLK from DENTIST and request the same * freq to ensure that the timing is valid and unchanged.
*/
clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
}
clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
/* Assume P-state switching is supported until validation proves otherwise. */
clocks->fclk_p_state_change_support = true;
clocks->p_state_change_support = true;
/*
 * NOTE(review): fragment — the enclosing function (which declares pipe_ctx,
 * mpc_adjust and i) is not visible in this listing. It programs the MPCC
 * gamut-remap blocks: SECOND_GAMUT_REMAP is forced to bypass, and
 * OGAM_GAMUT_REMAP follows the stream's remap matrix as on DCN3x.
 *
 * NOTE(review): line-merging has swallowed an "if (pipe_ctx->plane_state)"
 * guard into the // comment below, so the ASSERT reads as unconditional here
 * — verify against the original source.
 */
//For now assert if location is not pre-blend if (pipe_ctx->plane_state)
ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
memset(&mpc_adjust, 0, sizeof(mpc_adjust));
mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
/* Apply the SW gamut-adjust matrix only on the top (non-MPC-slave) pipe. */
if (pipe_ctx->top_pipe == NULL) { if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
mpc_adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
}
}
/*
 * NOTE(review): fragment of a HW-init sequence (the enclosing function, which
 * declares res_pool/dcb/hws, is not visible). Initializes the clock manager,
 * DCCG, OPTC/VGA memory power states and reference-clock bookkeeping.
 *
 * NOTE(review): line-merging has swallowed the "if (...)" guards on lines
 * marked below into // comments, so those register writes read as
 * unconditional here — verify against the original source.
 */
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
/* NOTE(review): the dccg_init guard was swallowed into the comment below. */
// Initialize the dccg if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
// Disable DMUB Initialization until IPS state programming is finalized //if (!dcb->funcs->is_accelerated_mode(dcb)) { // hws->funcs.bios_golden_init(dc); //}
/* NOTE(review): the enable_mem_low_power.bits.optc guard was swallowed into the comment below. */
// Set default OPTC memory power states if (dc->debug.enable_mem_low_power.bits.optc) { // Shutdown when unassigned and light sleep in VBLANK
REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
}
if (dc->debug.enable_mem_low_power.bits.vga) { // Power down VGA memory
REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
}
/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency. */
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
if (res_pool->hubbub) {
(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
&res_pool->ref_clocks.dccg_ref_clock_inKhz);
(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else { // Not all ASICs have DCCG sw component
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
/*
 * NOTE(review): fragment of the same HW-init sequence. Powers up link
 * encoders, records which DIGs are already active (e.g. lit by VBIOS),
 * enables plane power gating, blanks DP displays, then powers down pipes
 * or the whole display HW depending on boot-optimization settings.
 */
for (i = 0; i < dc->link_count; i++) { /* Power up AND update implementation according to the * required signal (which may be different from the * default signal on connector).
*/ struct dc_link *link = dc->links[i];
if (link->ep_type != DISPLAY_ENDPOINT_PHY) continue;
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */ if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
link->phy_state.symclk_state = SYMCLK_ON_TX_ON; if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
}
}
/* enable_power_gating_plane before dsc_pg_control because * FORCEON = 1 with hw default value on bootup, resume from s3
*/ if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. * Otherwise, if taking control is not possible, we need to power * everything down.
*/ if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { /* Disable boot optimizations means power down everything including PHY, DIG, * and OTG (i.e. the boot is not optimized because we do a full power down).
*/ if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
dc->hwss.enable_accelerated_mode(dc, dc->current_state); else
hws->funcs.init_pipes(dc, dc->current_state);
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
dcn401_initialize_min_clocks(dc);
/* On HW init, allow idle optimizations after pipes have been turned off. * * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state * is reset (i.e. not in idle at the time hw init is called), but software state * still has idle_optimizations = true, so we must disable idle optimizations first * (i.e. set false), then re-enable (set true).
*/
dc_allow_idle_optimizations(dc, false);
dc_allow_idle_optimizations(dc, true);
}
/* In headless boot cases, DIG may be turned * on which causes HW/SW discrepancies. * To avoid this, power down hardware on boot * if DIG is turned on and seamless boot not enabled
*/ if (!dc->config.seamless_boot_edp_requested) { struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link;
dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num) { for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i]; if (edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
dc->hwss.edp_backlight_control &&
hws->funcs.power_down &&
dc->hwss.edp_power_control) {
dc->hwss.edp_backlight_control(edp_link, false);
hws->funcs.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
}
}
} else { for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
hws->funcs.power_down) {
/* One power_down covers all links; stop after the first active DIG. */
hws->funcs.power_down(dc); break;
}
}
}
}
/*
 * NOTE(review): fragment of the same HW-init sequence. Initializes audio
 * endpoints and ABM instances, enables DCN clock gating, programs watermarks,
 * and queries DMCUB feature capabilities.
 *
 * NOTE(review): the link loop below is immediately followed by a pipe loop
 * reusing the same index 'i' with no visible link-loop body or close — the
 * listing appears spliced here; verify against the original source.
 */
for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i];
for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL)
abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
dcn401_setup_hpo_hw_control(hws, true);
/* Only program boot-time watermarks when not taking over from VBIOS. */
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
// Get DMCUB capabilities if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
dc->debug.fams2_config.bits.enable &=
dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ if (dc->clk_mgr)
dc->res_pool->funcs->update_bw_bounding_box(dc,
dc->clk_mgr->bw_params);
}
}
}
/*
 * NOTE(review): fragment — the enclosing function (which declares hubp, mpc,
 * mcm_luts, format, mpcc_id, width, lut_bank_a, addr_mode, result,
 * plane_state and the crossbar variables) is not visible. Programs the HUBP
 * 3DLUT fast-load path (format, bias/scale, crossbar) and the MPC MCM 3DLUT,
 * then returns the accumulated result flag.
 */
if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) { case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice; break; case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; break; case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
format = hubp_3dlut_fl_format_float_fp1_5_10; break;
} if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
mpc->funcs->mcm.program_bias_scale) {
mpc->funcs->mcm.program_bias_scale(mpc,
mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
}
//navi 4x has a bug and r and blue are swapped and need to be worked around here in //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: default:
crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; break;
}
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
crossbar_bit_slice_cb_b);
if (mpc->funcs->mcm.program_lut_read_write_control)
mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
if (mpc->funcs->mcm.program_3dlut_size)
mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
// 3D if (mpc->funcs->program_3dlut) { if (plane_state->lut3d_func.state.bits.initialized == 1)
result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); else
result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
}
return result;
}
bool dcn401_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, conststruct dc_stream_state *stream)
{ int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; conststruct pwl_params *params = NULL; bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); if (ret == false && mpc->funcs->set_output_gamma) { if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
params = &stream->out_transfer_func.pwl; elseif (pipe_ctx->stream->out_transfer_func.type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm3_helper_translate_curve_to_hw_format(
&stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
BREAK_TO_DEBUGGER();
}
}
if (mpc->funcs->set_output_gamma)
mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
/*
 * NOTE(review): fragment of a stream-timing enable path (the enclosing
 * function, which declares context, patched_crtc_timing, event_triggers and
 * link, is not visible). Applies the PLAT_58856 workaround for non-DP
 * signals, adjusts h_addressable for hblank borrowing, enables the CRTC and
 * configures static-screen event triggers; returns DC_OK on success.
 */
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
/* if we are borrowing from hblank, h_addressable needs to be adjusted */ if (dc->debug.enable_hblank_borrow)
patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
/* VTG is within DCHUB command block. DCFCLK is always on */ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED;
}
/* Event triggers and num frames initialized for DRR, but can be * later updated for PSR use. Note DRR trigger events are generated * regardless of whether num frames met.
*/ if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
/* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ /* TODO setup link_enc */ /* TODO set stream attributes */ /* TODO program audio */ /* TODO enable stream if timing changed */ /* TODO unblank stream if DP */
if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
return DC_OK;
}
staticenum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{ switch (link->link_enc->transmitter) { case TRANSMITTER_UNIPHY_A: return PHYD32CLKA; case TRANSMITTER_UNIPHY_B: return PHYD32CLKB; case TRANSMITTER_UNIPHY_C: return PHYD32CLKC; case TRANSMITTER_UNIPHY_D: return PHYD32CLKD; case TRANSMITTER_UNIPHY_E: return PHYD32CLKE; default: return PHYD32CLKA;
}
}
/*
 * NOTE(review): this span contains fragments of at least three different
 * functions spliced together by the listing: (1) TMDS pixel-rate divider
 * selection, (2) DP early-control calculation from lane count, and
 * (3) HW-cursor position translation into recout space. The enclosing
 * declarations (tmds_div, timing, lane_count, early_control, x_pos, y_pos,
 * pos_cpy, odm_combine_on, mpc_combine_on, prev_odm_pipe) are not visible.
 */
if (dc_is_tmds_signal(pipe_ctx->stream->signal))
dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div); else
*tmds_div = PIXEL_RATE_DIV_BY_1;
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
+ timing->h_border_left
+ timing->h_border_right;
/* Guard against division by zero when no lanes are configured. */
if (lane_count != 0)
*early_control = active_total_with_borders % lane_count;
if (*early_control == 0)
*early_control = lane_count;
/* DCN4 moved cursor composition after Scaler, so in HW it is in * recout space and for HW Cursor position programming need to * translate to recout space. * * Cursor X and Y position programmed into HW can't be negative, * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot * position that goes into HW X and Y coordinates while HW Hot spot * X and Y coordinates are length relative to the cursor top left * corner, hotspot must be smaller than the cursor size. * * DMs/DC interface for Cursor position is in stream->src space, and * DMs supposed to transform Cursor coordinates to stream->src space, * then here we need to translate Cursor coordinates to stream->dst * space, as now in HW, Cursor coordinates are in per pipe recout * space, and for the given pipe valid coordinates are only in range * from 0,0 - recout width, recout height space. * If certain pipe combining is in place, need to further adjust per * pipe to make sure each pipe enabling cursor on its part of the * screen.
*/
x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
pipe_ctx->stream->src.width;
y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
pipe_ctx->stream->src.height;
/* If the cursor's source viewport is clipped then we need to * translate the cursor to appear in the correct position on * the screen. * * This translation isn't affected by scaling so it needs to be * done *after* we adjust the position for the scale factor. * * This is only done by opt-in for now since there are still * some usecases like tiled display that might enable the * cursor on both streams while expecting dc to clip it.
*/ if (pos_cpy.translate_by_source) {
x_pos += pipe_ctx->plane_state->src_rect.x;
y_pos += pipe_ctx->plane_state->src_rect.y;
}
/* Adjust for ODM Combine * next/prev_odm_offset is to account for scaled modes that have underscan
*/ if (odm_combine_on) {
prev_odm_pipe = pipe_ctx->prev_odm_pipe;
/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
*/ if (mpc_combine_on &&
pipe_ctx->top_pipe &&
(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {
staticbool dcn401_check_no_memory_request_for_cab(struct dc *dc)
{ int i;
/* First, check no-memory-request case */ for (i = 0; i < dc->current_state->stream_count; i++) { if ((dc->current_state->stream_status[i].plane_count) &&
(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) /* Fail eligibility on a visible stream */ returnfalse;
}
returntrue;
}
static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{ int i;
uint8_t num_ways = 0;
uint32_t mall_ss_size_bytes = 0;
mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; // TODO add additional logic for PSR active stream exclusion optimization // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
// Include cursor size for CAB allocation for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
if (!pipe->stream || !pipe->plane_state) continue;
// Convert number of cache lines required to number of ways if (dc->debug.force_mall_ss_num_ways > 0)
num_ways = dc->debug.force_mall_ss_num_ways; elseif (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); else
num_ways = 0;
return num_ways;
}
/*
 * dcn401_apply_idle_power_optimizations() - on enable, either send the CAB
 * "no DCN request" message (no visible streams) or compute the CAB way
 * allocation for MALL; bails out early when DMUB is unavailable or any
 * stream has PSR enabled (MALL SS + PSR is unsupported).
 *
 * Fix: the listing had the fused keyword "returnfalse" (twice), which does
 * not compile; restored "return false".
 *
 * NOTE(review): this listing is spliced — partway through, the body jumps
 * into an unrelated DCC-meta-propagation wait loop from a different function
 * (see the marked line below); the remainder of this function is missing.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{ union dmub_rb_cmd cmd;
uint8_t ways, i; int j; bool mall_ss_unsupported = false; struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv || !dc->current_state) return false;
for (i = 0; i < dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL &&
dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
DC_LOG_MALL("MALL SS not supported with PSR at this time\n"); return false;
}
}
if (enable) { if (dcn401_check_no_memory_request_for_cab(dc)) { /* 1. Check no memory request case for CAB. * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
*/
DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
} else { /* 2. Check if all surfaces can fit in CAB. * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message * and configure HUBP's to fetch from MALL
*/
ways = dcn401_calculate_cab_allocation(dc, dc->current_state);
/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo, * or TMZ surface, don't try to enter MALL.
*/ for (i = 0; i < dc->current_state->stream_count; i++) { for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
plane = dc->current_state->stream_status[i].plane_states[j];
/* NOTE(review): splice point — the following lines belong to a different function. */
/* check if any surfaces are updating address while using flip immediate and dcc */ while (pipe_ctx != NULL) { if (pipe_ctx->plane_state &&
pipe_ctx->plane_state->dcc.enable &&
pipe_ctx->plane_state->flip_immediate &&
pipe_ctx->plane_state->update_flags.bits.addr_update) {
is_wait_needed = true; break;
}
/* check next pipe */
pipe_ctx = pipe_ctx->bottom_pipe;
}
if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
udelay(dc->debug.dcc_meta_propagation_delay_us);
}
}
/*
 * NOTE(review): fragment of a bandwidth-programming path (the enclosing
 * function, which declares p_state_change_support, context and hubbub, is
 * not visible). Temporarily disables P-state support while transitioning,
 * handles the DC-mode soft-max memclk case, programs watermarks/arbiter and
 * restores the original P-state support value.
 */
/* Any transition into P-State support should disable MCLK switching first to avoid hangs */ if (p_state_change_support) {
dc->optimized_required = true;
context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
}
if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
/* program dchubbub watermarks: * For assigning wm_optimized_required, use |= operator since we don't want * to clear the value if the optimize has not happened yet
*/
dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); /* update timeout thresholds */ if (hubbub->funcs->program_arbiter) {
dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { /* After disabling P-State, restore the original value to ensure we get the correct P-State
* on the next optimize. */
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
/*
 * dcn401_optimize_bandwidth() - post-commit bandwidth optimization; starts by
 * pushing the FAMS2 configuration to firmware under the global control lock.
 *
 * NOTE(review): this listing is truncated — the remainder of the function
 * body and its closing brace are not visible here.
 */
void dcn401_optimize_bandwidth( struct dc *dc, struct dc_state *context)
{ int i; struct hubbub *hubbub = dc->res_pool->hubbub;
/* enable fams2 if needed */ if (dc->debug.fams2_config.bits.enable) {
dcn401_fams2_global_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
dcn401_fams2_global_control_lock(dc, context, false);
}
/*
 * dcn401_fams2_global_control_lock() - acquire/release the FAMS2 global
 * control lock via a DMUB inbox0 hardware-lock command; no-op when DMUB is
 * unavailable or FAMS2 is disabled.
 *
 * NOTE(review): this listing is truncated — the command setup/send and the
 * closing brace are not visible here.
 */
void dcn401_fams2_global_control_lock(struct dc *dc, struct dc_state *context, bool lock)
{ /* use always for now */ union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) return;
/*
 * NOTE(review): fragment of a DSC reconfiguration path (the enclosing
 * function, which declares old_otg_master, otg_master, old_opp_heads,
 * old_opp_head_count, old_pipe, new_pipe and i, is not visible). Collects
 * the old OPP heads for the OTG master, updates DSC on the stream, and
 * disconnects DSC instances that the new state no longer uses.
 */
if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
&dc->current_state->res_ctx,
old_opp_heads);
} else { // DC cannot assume that the current state and the new state // share the same OTG pipe since this is not true when called // in the context of a commit stream not checked. Hence, set // old_otg_master to NULL to skip the DSC configuration.
old_otg_master = NULL;
}
if (otg_master->stream_res.dsc)
dcn32_update_dsc_on_stream(otg_master,
otg_master->stream->timing.flags.DSC); if (old_otg_master && old_otg_master->stream_res.dsc) { for (i = 0; i < old_opp_head_count; i++) {
old_pipe = old_opp_heads[i];
new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
old_pipe->stream_res.dsc->funcs->dsc_disconnect(
old_pipe->stream_res.dsc);
}
}
}
/*
 * dcn401_update_odm() - reconfigure ODM slicing for @otg_master; when the
 * master carries no plane (DPP pipe check), reprogram the OPP blank pattern
 * since the OPP count changed.
 *
 * NOTE(review): this listing is spliced — most of the body (opp_heads
 * collection, ODM register programming) is missing, and the trailing eDP
 * backlight lines below belong to a different function.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
{ struct pipe_ctx *opp_heads[MAX_PIPES]; int opp_inst[MAX_PIPES] = {0}; int opp_head_count; int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false); int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true); int i;
if (!resource_is_pipe_type(otg_master, DPP_PIPE)) /* * blank pattern is generated by OPP, reprogram blank pattern * due to OPP count change
*/
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
/* NOTE(review): fragment from a different (power-on) function. */
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
hws->funcs.edp_backlight_control(link, true);
}
/*
 * dcn401_hardware_release() - prepare for releasing display hardware:
 * disable the FAMS2 firmware configuration, then force P-state support in
 * DCN when firmware-assisted switching was active or P-state is unsupported.
 *
 * NOTE(review): this listing is truncated — the function's closing brace is
 * not visible; the following lines belong to a different function.
 */
void dcn401_hardware_release(struct dc *dc)
{
dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
/* If pstate unsupported, or still supported * by firmware, force it supported by dcn
*/ if (dc->current_state) { if ((!dc->clk_mgr->clks.p_state_change_support ||
dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, true, true);
/*
 * NOTE(review): fragment of a pipe lock/unlock path (the enclosing function,
 * which declares lock, pipe, tg and context, is not visible). On lock, locks
 * every enabled non-phantom OTG master; on unlock, first unlocks pipes that
 * must free DET buffers and waits for the DET update, then unlocks the rest.
 */
if (lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!tg->funcs->is_tg_enabled(tg) ||
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) continue;
dc->hwss.pipe_control_lock(dc, pipe, true);
}
} else { /* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/ for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
if (dc->scratch.pipes_to_unlock_first[i]) { struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
dc->hwss.pipe_control_lock(dc, pipe, false); /* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/
dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
}
}
/* NOTE(review): this loop body is truncated — the listing splices into the next function. */
/* Unlocking the rest of the pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) { if (dc->scratch.pipes_to_unlock_first[i]) continue;
/*
 * dcn401_perform_3dlut_wa_unlock() - unlock the OTG while working around a
 * HW issue where VREADY cancels a pending 3DLUT_ENABLE: hold vupdate keepout,
 * re-assert 3DLUT fast-load enable on affected pipes, unlock, wait for the
 * unlock to land, re-assert enable again, then release keepout.
 *
 * NOTE(review): the code that populates wa_pipes/wa_pipe_ct is missing from
 * this listing (as shown, wa_pipe_ct stays 0 and only the plain unlock path
 * runs) — verify against the original source.
 */
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{ /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that * HUBP will properly fetch 3DLUT contents after unlock. * * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless * of whether OTG lock is currently being held or not.
*/ struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; struct pipe_ctx *odm_pipe, *mpc_pipe; int i, wa_pipe_ct = 0;
if (wa_pipe_ct > 0) { if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
for (i = 0; i < wa_pipe_ct; ++i) { if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
}
pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
for (i = 0; i < wa_pipe_ct; ++i) { if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
}
if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
} else {
pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
}
}
/*
 * NOTE(review): fragment of a back-end reset path (the enclosing function,
 * which declares link and link_hwss, is not visible). Turns off DPMS or
 * audio for the stream, frees the audio endpoint, and for the parent pipe
 * disables the PHY output and resets DTBCLK_P to the reference clock.
 */
/* DPMS may already disable or */ /* dpms_off status is incorrect due to fastboot * feature. When system resume from S4 with second * screen only, the dpms_off would be true but * VBIOS lit up eDP, so check link status too.
*/ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
dc->link_srv->set_dpms_off(pipe_ctx); elseif (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/*free audio*/ if (dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ /*we free the resource, need reset is_audio_acquired*/
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
}
/* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable * parent pipe.
*/ if (pipe_ctx->top_pipe == NULL) {
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve * the case where the same symclk is shared across multiple otg * instances
*/ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
link->phy_state.symclk_ref_cnts.otg = 0; if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
/* reset DTBCLK_P */ if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
}
/*
 * In case of a dangling plane, setting this to NULL unconditionally
 * (NOTE(review): the remainder of this comment is truncated in this listing.)
 */
/*
 * NOTE(review): the lines below were website boilerplate accidentally
 * captured with this source listing; translated from German for reference:
 * "The information on this website was compiled carefully and to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax rendering and the measurement are still experimental."
 */