/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD *
*/
for (i = 0 ; i < count; i++) { /* confirm no messages pending */ do {
status = dmub_srv_wait_for_idle(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* queue command */ if (status == DMUB_STATUS_OK)
status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
/* check for errors */ if (status != DMUB_STATUS_OK) { break;
}
}
if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
} returnfalse;
}
for (i = 0 ; i < count; i++) { // Queue command if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
} else {
status = DMUB_STATUS_QUEUE_FULL;
}
if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */
status = dmub_srv_fb_cmd_execute(dmub); if (status == DMUB_STATUS_POWER_STATE_D3) returnfalse;
do {
status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (!dc_dmub_srv || !dc_dmub_srv->dmub) returnfalse;
dmub = dc_dmub_srv->dmub;
// Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { do {
status = dmub_srv_wait_for_idle(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); if (!dmub->debug.timeout_info.timeout_occured) {
dmub->debug.timeout_info.timeout_occured = true; if (cmd_list)
dmub->debug.timeout_info.timeout_cmd = *cmd_list;
dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); returnfalse;
}
// Copy data back from ring buffer into command if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
}
}
if (should_manage_pstate) { for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream) continue;
/* If FAMS is being used to support P-State and there is a stream * that does not use FAMS, we are in an FPO + VActive scenario. * Assign vactive stretch margin in this case.
*/
stream_status = dc_state_get_stream_status(context, pipe->stream); if (stream_status && !stream_status->fpo_in_use) {
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; break;
}
pipe_idx++;
}
}
for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!resource_is_pipe_type(pipe, OTG_MASTER)) continue;
// If command was processed, copy feature caps to dmub srv if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.visual_confirm_color.header.ret_status == 0) {
memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
&cmd.visual_confirm_color.visual_confirm_color_data, sizeof(struct dmub_visual_confirm_color));
}
}
/** * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command * * @dc: [in] pointer to dc object * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @vblank_pipe: [in] pipe_ctx for the DRR pipe * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info * @context: [in] DC state for access to phantom stream * * Populate the DMCUB SubVP command with DRR pipe info. All the information * required for calculating the SubVP + DRR microschedule is populated here. * * High level algorithm: * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule * 3. Populate the drr_info with the min and max supported vtotal values
*/ staticvoid populate_subvp_cmd_drr_info(struct dc *dc, struct dc_state *context, struct pipe_ctx *subvp_pipe, struct pipe_ctx *vblank_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; struct dc_crtc_timing *phantom_timing; struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
uint16_t max_drr_supported_us = 0;
uint16_t max_drr_vblank_us = 0;
uint16_t max_drr_mallregion_us = 0;
uint16_t mall_region_us = 0;
uint16_t prefetch_us = 0;
uint16_t subvp_active_us = 0;
uint16_t drr_active_us = 0;
uint16_t min_vtotal_supported = 0;
uint16_t max_vtotal_supported = 0;
if (!phantom_stream) return;
phantom_timing = &phantom_stream->timing;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
/* When calculating the max vtotal supported for SubVP + DRR cases, add * margin due to possible rounding errors (being off by 1 line in the * FW calculation can incorrectly push the P-State switch to wait 1 frame * longer).
*/
max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;
/** * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd * * Populate the DMCUB SubVP command with VBLANK pipe info. All the information * required to calculate the microschedule for SubVP + VBLANK case is stored in * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe * is a DRR display -- if it is make a call to populate drr_info.
*/ staticvoid populate_subvp_cmd_vblank_pipe_info(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *vblank_pipe,
uint8_t cmd_pipe_index)
{
uint32_t i; struct pipe_ctx *pipe = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
// Find the SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!resource_is_pipe_type(pipe, DPP_PIPE)) continue;
// Find the SubVP pipe if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break;
}
/** * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2) * * For SubVP + SubVP, we use a single vertical interrupt to start the * microschedule for both SubVP pipes. In order for this to work correctly, the * MALL REGION of both SubVP pipes must start at the same time. This function * lengthens the prefetch end to mall start delay of the SubVP pipe that has * the shorter prefetch so that both MALL REGION's will start at the same time.
*/ staticvoid update_subvp_prefetch_end_to_mall_start(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *subvp_pipes[])
{
uint32_t subvp0_prefetch_us = 0;
uint32_t subvp1_prefetch_us = 0;
uint32_t prefetch_delta_us = 0; struct dc_stream_state *phantom_stream0 = NULL; struct dc_stream_state *phantom_stream1 = NULL; struct dc_crtc_timing *phantom_timing0 = NULL; struct dc_crtc_timing *phantom_timing1 = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); if (!phantom_stream0) return;
phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); if (!phantom_stream1) return;
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time) // should increase it's prefetch time to match the other if (subvp0_prefetch_us > subvp1_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
((uint64_t)phantom_timing1->h_total * 1000000));
/** * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command * * @dc: [in] current dc state * @context: [in] new dc state * @cmd: [in] DMUB cmd to be populated with SubVP info * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd * * Populate the DMCUB SubVP command with SubVP pipe info. All the information * required to calculate the microschedule for the SubVP pipe is stored in the * pipe_data of the DMCUB SubVP command.
*/ staticvoid populate_subvp_cmd_pipe_info(struct dc *dc, struct dc_state *context, union dmub_rb_cmd *cmd, struct pipe_ctx *subvp_pipe,
uint8_t cmd_pipe_index)
{
uint32_t j; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; struct dc_crtc_timing *phantom_timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
/* Calculate the scaling factor from the src and dst height. * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2. * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor" * * Make sure to combine stream and plane scaling together.
*/
reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
&out_num_stream, &out_den_stream);
reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
&out_num_plane, &out_den_plane);
reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
phantom_timing->v_total - phantom_timing->v_front_porch;
/** * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command * * @dc: [in] current dc state * @context: [in] new dc state * @enable: [in] if true enables the pipes population * * This function loops through each pipe and populates the DMUB SubVP CMD info * based on the pipe (e.g. SubVP, VBLANK).
*/ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable)
{
uint8_t cmd_pipe_index = 0;
uint32_t i, pipe_idx;
uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2];
uint32_t wm_val_refclk = 0; enum mall_stream_type pipe_mall_type;
for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
/* For SubVP pipe count, only count the top most (ODM / MPC) pipe
*/ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream) continue;
/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe. * Any ODM or MPC splits being used in SubVP will be handled internally in * populate_subvp_cmd_pipe_info
*/ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe_mall_type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} elseif (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe_mall_type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
// Store the original watermark value for this SubVP config so we can lower it when the // MCLK switch starts
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
pipe_ctx->stream->link, &panel_inst)) return;
/* Payload: Cursor Rect is built from position & attribute * x & y are obtained from postion
*/
payload->cursor_rect.x = hubp->cur_rect.x;
payload->cursor_rect.y = hubp->cur_rect.y; /* w & h are obtained from attribute */
payload->cursor_rect.width = hubp->cur_rect.w;
payload->cursor_rect.height = hubp->cur_rect.h;
/** * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command * * @pCtx: [in] pipe context * @pipe_idx: [in] pipe index * * This function would store the cursor related information and pass it into * dmub
*/ void dc_send_update_cursor_info_to_dmu( struct pipe_ctx *pCtx, uint8_t pipe_idx)
{ union dmub_rb_cmd cmd[2]; union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
&cmd[0].update_cursor_info.update_cursor_info_data;
memset(cmd, 0, sizeof(cmd));
if (!dc_dmub_should_update_cursor_data(pCtx)) return; /* * Since we use multi_cmd_pending for dmub command, the 2nd command is * only assigned to store cursor attributes info. * 1st command can view as 2 parts, 1st is for PSR/Replay data, the other * is to store cursor position info. * * Command heaer type must be the same type if using multi_cmd_pending. * Besides, while process 2nd command in DMU, the sub type is useless. * So it's meanless to pass the sub type header with different type.
*/
if (!dc_dmub_srv || !dc_dmub_srv->dmub) returntrue;
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) returntrue;
dc_ctx = dc_dmub_srv->ctx;
if (wait) { if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { do {
status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
} while (status != DMUB_STATUS_OK);
} else {
status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); if (status != DMUB_STATUS_OK) {
DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); returnfalse;
}
}
} else return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
returntrue;
}
staticint count_active_streams(conststruct dc *dc)
{ int i, count = 0;
for (i = 0; i < dc->current_state->stream_count; ++i) { struct dc_stream_state *stream = dc->current_state->streams[i];
if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
count += 1;
}
return count;
}
/*
 * dc_dmub_srv_notify_idle() - Notify DMCUB whether driver idle is allowed.
 *
 * @dc: DC context used to reach the DMUB service.
 * @allow_idle: true to permit idle entry, false to disallow it.
 *
 * NOTE: This does not use the "wake" interface since this is part of the wake
 * path itself. No wait is performed when allowing idle because DMCUB could
 * legitimately enter idle right after the notification.
 */
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	/* Emulation has no DMCUB to notify. */
	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	/*
	 * Fix: the original read dc_dmub_srv below while it was still
	 * uninitialized (undefined behavior). Bind it to the validated
	 * service pointer before use.
	 */
	dc_dmub_srv = dc->ctx->dmub_srv;

	/*
	 * NOTE(review): cmd is sent zero-initialized here; the command payload
	 * setup appears to be missing from this snapshot -- confirm upstream.
	 */
	dm_execute_dmub_cmd(dc->ctx, &cmd,
			    allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);

	/* Register access should stop at this point when idle is allowed. */
	if (allow_idle)
		dc_dmub_srv->needs_idle_wake = true;
}
/* Note: register access has technically not resumed for DCN here, but we * need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
/* Detection may require reading scratch 0 - exit out of idle prior to the read. */ if (dc_dmub_srv->idle_allowed) {
dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
reallow_idle = true;
}
/* Re-enter idle if we're not about to immediately redetect links. */ if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
!dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);
if (dc_dmub_srv->idle_allowed == allow_idle) return;
DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
/* * Entering a low power state requires a driver notification. * Powering up the hardware requires notifying PMFW and DMCUB. * Clearing the driver idle allow requires a DMCUB command. * DMCUB commands requires the DMCUB to be powered up and restored.
*/
if (!allow_idle) {
dc_dmub_srv->idle_exit_counter += 1;
dc_dmub_srv_exit_low_power_state(dc); /* * Idle is considered fully exited only after the sequence above * fully completes. If we have a race of two threads exiting * at the same time then it's safe to perform the sequence * twice as long as we're not re-entering. * * Infinite command submission is avoided by using the * dm_execute_dmub_cmd submission instead of the "wake" helpers.
*/
dc_dmub_srv->idle_allowed = false;
dc_dmub_srv->idle_exit_counter -= 1; if (dc_dmub_srv->idle_exit_counter < 0) {
ASSERT(0);
dc_dmub_srv->idle_exit_counter = 0;
}
} else { /* Consider idle as notified prior to the actual submission to
* prevent multiple entries. */
dc_dmub_srv->idle_allowed = true;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) returnfalse;
if (count == 0) returntrue;
if (dc_dmub_srv->idle_allowed) {
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
reallow_idle = true;
}
/* * These may have different implementations in DM, so ensure * that we guide it to the expected helper.
*/ if (count > 1)
result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type); else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
/* apply feature configuration based on current driver state */
global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
global_cmd->config.global.features.bits.enable = enable;
if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { /* set multi pending for global, and unset for last stream cmd */
global_cmd->header.multi_cmd_pending = 1;
cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.