/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/*
 * Bubble-sort connectors so that entries with a lower
 * link_enc_hw_inst come first.
 * NOTE(review): this span is the interior of a larger function
 * (apparently update_phy_id_mapping); its header is outside this chunk.
 */
for (idx = connector_cnt; idx > 1; idx--) {
	for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
		if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
		    sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
			swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
	}
}
/*
 * Sort MST connectors by RAD. MST connectors with the same enc_hw_instance
 * are already grouped together by the sort above.
 * NOTE(review): this span is the interior of a larger function
 * (apparently update_phy_id_mapping); its header is outside this chunk.
 */
for (idx = 0; idx < connector_cnt; /* advanced inside the body */) {
	if (sort_connector[idx]->mst_root) {
		uint8_t i, j, k;
		uint8_t mst_con_cnt = 1;

		/* Count consecutive connectors sharing this MST root. */
		for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
			if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
				mst_con_cnt++;
			else
				break;
		}

		/* Bubble-sort the group: by tree depth (LCT) first, then by RAD. */
		for (i = mst_con_cnt; i > 1; i--) {
			/*
			 * Fix: the original bound was (idx + i - 2), which skips
			 * the final adjacent comparison of each pass — a group of
			 * two connectors was never sorted at all. A full pass must
			 * compare up to j = idx + i - 2 inclusive.
			 */
			for (j = idx; j < (idx + i - 1); j++) {
				int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
				int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
				u8 *rad;
				u8 *next_rad;
				bool swap = false;

				/* Sort by mst tree depth first. Then compare RAD if depth is the same */
				if (mstb_lct > next_mstb_lct) {
					swap = true;
				} else if (mstb_lct == next_mstb_lct) {
					if (mstb_lct == 1) {
						if (sort_connector[j]->mst_output_port->port_num >
						    sort_connector[j + 1]->mst_output_port->port_num)
							swap = true;
					} else if (mstb_lct > 1) {
						rad = sort_connector[j]->mst_output_port->parent->rad;
						next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;

						/* RAD packs one 4-bit port number per hop. */
						for (k = 0; k < mstb_lct - 1; k++) {
							int shift = (k % 2) ? 0 : 4;
							int port_num = (rad[k / 2] >> shift) & 0xf;
							int next_port_num = (next_rad[k / 2] >> shift) & 0xf;

							if (port_num > next_port_num) {
								swap = true;
								break;
							}
						}
					} else {
						DRM_ERROR("MST LCT shouldn't be set as < 1");
						mutex_unlock(&ddev->mode_config.mutex);
						return;
					}
				}

				if (swap)
					swap(sort_connector[j], sort_connector[j + 1]);
			}
		}

		idx += mst_con_cnt;
	} else {
		idx++;
	}
}
/* Sorting complete. Assign the relevant results to
 * dm->secure_display_ctx.phy_id_mapping[].
 * NOTE(review): the body of this loop is truncated in this chunk — the
 * statements that actually fill phy_id_mapping[] are not visible here.
 */
memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping)); for (idx = 0; idx < connector_cnt; idx++) {
aconnector = sort_connector[idx];
staticbool get_phy_id(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
{ int idx, idx_2; bool found = false;
/* * Assume secure display start after all connectors are probed. The connection * config is static as well
*/ if (!dm->secure_display_ctx.phy_mapping_updated) {
DRM_WARN("%s Should update the phy id table before get it's value", __func__); returnfalse;
}
for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) { if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
DRM_ERROR("phy_id_mapping[%d] should be assigned", idx); returnfalse;
}
if (aconnector->dc_link->link_enc_hw_inst ==
dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) { if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
found = true; goto out;
} else { /* Could caused by wrongly pass mst root connector */ if (!aconnector->mst_output_port) {
DRM_ERROR("%s Check mst case but connector without a port assigned", __func__); returnfalse;
}
if (aconnector->mst_root &&
aconnector->mst_root->mst_mgr.mst_primary == NULL) {
DRM_WARN("%s pass in a stale mst connector", __func__);
}
if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) { if (aconnector->mst_output_port->parent->lct == 1) {
found = true; goto out;
} elseif (aconnector->mst_output_port->parent->lct > 1) { /* Check RAD */ for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) { int shift = (idx_2 % 2) ? 0 : 4; int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf; int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
if (port_num != port_num2) break;
}
if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
found = true; goto out;
}
} else {
DRM_ERROR("lCT should be >= 1"); returnfalse;
}
}
}
}
}
out: if (found) {
DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
*phy_id = idx;
} else {
DRM_WARN("Can't find associated phy ID"); returnfalse;
}
/* NOTE(review): the lines below are fragments of at least two different
 * functions fused together by a faulty extraction — a secure-display
 * teardown path (flush_work on the per-crtc workers), a PSP TA ROI-CRC
 * send path, and the tail of a CRC-source verify helper
 * ("*values_cnt = 3; return 0;"). None of the enclosing function headers
 * are visible in this chunk, so the code is preserved byte-identical and
 * only annotated.
 */
/* Disable secure_display if it was enabled */ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) { /* stop ROI update on this crtc */
flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
/* need lock for multiple crtcs to use the command buffer */
mutex_lock(&psp->securedisplay_context.mutex); /* PSP TA is expected to finish data transmission over I2C within current frame, * even there are up to 4 crtcs request to send in this frame.
*/ if (dm->secure_display_ctx.support_mul_roi) {
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
/* Build a bitmask of windows whose CRC is ready to be sent. */
for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) { if (crc_cpy[i].crc_ready)
roi_idx |= 1 << i;
}
securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
} else {
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
/* NOTE(review): from here on the content belongs to a verify-crc-source
 * style helper (src_name/values_cnt), not the PSP path above. */
if (source < 0) {
DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
src_name, crtc->index); return -EINVAL;
}
*values_cnt = 3; return 0;
}
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source)
{ struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); int ret = 0;
/* Configuration will be deferred to stream enable. */ if (!stream_state) return -EINVAL;
mutex_lock(&adev->dm.dc_lock);
/* For PSR1, check that the panel has exited PSR */ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
amdgpu_dm_psr_wait_disable(stream_state);
/* Enable or disable CRTC CRC generation */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { if (!dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL; goto unlock;
}
}
if (commit) { /* * Need to wait for all outstanding programming to complete * in commit tail since it can modify CRC related fields and * hardware state. Since we're holding the CRTC lock we're * guaranteed that no other commit work can be queued off * before we modify the state below.
*/
ret = wait_for_completion_interruptible_timeout(
&commit->hw_done, 10 * HZ); if (ret) goto cleanup;
}
if (!aconn) {
DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
ret = -EINVAL; goto cleanup;
}
aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;
if (!aux) {
DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
ret = -EINVAL; goto cleanup;
}
if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
ret = -EINVAL; goto cleanup;
}
}
/* * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops.
*/
enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); if (!enabled && enable) {
ret = drm_crtc_vblank_get(crtc); if (ret) goto cleanup;
}
#ifdefined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); #endif
if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
ret = -EINVAL; goto cleanup;
}
if (!enabled && enable) { if (dm_is_crc_source_dprx(source)) { if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
ret = -EINVAL; goto cleanup;
}
}
} elseif (enabled && !enable) {
drm_crtc_vblank_put(crtc); if (dm_is_crc_source_dprx(source)) { if (drm_dp_stop_crc(aux)) {
DRM_DEBUG_DRIVER("dp stop crc failed\n");
ret = -EINVAL; goto cleanup;
}
}
}
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
#ifdefined(CONFIG_DRM_AMD_SECURE_DISPLAY) /* Initialize phy id mapping table for secure display*/ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
!dm->secure_display_ctx.phy_mapping_updated)
update_phy_id_mapping(adev); #endif
cleanup: if (commit)
drm_crtc_commit_put(commit);
drm_modeset_unlock(&crtc->mutex);
return ret;
}
/** * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC. * @crtc: DRM CRTC object. * * This function should be called at the end of a vblank, when the fb has been * fully processed through the pipe.
*/ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{ struct dm_crtc_state *crtc_state; struct dc_stream_state *stream_state; struct drm_device *drm_dev = NULL; enum amdgpu_dm_pipe_crc_source cur_crc_src; struct amdgpu_crtc *acrtc = NULL;
uint32_t crcs[3]; unsignedlong flags;
/* Early return if CRC capture is not enabled. */ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) return;
/* * Since flipping and crc enablement happen asynchronously, we - more * often than not - will be returning an 'uncooked' crc on first frame. * Probably because hw isn't ready yet. For added security, skip the * first two CRC values.
*/ if (crtc_state->crc_skip_count < 2) {
crtc_state->crc_skip_count += 1; return;
}
if (dm_is_crc_source_crtc(cur_crc_src)) { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2])) return;
/* Early return if CRC capture is not enabled. */ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
!dm_is_crc_source_crtc(cur_crc_src)) {
spin_unlock_irqrestore(&drm_dev->event_lock, flags1); return;
}
if (!acrtc->dm_irq_params.crc_window_activated) {
spin_unlock_irqrestore(&drm_dev->event_lock, flags1); return;
}
crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id]; if (WARN_ON(crtc_ctx->crtc != crtc)) { /* We have set the crtc when creating secure_display_crtc_context, * don't expect it to be changed here.
*/
crtc_ctx->crtc = crtc;
}
if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE) /* forward task to dmub to update ROI */
forward_roi_change = true; elseif (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) /* update ROI via dm*/
dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
&crc_window, true, true, i, false);
/* Statically skip 1 frame, because we may need to wait below things * before sending ROI to dmub: * 1. We defer the work by using system workqueue. * 2. We may need to wait for dc_lock before accessing dmub.
*/
acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
crtc_ctx->crc_info.crc[i].crc_ready = false;
} else { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
&crc_r[i], &crc_g[i], &crc_b[i]))
DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE) /* forward task to psp to read ROI/CRC and output via I2C */
notify_ta = true; elseif (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) /* Avoid ROI window get changed, keep overwriting. */
dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
&crc_window, true, true, i, false);
/* crc ready for psp to read out */
crtc_ctx->crc_info.crc[i].crc_ready = true;
}
}
if (forward_roi_change)
schedule_work(&crtc_ctx->forward_roi_work);
if (notify_ta)
schedule_work(&crtc_ctx->notify_ta_work);
spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1); for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
if (!crtc_ctx->roi[i].enable) {
crtc_ctx->crc_info.crc[i].frame_count = 0; continue;
}
if (!crtc_ctx->crc_info.crc[i].crc_ready)
all_crc_ready = false;
if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX) /* Reset the reference frame count after user update the ROI * or it reaches the maximum value.
*/
crtc_ctx->crc_info.crc[i].frame_count = 0; else
crtc_ctx->crc_info.crc[i].frame_count += 1;
}
spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
if (all_crc_ready)
complete_all(&crtc_ctx->crc_info.completion);
}
/*
 * NOTE(review): the following is stray non-code text (a German website
 * disclaimer) appended to this file by a faulty extraction. It is preserved
 * here as a comment so the file remains parseable:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */