/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD *
*/
/* The caprices of the preprocessor require that this be declared right here */ #define CREATE_TRACE_POINTS
/* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100
/* Number of bytes in PSP footer for firmware. */ #define PSP_FOOTER_BYTES 0x100
/** * DOC: overview * * The AMDgpu display manager, **amdgpu_dm** (or even simpler, * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM * requests into DC requests, and DC responses into DRM responses. * * The root control structure is &struct amdgpu_display_manager.
*/
/* * initializes drm_device display related structures, based on the information * provided by DAL. The drm strcutures are: drm_crtc, drm_connector, * drm_encoder, drm_mode_config * * Returns 0 on success
*/ staticint amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); /* removes and deallocates the drm structures, created by the above function */ staticvoid amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL;
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc); return 0;
}
if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
dc_allow_idle_optimizations(dc, false);
/* * TODO rework base driver to use values directly. * for now parse it back into reg-format
*/
dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
&v_blank_start,
&v_blank_end,
&h_position,
&v_position);
/* * DC will program planes with their z-order determined by their ordering * in the dc_surface_updates array. This comparator is used to sort them * by descending zpos.
*/ staticint dm_plane_layer_index_cmp(constvoid *a, constvoid *b)
{ conststruct dc_surface_update *sa = (struct dc_surface_update *)a; conststruct dc_surface_update *sb = (struct dc_surface_update *)b;
/** * update_planes_and_stream_adapter() - Send planes to be updated in DC * * DC has a generic way to update planes and stream via * dc_update_planes_and_stream function; however, DM might need some * adjustments and preparation before calling it. This function is a wrapper * for the dc_update_planes_and_stream that does any required configuration * before passing control to DC. * * @dc: Display Core control structure * @update_type: specify whether it is FULL/MEDIUM/FAST update * @planes_count: planes count to update * @stream: stream state * @stream_update: stream update * @array_of_surface_update: dc surface update pointer *
*/ staticinlinebool update_planes_and_stream_adapter(struct dc *dc, int update_type, int planes_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, struct dc_surface_update *array_of_surface_update)
{
sort(array_of_surface_update, planes_count, sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);
/* * Previous frame finished and HW is ready for optimization.
*/
dc_post_update_surfaces_to_stream(dc);
/* IRQ could occur when in initial stage */ /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) {
drm_dbg_state(dev, "CRTC is null, returning.\n"); return;
}
/* Fixed refresh rate, or VRR scanout position outside front-porch? */ if (!vrr_active ||
!dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
&v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) { /* Update to correct count and vblank timestamp if racing with * vblank irq. This also updates to the correct vblank timestamp * even in VRR mode, as scanout is past the front-porch atm.
*/
drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* Wake up userspace by sending the pageflip event with proper * count and timestamp of vblank of flip completion.
*/ if (e) {
drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
/* Event sent, so done with vblank for this flip */
drm_crtc_vblank_put(&amdgpu_crtc->base);
}
} elseif (e) { /* VRR active and inside front-porch: vblank count and * timestamp for pageflip event will only be up to date after * drm_crtc_handle_vblank() has been executed from late vblank * irq handler after start of back-porch (vline 0). We queue the * pageflip event for send-out by drm_crtc_handle_vblank() with * updated timestamp and count, once it runs after us. * * We need to open-code this instead of using the helper * drm_crtc_arm_vblank_event(), as that helper would * call drm_crtc_accurate_vblank_count(), which we must * not call in VRR mode while we are in front-porch!
*/
/* sequence will be replaced by real count during send-out. */
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e->pipe = amdgpu_crtc->crtc_id;
list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
e = NULL;
}
/* Keep track of vblank of this flip for flip throttling. We use the * cooked hw counter, as that one incremented at start of this vblank * of pageflip completion, so last_flip_vblank is the forbidden count * for queueing new pageflips if vsync + VRR is enabled.
*/
amdgpu_crtc->dm_irq_params.last_flip_vblank =
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
/* Core vblank handling is done here after end of front-porch in * vrr mode, as vblank timestamping will give valid results * while now done after front-porch. This will also deliver * page-flip completion events that have been queued to us * if a pageflip happened inside front-porch.
*/ if (vrr_active) {
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */ if (acrtc->dm_irq_params.stream &&
adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
/** * Core vblank handling at start of front-porch is only possible * in non-vrr mode, as only there vblank timestamping will give * valid results while done in front-porch. Otherwise defer it * to dm_vupdate_high_irq after end of front-porch.
*/ if (!vrr_active)
amdgpu_dm_crtc_handle_vblank(acrtc);
/** * Following stuff must happen at start of vblank, for crc * computation and below-the-range btr support in vrr mode.
*/
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
/* BTR updates need to happen before VUPDATE on Vega and above. */ if (adev->family < AMDGPU_FAMILY_AI) return;
/* * If there aren't any active_planes then DCH HUBP may be clock-gated. * In that case, pageflip completion interrupts won't fire and pageflip * completion events won't get delivered. Prevent this by sending * pending pageflip events from here if a flip is still pending. * * If any planes are enabled, use dm_pflip_high_irq() instead, to * avoid race conditions between flip programming and completion, * which could cause too early flip completion events.
*/ if (adev->family >= AMDGPU_FAMILY_RV &&
acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc->dm_irq_params.active_planes == 0) { if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
acrtc->event = NULL;
drm_crtc_vblank_put(&acrtc->base);
}
acrtc->pflip_status = AMDGPU_FLIP_NONE;
}
/** * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. * @adev: amdgpu_device pointer * @notify: dmub notification structure * * Dmub AUX or SET_CONFIG command completion processing callback * Copies dmub notification to DM which is to be read by AUX command. * issuing thread and also signals the event to wake up the thread.
*/ staticvoid dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{ if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
}
if (notify == NULL) {
drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL"); return;
}
if (notify->link_index > adev->dm.dc->link_count) {
drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index); return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n"); return;
}
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
dev = adev->dm.ddev;
if (hpd_aconnector) { if (notify->type == DMUB_NOTIFICATION_HPD) { if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} elseif (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
}
}
}
/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure (currently unused here)
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
				    struct dmub_notification *notify)
{
	/* As visible here, this only traces the event; no state is updated. */
	drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/** * register_dmub_notify_callback - Sets callback for DMUB notify * @adev: amdgpu_device pointer * @type: Type of dmub notification * @callback: Dmub interrupt callback function * @dmub_int_thread_offload: offload indicator * * API to register a dmub callback handler for a dmub notification * Also sets indicator whether callback processing to be offloaded. * to dmub interrupt handling thread * Return: true if successfully registered, false if there is existing registration
*/ staticbool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
} else returnfalse;
staticconstchar *dmub_notification_type_str(enum dmub_notification_type e)
{ switch (e) { case DMUB_NOTIFICATION_NO_DATA: return"NO_DATA"; case DMUB_NOTIFICATION_AUX_REPLY: return"AUX_REPLY"; case DMUB_NOTIFICATION_HPD: return"HPD"; case DMUB_NOTIFICATION_HPD_IRQ: return"HPD_IRQ"; case DMUB_NOTIFICATION_SET_CONFIG_REPLY: return"SET_CONFIG_REPLY"; case DMUB_NOTIFICATION_DPIA_NOTIFICATION: return"DPIA_NOTIFICATION"; case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY: return"HPD_SENSE_NOTIFY"; case DMUB_NOTIFICATION_FUSED_IO: return"FUSED_IO"; default: return"";
}
}
do { if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
if (max_size) { int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n"); else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
}
staticint amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, int pipe, bool *enabled, unsignedchar *buf, int max_bytes)
{ struct drm_device *dev = dev_get_drvdata(kdev); struct amdgpu_device *adev = drm_to_adev(dev); struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct amdgpu_dm_connector *aconnector; int ret = 0;
if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ return 0;
if (!fb_info) {
drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n"); return -EINVAL;
}
if (!dmub_fw) { /* Firmware required for DMUB support. */
drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n"); return -EINVAL;
}
/* initialize register offsets for ASICs with runtime initialization available */ if (dmub_srv->hw_funcs.init_reg_offsets)
dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); if (status != DMUB_STATUS_OK) {
drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status); return -EINVAL;
}
if (!has_hw_support) {
drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n"); return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv); if (status != DMUB_STATUS_OK)
drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, * amdgpu_ucode_init_single_fw will load dmub firmware * fw_inst_const part to cw0; otherwise, the firmware back door load * will be done by dm_dmub_hw_init
*/ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
fw_inst_const_size);
}
if (fw_bss_data_size)
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
fw_bss_data, fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size);
/* Reset regions that need to be reset. */
memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
/* backdoor load firmware and trigger dmub running */ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
hw_params.load_inst_const = true;
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
for (i = 0; i < fb_info->num_fb; ++i)
hw_params.fb[i] = &fb_info->fb[i];
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; default: break;
}
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
hw_params.lower_hbr3_phy_ssc = true; break; default: break;
}
status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) {
drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status); return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); if (status != DMUB_STATUS_OK)
drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */ if (dmcu && abm) {
dmcu->funcs->dmcu_init(dmcu);
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
}
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); if (!adev->dm.dc->ctx->dmub_srv) {
drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n"); return -ENOMEM;
}
/* AGP aperture is disabled */ if (agp_bot > agp_top) {
logical_addr_low = adev->gmc.fb_start >> 18; if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the * workaround that increase system aperture high address (add 1) * to get rid of the VM fault and hardware hang.
*/
logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; else
logical_addr_high = adev->gmc.fb_end >> 18;
} else {
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
AMD_APU_IS_RENOIR |
AMD_APU_IS_GREEN_SARDINE)) /* * Raven2 has a HW issue that it is unable to use the vram which * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the * workaround that increase system aperture high address (add 1) * to get rid of the VM fault and hardware hang.
*/
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); else
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
}
mutex_lock(&adev->dm.dc_lock); if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
if (aconnector->timing_changed) { /* force connector disconnect and reconnect */
force_connector_state(aconnector, DRM_FORCE_OFF);
msleep(100);
force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
}
test_response.bits.ACK = 1;
core_link_write_dpcd(
dc_link,
DP_TEST_RESPONSE,
&test_response.raw, sizeof(test_response));
} elseif ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) { /* offload_work->data is from handle_hpd_rx_irq-> * schedule_hpd_rx_offload_work.this is defer handle * for hpd short pulse. upon here, link status may be * changed, need get latest link status from dpcd * registers. if link status is good, skip run link * training again.
*/ union hpd_irq_data irq_data;
memset(&irq_data, 0, sizeof(irq_data));
/* before dc_link_dp_handle_link_loss, allow new link lost handle * request be added to work queue if link lost at end of dc_link_ * dp_handle_link_loss
*/
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
out_err: for (i = 0; i < max_caps; i++) { if (hpd_rx_offload_wq[i].wq)
destroy_workqueue(hpd_rx_offload_wq[i].wq);
}
kfree(hpd_rx_offload_wq); return NULL;
}
/* walk the da list in DM */
list_for_each_entry(da, &adev->dm.da_list, list) { if (pvMem == da->cpu_ptr) {
amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
list_del(&da->list);
kfree(da); break;
}
}
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
bb_size,
&addr); if (!bb) return NULL;
for (i = 0; i < 4; i++) { /* Extract 16-bit chunk */
chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF; /* Send the chunk */
ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) goto free_bb;
}
/* Now ask DMUB to copy the bb */
ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000); if (ret != DMUB_STATUS_OK) goto free_bb;
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): case IP_VERSION(3, 6, 0): case IP_VERSION(3, 5, 1):
ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; break; default: /* ASICs older than DCN35 do not have IPSs */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
ret = DMUB_IPS_DISABLE_ALL; break;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.