Anforderungen  |   Konzepte  |   Entwurf  |   Entwicklung  |   Qualitätssicherung  |   Lebenszyklus  |   Steuerung
 
 
 
 


Quelle  amdgpu_dm.c   Sprache: C

 
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/sort.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <media/cec-notifier.h>
#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU  "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);

#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB);

#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */


/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service, bool oem);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
 switch (link->dpcd_caps.dongle_type) {
 case DISPLAY_DONGLE_NONE:
  return DRM_MODE_SUBCONNECTOR_Native;
 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
  return DRM_MODE_SUBCONNECTOR_VGA;
 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
 case DISPLAY_DONGLE_DP_DVI_DONGLE:
  return DRM_MODE_SUBCONNECTOR_DVID;
 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
  return DRM_MODE_SUBCONNECTOR_HDMIA;
 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
 default:
  return DRM_MODE_SUBCONNECTOR_Unknown;
 }
}

/*
 * Refresh the DP "subconnector" property on @aconnector so userspace
 * sees the dongle type of the attached sink (Unknown when no sink).
 * Non-DisplayPort connectors don't carry the property and are skipped.
 */
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
 struct drm_connector *connector = &aconnector->base;
 enum drm_mode_subconnector subconnector;

 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
  return;

 subconnector = aconnector->dc_sink ?
  get_subconnector_type(aconnector->dc_link) :
  DRM_MODE_SUBCONNECTOR_Unknown;

 drm_object_property_set_value(&connector->base,
   connector->dev->mode_config.dp_subconnector_property,
   subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */

static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
        struct amdgpu_dm_connector *amdgpu_dm_connector,
        u32 link_index,
        struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
      struct amdgpu_encoder *aencoder,
      uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
      struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
      int bl_idx,
      u32 user_brightness);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
     struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter - read the vblank counter for a CRTC
 * @adev: amdgpu device
 * @crtc: CRTC index into adev->mode_info.crtcs
 *
 * Returns the DC vblank counter of the CRTC's stream, or 0 when the
 * index is out of range or the CRTC has no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
 struct amdgpu_crtc *acrtc;

 if (crtc >= adev->mode_info.num_crtc)
  return 0;

 acrtc = adev->mode_info.crtcs[crtc];
 if (acrtc->dm_irq_params.stream)
  return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);

 drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
    crtc);
 return 0;
}

/*
 * dm_crtc_get_scanoutpos - report the current scanout position of a CRTC
 * @adev: amdgpu device
 * @crtc: CRTC index into adev->mode_info.crtcs
 * @vbl: out: v_blank_start in the low 16 bits, v_blank_end in the high 16
 * @position: out: v_position in the low 16 bits, h_position in the high 16
 *
 * Returns -EINVAL for an out-of-range CRTC index. Returns 0 when the CRTC
 * has no stream attached (the outputs are left unwritten in that case) and
 * 0 on success with the outputs filled in.
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
      u32 *vbl, u32 *position)
{
 u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
 struct amdgpu_crtc *acrtc = NULL;
 struct dc *dc = adev->dm.dc;

 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
  return -EINVAL;

 acrtc = adev->mode_info.crtcs[crtc];

 if (!acrtc->dm_irq_params.stream) {
  drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
     crtc);
  return 0;
 }

 /* Reading scanout position needs the hardware awake: exit idle opts. */
 if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
  dc_allow_idle_optimizations(dc, false);

 /*
  * TODO rework base driver to use values directly.
  * for now parse it back into reg-format
  */
 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
     &v_blank_start,
     &v_blank_end,
     &h_position,
     &v_position);

 /* Pack the positions back into the legacy register-style layout. */
 *position = v_position | (h_position << 16);
 *vbl = v_blank_start | (v_blank_end << 16);

 return 0;
}

/* DM IP-block idle query: DM reports itself idle unconditionally (XXX todo). */
static bool dm_is_idle(struct amdgpu_ip_block *ip_block)
{
 (void)ip_block;
 return true;
}

/* DM IP-block wait-for-idle: nothing to wait on yet, always succeeds (XXX todo). */
static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
 (void)ip_block;
 return 0;
}

/* DM IP-block soft-reset check: DM never requests a soft reset. */
static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
 (void)ip_block;
 return false;
}

/* DM IP-block soft reset: not implemented, reports success (XXX todo). */
static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
 (void)ip_block;
 return 0;
}

/*
 * get_crtc_by_otg_inst - find the amdgpu_crtc driven by an OTG instance
 * @adev: amdgpu device
 * @otg_inst: OTG instance, typically derived from an IRQ source
 *
 * Walks the DRM CRTC list and returns the CRTC whose otg_inst matches.
 * An otg_inst of -1 is unexpected (WARN) and falls back to CRTC 0.
 * Returns NULL when no CRTC matches.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
       int otg_inst)
{
 struct drm_device *dev = adev_to_drm(adev);
 struct drm_crtc *crtc;
 struct amdgpu_crtc *amdgpu_crtc;

 if (WARN_ON(otg_inst == -1))
  return adev->mode_info.crtcs[0];

 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
  amdgpu_crtc = to_amdgpu_crtc(crtc);

  if (amdgpu_crtc->otg_inst == otg_inst)
   return amdgpu_crtc;
 }

 return NULL;
}

/*
 * DC needs to (re)program the CRTC timing adjust when an adjustment is
 * already pending on the new stream, the new state pins VRR to a fixed
 * rate, or VRR active state toggled between the old and new CRTC state.
 */
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
           struct dm_crtc_state *new_state)
{
 return new_state->stream->adjust.timing_adjust_pending ||
        new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED ||
        amdgpu_dm_crtc_vrr_active(old_state) !=
         amdgpu_dm_crtc_vrr_active(new_state);
}

/*
 * DC will program planes with their z-order determined by their ordering
 * in the dc_surface_updates array. This comparator is used to sort them
 * by descending zpos.
 */
static int dm_plane_layer_index_cmp(const void *a, const void *b)
{
 /*
  * Keep the pointers const-qualified: the comparator only reads the
  * elements, so there is no reason to cast away const.
  */
 const struct dc_surface_update *sa = a;
 const struct dc_surface_update *sb = b;

 /* Sort by descending dc_plane layer_index (i.e. normalized_zpos) */
 return sb->surface->layer_index - sa->surface->layer_index;
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 *               (currently unused by this wrapper)
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 * Return: value propagated from dc_update_planes_and_stream().
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
          int update_type,
          int planes_count,
          struct dc_stream_state *stream,
          struct dc_stream_update *stream_update,
          struct dc_surface_update *array_of_surface_update)
{
 /* DC programs z-order by array order: sort by descending layer_index. */
 sort(array_of_surface_update, planes_count,
      sizeof(*array_of_surface_update), dm_plane_layer_index_cmp, NULL);

 /*
  * Previous frame finished and HW is ready for optimization.
  */
 dc_post_update_surfaces_to_stream(dc);

 return dc_update_planes_and_stream(dc,
        array_of_surface_update,
        planes_count,
        stream,
        stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters; carries the adev and the
 * pageflip IRQ source used to resolve the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */

static void dm_pflip_high_irq(void *interrupt_params)
{
 struct amdgpu_crtc *amdgpu_crtc;
 struct common_irq_params *irq_params = interrupt_params;
 struct amdgpu_device *adev = irq_params->adev;
 struct drm_device *dev = adev_to_drm(adev);
 unsigned long flags;
 struct drm_pending_vblank_event *e;
 u32 vpos, hpos, v_blank_start, v_blank_end;
 bool vrr_active;

 /* Map the pageflip IRQ source back to its CRTC. */
 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

 /* IRQ could occur when in initial stage */
 /* TODO work and BO cleanup */
 if (amdgpu_crtc == NULL) {
  drm_dbg_state(dev, "CRTC is null, returning.\n");
  return;
 }

 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

 /* Ignore spurious interrupts: only a submitted flip can complete. */
 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
  drm_dbg_state(dev,
         "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
         amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
         amdgpu_crtc->crtc_id, amdgpu_crtc);
  spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
  return;
 }

 /* page flip completed. */
 e = amdgpu_crtc->event;
 amdgpu_crtc->event = NULL;

 WARN_ON(!e);

 vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
 if (!vrr_active ||
     !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
          &v_blank_end, &hpos, &vpos) ||
     (vpos < v_blank_start)) {
  /* Update to correct count and vblank timestamp if racing with
   * vblank irq. This also updates to the correct vblank timestamp
   * even in VRR mode, as scanout is past the front-porch atm.
   */
  drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

  /* Wake up userspace by sending the pageflip event with proper
   * count and timestamp of vblank of flip completion.
   */
  if (e) {
   drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

   /* Event sent, so done with vblank for this flip */
   drm_crtc_vblank_put(&amdgpu_crtc->base);
  }
 } else if (e) {
  /* VRR active and inside front-porch: vblank count and
   * timestamp for pageflip event will only be up to date after
   * drm_crtc_handle_vblank() has been executed from late vblank
   * irq handler after start of back-porch (vline 0). We queue the
   * pageflip event for send-out by drm_crtc_handle_vblank() with
   * updated timestamp and count, once it runs after us.
   *
   * We need to open-code this instead of using the helper
   * drm_crtc_arm_vblank_event(), as that helper would
   * call drm_crtc_accurate_vblank_count(), which we must
   * not call in VRR mode while we are in front-porch!
   */

  /* sequence will be replaced by real count during send-out. */
  e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
  e->pipe = amdgpu_crtc->crtc_id;

  list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
  e = NULL;
 }

 /* Keep track of vblank of this flip for flip throttling. We use the
  * cooked hw counter, as that one incremented at start of this vblank
  * of pageflip completion, so last_flip_vblank is the forbidden count
  * for queueing new pageflips if vsync + VRR is enabled.
  */
 amdgpu_crtc->dm_irq_params.last_flip_vblank =
  amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

 drm_dbg_state(dev,
        "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
        amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}

/*
 * Worker for schedule_dc_vmin_vmax(): applies the deferred vmin/vmax
 * timing adjustment under the DC lock, then drops the stream reference
 * taken at scheduling time and frees the work item and its adjust copy.
 */
static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
{
 struct vupdate_offload_work *work =
  container_of(offload_work, struct vupdate_offload_work, work);
 struct amdgpu_device *adev = work->adev;

 mutex_lock(&adev->dm.dc_lock);
 dc_stream_adjust_vmin_vmax(adev->dm.dc, work->stream, work->adjust);
 mutex_unlock(&adev->dm.dc_lock);

 /* Balances the dc_stream_retain() done when the work was queued. */
 dc_stream_release(work->stream);
 kfree(work->adjust);
 kfree(work);
}

static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
 struct dc_stream_state *stream,
 struct dc_crtc_timing_adjust *adjust)
{
 struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
 if (!offload_work) {
  drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
  return;
 }

 struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
 if (!adjust_copy) {
  drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
  kfree(offload_work);
  return;
 }

 dc_stream_retain(stream);
 memcpy(adjust_copy, adjust, sizeof(*adjust_copy));

 INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
 offload_work->adev = adev;
 offload_work->stream = stream;
 offload_work->adjust = adjust_copy;

 queue_work(system_wq, &offload_work->work);
}

/*
 * dm_vupdate_high_irq - VUPDATE interrupt handler (runs after front-porch)
 * @interrupt_params: common IRQ parameters; carries the adev and IRQ source
 *
 * Traces the observed frame duration and, in VRR mode, performs the core
 * vblank handling that dm_crtc_high_irq() defers until after the
 * front-porch, plus freesync BTR processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
 struct common_irq_params *irq_params = interrupt_params;
 struct amdgpu_device *adev = irq_params->adev;
 struct amdgpu_crtc *acrtc;
 struct drm_device *drm_dev;
 struct drm_vblank_crtc *vblank;
 ktime_t frame_duration_ns, previous_timestamp;
 unsigned long flags;
 int vrr_active;

 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

 if (acrtc) {
  vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
  drm_dev = acrtc->base.dev;
  vblank = drm_crtc_vblank_crtc(&acrtc->base);
  previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
  frame_duration_ns = vblank->time - previous_timestamp;

  /* Trace the effective refresh rate from consecutive vblank timestamps. */
  if (frame_duration_ns > 0) {
   trace_amdgpu_refresh_rate_track(acrtc->base.index,
      frame_duration_ns,
      ktime_divns(NSEC_PER_SEC, frame_duration_ns));
   atomic64_set(&irq_params->previous_timestamp, vblank->time);
  }

  drm_dbg_vbl(drm_dev,
       "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
       vrr_active);

  /* Core vblank handling is done here after end of front-porch in
   * vrr mode, as vblank timestamping will give valid results
   * while now done after front-porch. This will also deliver
   * page-flip completion events that have been queued to us
   * if a pageflip happened inside front-porch.
   */
  if (vrr_active) {
   amdgpu_dm_crtc_handle_vblank(acrtc);

   /* BTR processing for pre-DCE12 ASICs */
   if (acrtc->dm_irq_params.stream &&
       adev->family < AMDGPU_FAMILY_AI) {
    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    mod_freesync_handle_v_update(
        adev->dm.freesync_module,
        acrtc->dm_irq_params.stream,
        &acrtc->dm_irq_params.vrr_params);

    /* Apply the updated vmin/vmax from process context. */
    schedule_dc_vmin_vmax(adev,
     acrtc->dm_irq_params.stream,
     &acrtc->dm_irq_params.vrr_params.adjust);
    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
   }
  }
 }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */

static void dm_crtc_high_irq(void *interrupt_params)
{
 struct common_irq_params *irq_params = interrupt_params;
 struct amdgpu_device *adev = irq_params->adev;
 struct drm_writeback_job *job;
 struct amdgpu_crtc *acrtc;
 unsigned long flags;
 int vrr_active;

 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
 if (!acrtc)
  return;

 /* Complete any pending writeback job for this CRTC. */
 if (acrtc->wb_conn) {
  spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);

  if (acrtc->wb_pending) {
   job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
             struct drm_writeback_job,
             list_entry);
   acrtc->wb_pending = false;
   spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

   if (job) {
    unsigned int v_total, refresh_hz;
    struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

    /* Delay one refresh period before signaling completion. */
    v_total = stream->adjust.v_total_max ?
       stream->adjust.v_total_max : stream->timing.v_total;
    refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
          100LL, (v_total * stream->timing.h_total));
    mdelay(1000 / refresh_hz);

    drm_writeback_signal_completion(acrtc->wb_conn, 0);
    dc_stream_fc_disable_writeback(adev->dm.dc,
              acrtc->dm_irq_params.stream, 0);
   }
  } else
   spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
 }

 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

 drm_dbg_vbl(adev_to_drm(adev),
      "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
      vrr_active, acrtc->dm_irq_params.active_planes);

 /*
  * Core vblank handling at start of front-porch is only possible
  * in non-vrr mode, as only there vblank timestamping will give
  * valid results while done in front-porch. Otherwise defer it
  * to dm_vupdate_high_irq after end of front-porch.
  */
 if (!vrr_active)
  amdgpu_dm_crtc_handle_vblank(acrtc);

 /*
  * Following stuff must happen at start of vblank, for crc
  * computation and below-the-range btr support in vrr mode.
  */
 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

 /* BTR updates need to happen before VUPDATE on Vega and above. */
 if (adev->family < AMDGPU_FAMILY_AI)
  return;

 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

 if (acrtc->dm_irq_params.stream &&
     acrtc->dm_irq_params.vrr_params.supported &&
     acrtc->dm_irq_params.freesync_config.state ==
      VRR_STATE_ACTIVE_VARIABLE) {
  mod_freesync_handle_v_update(adev->dm.freesync_module,
          acrtc->dm_irq_params.stream,
          &acrtc->dm_irq_params.vrr_params);

  /* Apply the refreshed vmin/vmax from process context. */
  schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
    &acrtc->dm_irq_params.vrr_params.adjust);
 }

 /*
  * If there aren't any active_planes then DCH HUBP may be clock-gated.
  * In that case, pageflip completion interrupts won't fire and pageflip
  * completion events won't get delivered. Prevent this by sending
  * pending pageflip events from here if a flip is still pending.
  *
  * If any planes are enabled, use dm_pflip_high_irq() instead, to
  * avoid race conditions between flip programming and completion,
  * which could cause too early flip completion events.
  */
 if (adev->family >= AMDGPU_FAMILY_RV &&
     acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
     acrtc->dm_irq_params.active_planes == 0) {
  if (acrtc->event) {
   drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
   acrtc->event = NULL;
   drm_crtc_vblank_put(&acrtc->base);
  }
  acrtc->pflip_status = AMDGPU_FLIP_NONE;
 }

 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */

static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
 struct common_irq_params *irq_params = interrupt_params;
 struct amdgpu_crtc *acrtc =
  get_crtc_by_otg_inst(irq_params->adev,
         irq_params->irq_src - IRQ_TYPE_VLINE0);

 /* Forward vline0 to the CRC window handler for the matching CRTC. */
 if (acrtc)
  amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Copies the DMUB notification into DM for the thread that issued the
 * AUX command to read, and signals its completion for AUX replies.
 */

static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
     struct dmub_notification *notify)
{
 /* Publish the notification for the waiting AUX command thread. */
 if (adev->dm.dmub_notify)
  memcpy(adev->dm.dmub_notify, notify, sizeof(*notify));

 /* Only AUX replies have a waiter parked on this completion. */
 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
  complete(&adev->dm.dmub_aux_transfer_done);
}

static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
     struct dmub_notification *notify)
{
 if (!adev || !notify) {
  ASSERT(false);
  return;
 }

 const struct dmub_cmd_fused_request *req = ¬ify->fused_request;
 const uint8_t ddc_line = req->u.aux.ddc_line;

 if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
  ASSERT(false);
  return;
 }

 struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];

 static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
 memcpy(sync->reply_data, req, sizeof(*req));
 complete(&sync->replied);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets displayindex through the
 * ink index and calls helper to do the processing.
 */

static void dmub_hpd_callback(struct amdgpu_device *adev,
         struct dmub_notification *notify)
{
 struct amdgpu_dm_connector *aconnector;
 struct amdgpu_dm_connector *hpd_aconnector = NULL;
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 struct dc_link *link;
 u8 link_index = 0;
 struct drm_device *dev;

 if (adev == NULL)
  return;

 if (notify == NULL) {
  drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
  return;
 }

 if (notify->link_index > adev->dm.dc->link_count) {
  drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
  return;
 }

 /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
 if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
  drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
  return;
 }

 link_index = notify->link_index;
 link = adev->dm.dc->links[link_index];
 dev = adev->dm.ddev;

 drm_connector_list_iter_begin(dev, &iter);
 drm_for_each_connector_iter(connector, &iter) {

  if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
   continue;

  aconnector = to_amdgpu_dm_connector(connector);
  if (link && aconnector->dc_link == link) {
   if (notify->type == DMUB_NOTIFICATION_HPD)
    drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
   else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
    drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
   else
    drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
      notify->type, link_index);

   hpd_aconnector = aconnector;
   break;
  }
 }
 drm_connector_list_iter_end(&iter);

 if (hpd_aconnector) {
  if (notify->type == DMUB_NOTIFICATION_HPD) {
   if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
    drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
   handle_hpd_irq_helper(hpd_aconnector);
  } else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
   handle_hpd_rx_irq(hpd_aconnector);
  }
 }
}

/**
 * dmub_hpd_sense_callback - DMUB HPD sense processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * HPD sense changes can occur during low power states and need to be
 * notified from firmware to driver.
 */

static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
         struct dmub_notification *notify)
{
 /* Log-only for now: no driver state is updated here. */
 drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * Registers a dmub callback handler for one dmub notification type and
 * records whether its processing should be offloaded to the dmub
 * interrupt handling thread.
 *
 * Return: true on success, false for a NULL callback or an
 * out-of-range notification type.
 */

static bool register_dmub_notify_callback(struct amdgpu_device *adev,
       enum dmub_notification_type type,
       dmub_notify_interrupt_callback_t callback,
       bool dmub_int_thread_offload)
{
 if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
  return false;

 adev->dm.dmub_callback[type] = callback;
 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
 return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
 struct dmub_hpd_work *dmub_hpd_wrk;

 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

 if (!dmub_hpd_wrk->dmub_notify) {
  drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
  return;
 }

 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
  dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
  dmub_hpd_wrk->dmub_notify);
 }

 kfree(dmub_hpd_wrk->dmub_notify);
 kfree(dmub_hpd_wrk);

}

/* Human-readable name for a DMUB notification type ("" when unknown). */
static const char *dmub_notification_type_str(enum dmub_notification_type e)
{
 if (e == DMUB_NOTIFICATION_NO_DATA)
  return "NO_DATA";
 if (e == DMUB_NOTIFICATION_AUX_REPLY)
  return "AUX_REPLY";
 if (e == DMUB_NOTIFICATION_HPD)
  return "HPD";
 if (e == DMUB_NOTIFICATION_HPD_IRQ)
  return "HPD_IRQ";
 if (e == DMUB_NOTIFICATION_SET_CONFIG_REPLY)
  return "SET_CONFIG_REPLY";
 if (e == DMUB_NOTIFICATION_DPIA_NOTIFICATION)
  return "DPIA_NOTIFICATION";
 if (e == DMUB_NOTIFICATION_HPD_SENSE_NOTIFY)
  return "HPD_SENSE_NOTIFY";
 if (e == DMUB_NOTIFICATION_FUSED_IO)
  return "FUSED_IO";
 return "";
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */

static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
 struct dmub_notification notify = {0};
 struct common_irq_params *irq_params = interrupt_params;
 struct amdgpu_device *adev = irq_params->adev;
 struct amdgpu_display_manager *dm = &adev->dm;
 struct dmcub_trace_buf_entry entry = { 0 };
 u32 count = 0;
 struct dmub_hpd_work *dmub_hpd_wrk;

 do {
  if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
   trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
       entry.param0, entry.param1);

   drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
     entry.trace_code, entry.tick_count, entry.param0, entry.param1);
  } else
   break;

  count++;

 } while (count <= DMUB_TRACE_MAX_READ);

 if (count > DMUB_TRACE_MAX_READ)
  drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");

 if (dc_enable_dmub_notifications(adev->dm.dc) &&
  irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

  do {
   dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
   if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
    drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
    continue;
   }
   if (!dm->dmub_callback[notify.type]) {
    drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
     dmub_notification_type_str(notify.type));
    continue;
   }
   if (dm->dmub_thread_offload[notify.type] == true) {
    dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
    if (!dmub_hpd_wrk) {
     drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
     return;
    }
    dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification),
            GFP_ATOMIC);
    if (!dmub_hpd_wrk->dmub_notify) {
     kfree(dmub_hpd_wrk);
     drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
     return;
    }
    INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
    dmub_hpd_wrk->adev = adev;
    queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
   } else {
    dm->dmub_callback[notify.type](adev, ¬ify);
   }
  } while (notify.pending_notification);
 }
}

/* Stub: the display manager programs no clockgating; always reports success. */
static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
    enum amd_clockgating_state state)
{
 return 0;
}

/* Stub: the display manager programs no powergating; always reports success. */
static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
    enum amd_powergating_state state)
{
 return 0;
}

/* Prototypes of private functions */
static int dm_early_init(struct amdgpu_ip_block *ip_block);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
 struct amdgpu_device *adev = drm_to_adev(connector->dev);
 struct dm_compressor_info *compressor = &adev->dm.compressor;
 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
 struct drm_display_mode *mode;
 unsigned long max_size = 0;

 if (adev->dm.dc->fbc_compressor == NULL)
  return;

 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
  return;

 if (compressor->bo_ptr)
  return;


 list_for_each_entry(mode, &connector->modes, head) {
  if (max_size < (unsigned long) mode->htotal * mode->vtotal)
   max_size = (unsigned long) mode->htotal * mode->vtotal;
 }

 if (max_size) {
  int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
       AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
       &compressor->gpu_addr, &compressor->cpu_addr);

  if (r)
   drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
  else {
   adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
   drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
  }

 }

}

/*
 * Copy the ELD (EDID-Like Data) of the connector bound to @port into @buf.
 * Returns the ELD size in bytes, 0 when no matching connector was found.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
       int pipe, bool *enabled,
       unsigned char *buf, int max_bytes)
{
 struct drm_device *dev = dev_get_drvdata(kdev);
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_connector_list_iter conn_iter;
 struct drm_connector *connector;
 int eld_size = 0;

 *enabled = false;

 mutex_lock(&adev->dm.audio_lock);

 drm_connector_list_iter_begin(dev, &conn_iter);
 drm_for_each_connector_iter(connector, &conn_iter) {
  struct amdgpu_dm_connector *aconnector;

  /* Writeback connectors carry no audio. */
  if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
   continue;

  aconnector = to_amdgpu_dm_connector(connector);
  if (aconnector->audio_inst != port)
   continue;

  *enabled = true;

  /* eld_mutex guards the connector's cached ELD bytes. */
  mutex_lock(&connector->eld_mutex);
  eld_size = drm_eld_size(connector->eld);
  memcpy(buf, connector->eld, min(max_bytes, eld_size));
  mutex_unlock(&connector->eld_mutex);
  break;
 }
 drm_connector_list_iter_end(&conn_iter);

 mutex_unlock(&adev->dm.audio_lock);

 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, eld_size, *enabled);

 return eld_size;
}

/* Ops table exposed through the audio component interface (ELD retrieval). */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
 .get_eld = amdgpu_dm_audio_component_get_eld,
};

/* Bind callback: wire the DM up to the audio component framework. */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
           struct device *hda_kdev, void *data)
{
 struct drm_audio_component *acomp = data;
 struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));

 acomp->ops = &amdgpu_dm_audio_component_ops;
 acomp->dev = kdev;
 adev->dm.audio_component = acomp;

 return 0;
}

/* Unbind callback: sever the audio component link in both directions. */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
       struct device *hda_kdev, void *data)
{
 struct drm_device *dev = dev_get_drvdata(kdev);
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_audio_component *acomp = data;

 acomp->ops = NULL;
 acomp->dev = NULL;
 adev->dm.audio_component = NULL;
}

/* Component bind/unbind hooks registered via component_add(). */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
 .bind = amdgpu_dm_audio_component_bind,
 .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
 int i, ret;

 if (!amdgpu_audio)
  return 0;

 adev->mode_info.audio.enabled = true;

 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
  adev->mode_info.audio.pin[i].channels = -1;
  adev->mode_info.audio.pin[i].rate = -1;
  adev->mode_info.audio.pin[i].bits_per_sample = -1;
  adev->mode_info.audio.pin[i].status_bits = 0;
  adev->mode_info.audio.pin[i].category_code = 0;
  adev->mode_info.audio.pin[i].connected = false;
  adev->mode_info.audio.pin[i].id =
   adev->dm.dc->res_pool->audios[i]->inst;
  adev->mode_info.audio.pin[i].offset = 0;
 }

 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
 if (ret < 0)
  return ret;

 adev->dm.audio_registered = true;

 return 0;
}

/* Tear down the audio component registration, if any. */
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
 if (!amdgpu_audio || !adev->mode_info.audio.enabled)
  return;

 if (adev->dm.audio_registered) {
  component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
  adev->dm.audio_registered = false;
 }

 /* TODO: Disable audio? */

 adev->mode_info.audio.enabled = false;
}

/* Tell the bound audio driver that the ELD for @pin changed. */
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
 struct drm_audio_component *acomp = adev->dm.audio_component;

 if (!acomp || !acomp->audio_ops || !acomp->audio_ops->pin_eld_notify)
  return;

 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
      pin, -1);
}

/*
 * dm_dmub_hw_init() - Load DMUB firmware and VBIOS data into the framebuffer
 * windows and bring the DMUB hardware up.
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success or when DMUB is absent/unsupported on this ASIC,
 * -EINVAL/-ENOMEM on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
 const struct dmcub_firmware_header_v1_0 *hdr;
 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
 const struct firmware *dmub_fw = adev->dm.dmub_fw;
 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
 struct abm *abm = adev->dm.dc->res_pool->abm;
 struct dc_context *ctx = adev->dm.dc->ctx;
 struct dmub_srv_hw_params hw_params;
 enum dmub_status status;
 const unsigned char *fw_inst_const, *fw_bss_data;
 u32 i, fw_inst_const_size, fw_bss_data_size;
 bool has_hw_support;

 if (!dmub_srv)
  /* DMUB isn't supported on the ASIC. */
  return 0;

 if (!fb_info) {
  drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
  return -EINVAL;
 }

 if (!dmub_fw) {
  /* Firmware required for DMUB support. */
  drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
  return -EINVAL;
 }

 /* initialize register offsets for ASICs with runtime initialization available */
 if (dmub_srv->hw_funcs.init_reg_offsets)
  dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
 if (status != DMUB_STATUS_OK) {
  drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
  return -EINVAL;
 }

 if (!has_hw_support) {
  drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
  return 0;
 }

 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
 status = dmub_srv_hw_reset(dmub_srv);
 if (status != DMUB_STATUS_OK)
  drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);

 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

 /* Instruction/constant data follows the PSP header inside the ucode blob. */
 fw_inst_const = dmub_fw->data +
   le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
   PSP_HEADER_BYTES;

 fw_bss_data = dmub_fw->data +
        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
        le32_to_cpu(hdr->inst_const_bytes);

 /* Copy firmware and bios info into FB memory. */
 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
 * amdgpu_ucode_init_single_fw will load dmub firmware
 * fw_inst_const part to cw0; otherwise, the firmware back door load
 * will be done by dm_dmub_hw_init
 */

 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
  memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
    fw_inst_const_size);
 }

 if (fw_bss_data_size)
  memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
         fw_bss_data, fw_bss_data_size);

 /* Copy firmware bios info into FB memory. */
 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
        adev->bios_size);

 /* Reset regions that need to be reset. */
 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
        fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
        fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

 memset(fb_info->fb[DMUB_WINDOW_SHARED_STATE].cpu_addr, 0,
        fb_info->fb[DMUB_WINDOW_SHARED_STATE].size);

 /* Initialize hardware. */
 memset(&hw_params, 0, sizeof(hw_params));
 hw_params.fb_base = adev->gmc.fb_start;
 hw_params.fb_offset = adev->vm_manager.vram_base_offset;

 /* backdoor load firmware and trigger dmub running */
 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
  hw_params.load_inst_const = true;

 if (dmcu)
  hw_params.psp_version = dmcu->psp_version;

 for (i = 0; i < fb_info->num_fb; ++i)
  hw_params.fb[i] = &fb_info->fb[i];

 /* DPIA (USB4 tunneled DP) support is per-IP-version. */
 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(3, 1, 3):
 case IP_VERSION(3, 1, 4):
 case IP_VERSION(3, 5, 0):
 case IP_VERSION(3, 5, 1):
 case IP_VERSION(3, 6, 0):
 case IP_VERSION(4, 0, 1):
  hw_params.dpia_supported = true;
  hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
  break;
 default:
  break;
 }

 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(3, 5, 0):
 case IP_VERSION(3, 5, 1):
 case IP_VERSION(3, 6, 0):
  hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
  hw_params.lower_hbr3_phy_ssc = true;
  break;
 default:
  break;
 }

 status = dmub_srv_hw_init(dmub_srv, &hw_params);
 if (status != DMUB_STATUS_OK) {
  drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
  return -EINVAL;
 }

 /* Wait for firmware load to finish. */
 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
 if (status != DMUB_STATUS_OK)
  drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);

 /* Init DMCU and ABM if available. */
 if (dmcu && abm) {
  dmcu->funcs->dmcu_init(dmcu);
  abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
 }

 /* Create the DC-side DMUB server once; keep it across re-inits. */
 if (!adev->dm.dc->ctx->dmub_srv)
  adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
 if (!adev->dm.dc->ctx->dmub_srv) {
  drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
  return -ENOMEM;
 }

 drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
   adev->dm.dmcub_fw_version);

 /* Keeping sanity checks off if
 * DCN31 >= 4.0.59.0
 * DCN314 >= 8.0.16.0
 * Otherwise, turn on sanity checks
 */

 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(3, 1, 2):
 case IP_VERSION(3, 1, 3):
  if (adev->dm.dmcub_fw_version &&
   adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
   adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
    adev->dm.dc->debug.sanity_checks = true;
  break;
 case IP_VERSION(3, 1, 4):
  if (adev->dm.dmcub_fw_version &&
   adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
   adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
    adev->dm.dc->debug.sanity_checks = true;
  break;
 default:
  break;
 }

 return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
 enum dmub_status status;
 bool init;
 int r;

 if (!dmub_srv) {
  /* DMUB isn't supported on the ASIC. */
  return;
 }

 status = dmub_srv_is_hw_init(dmub_srv, &init);
 if (status != DMUB_STATUS_OK)
  drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);

 if (status == DMUB_STATUS_OK && init) {
  /* Wait for firmware load to finish. */
  status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
  if (status != DMUB_STATUS_OK)
   drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
 } else {
  /* Perform the full hardware initialization. */
  r = dm_dmub_hw_init(adev);
  if (r)
   drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
 }
}

/*
 * Fill @pa_config with the GPU's physical address space layout (system
 * aperture, AGP aperture, framebuffer range, GART page table addresses)
 * for DC to program into the display hub.
 *
 * Units: aperture low/high values are kept in 256 KiB units (>> 18),
 * AGP values in 16 MiB units (>> 24), page table addresses in 4 KiB
 * pages (>> AMDGPU_GPU_PAGE_SHIFT / << 12).
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
 u64 pt_base;
 u32 logical_addr_low;
 u32 logical_addr_high;
 u32 agp_base, agp_bot, agp_top;
 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

 memset(pa_config, 0, sizeof(*pa_config));

 agp_base = 0;
 agp_bot = adev->gmc.agp_start >> 24;
 agp_top = adev->gmc.agp_end >> 24;

 /* AGP aperture is disabled */
 if (agp_bot > agp_top) {
  /* Aperture covers only the framebuffer range. */
  logical_addr_low = adev->gmc.fb_start >> 18;
  if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
           AMD_APU_IS_RENOIR |
           AMD_APU_IS_GREEN_SARDINE))
   /*
 * Raven2 has a HW issue that it is unable to use the vram which
 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
 * workaround that increase system aperture high address (add 1)
 * to get rid of the VM fault and hardware hang.
 */

   logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
  else
   logical_addr_high = adev->gmc.fb_end >> 18;
 } else {
  /* Aperture must span both the framebuffer and AGP ranges. */
  logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
  if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
           AMD_APU_IS_RENOIR |
           AMD_APU_IS_GREEN_SARDINE))
   /*
 * Raven2 has a HW issue that it is unable to use the vram which
 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
 * workaround that increase system aperture high address (add 1)
 * to get rid of the VM fault and hardware hang.
 */

   logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
  else
   logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
 }

 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

 /* GART page table range, split into 32-bit halves in GPU-page units. */
 page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
         AMDGPU_GPU_PAGE_SHIFT);
 page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
        AMDGPU_GPU_PAGE_SHIFT);
 page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
       AMDGPU_GPU_PAGE_SHIFT);
 page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
      AMDGPU_GPU_PAGE_SHIFT);
 page_table_base.high_part = upper_32_bits(pt_base);
 page_table_base.low_part = lower_32_bits(pt_base);

 /* Expand the shifted units back to byte addresses for DC. */
 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
 pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
 pa_config->system_aperture.fb_top = adev->gmc.fb_end;

 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

 pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;

}

static void force_connector_state(
 struct amdgpu_dm_connector *aconnector,
 enum drm_connector_force force_state)
{
 struct drm_connector *connector = &aconnector->base;

 mutex_lock(&connector->dev->mode_config.mutex);
 aconnector->base.force = force_state;
 mutex_unlock(&connector->dev->mode_config.mutex);

 mutex_lock(&aconnector->hpd_lock);
 drm_kms_helper_connector_hotplug_event(connector);
 mutex_unlock(&aconnector->hpd_lock);
}

/*
 * Deferred (process-context) handler for HPD RX short-pulse interrupts.
 * Depending on the IRQ data it either forwards MST sideband messages,
 * runs DP automated-test handling, or re-checks link status and retrains
 * on link loss. Frees @offload_work on exit.
 */
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
 struct hpd_rx_irq_offload_work *offload_work;
 struct amdgpu_dm_connector *aconnector;
 struct dc_link *dc_link;
 struct amdgpu_device *adev;
 enum dc_connection_type new_connection_type = dc_connection_none;
 unsigned long flags;
 union test_response test_response;

 memset(&test_response, 0, sizeof(test_response));

 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
 aconnector = offload_work->offload_wq->aconnector;
 adev = offload_work->adev;

 if (!aconnector) {
  drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
  goto skip;
 }

 dc_link = aconnector->dc_link;

 /* Re-detect under the HPD lock; bail if nothing is connected anymore. */
 mutex_lock(&aconnector->hpd_lock);
 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
  drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
 mutex_unlock(&aconnector->hpd_lock);

 if (new_connection_type == dc_connection_none)
  goto skip;

 if (amdgpu_in_reset(adev))
  goto skip;

 /* MST sideband message ready: hand off to the MST manager and clear the
  * in-flight flag under the offload spinlock.
  */
 if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
  offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
  dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
  spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
  offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
  spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
  goto skip;
 }

 mutex_lock(&adev->dm.dc_lock);
 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
  dc_link_dp_handle_automated_test(dc_link);

  if (aconnector->timing_changed) {
   /* force connector disconnect and reconnect */
   force_connector_state(aconnector, DRM_FORCE_OFF);
   msleep(100);
   force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
  }

  test_response.bits.ACK = 1;

  core_link_write_dpcd(
  dc_link,
  DP_TEST_RESPONSE,
  &test_response.raw,
  sizeof(test_response));
 } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
   dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
   dc_link_dp_allow_hpd_rx_irq(dc_link)) {
  /* offload_work->data is from handle_hpd_rx_irq->
 * schedule_hpd_rx_offload_work.this is defer handle
 * for hpd short pulse. upon here, link status may be
 * changed, need get latest link status from dpcd
 * registers. if link status is good, skip run link
 * training again.
 */

  union hpd_irq_data irq_data;

  memset(&irq_data, 0, sizeof(irq_data));

  /* before dc_link_dp_handle_link_loss, allow new link lost handle
 * request be added to work queue if link lost at end of dc_link_
 * dp_handle_link_loss
 */

  spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
  offload_work->offload_wq->is_handling_link_loss = false;
  spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

  if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
   dc_link_check_link_loss_status(dc_link, &irq_data))
   dc_link_dp_handle_link_loss(dc_link);
 }
 mutex_unlock(&adev->dm.dc_lock);

skip:
 kfree(offload_work);

}

/*
 * Allocate one HPD RX offload workqueue per possible link.
 * Returns the zero-initialized array, or NULL on allocation failure.
 */
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
 struct dc *dc = adev->dm.dc;
 int max_caps = dc->caps.max_links;
 int i;

 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
 if (!hpd_rx_offload_wq)
  return NULL;

 for (i = 0; i < max_caps; i++) {
  hpd_rx_offload_wq[i].wq =
   create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
  if (!hpd_rx_offload_wq[i].wq) {
   drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
   goto out_err;
  }

  spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
 }

 return hpd_rx_offload_wq;

out_err:
 /* kcalloc() zeroed the array, so slots never allocated are NULL here. */
 for (i = 0; i < max_caps; i++) {
  if (hpd_rx_offload_wq[i].wq)
   destroy_workqueue(hpd_rx_offload_wq[i].wq);
 }
 kfree(hpd_rx_offload_wq);
 return NULL;
}

/* PCI identity of a board on which memory stutter mode must be disabled. */
struct amdgpu_stutter_quirk {
 u16 chip_vendor;   /* PCI vendor ID of the GPU */
 u16 chip_device;   /* PCI device ID of the GPU */
 u16 subsys_vendor; /* board subsystem vendor ID */
 u16 subsys_device; /* board subsystem device ID */
 u8 revision;       /* PCI revision ID */
};

/* Boards matched by dm_should_disable_stutter(); all-zero entry terminates. */
static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
 { 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

 while (p && p->chip_device != 0) {
  if (pdev->vendor == p->chip_vendor &&
      pdev->device == p->chip_device &&
      pdev->subsystem_vendor == p->subsys_vendor &&
      pdev->subsystem_device == p->subsys_device &&
      pdev->revision == p->revision) {
   return true;
  }
  ++p;
 }
 return false;
}


/*
 * dm_allocate_gpu_mem() - Allocate a kernel-mapped GPU buffer for DC.
 * @adev: amdgpu_device pointer
 * @type: DC_MEM_ALLOC_TYPE_GART selects GTT, anything else VRAM
 * @size: allocation size in bytes (rounded to PAGE_SIZE alignment)
 * @addr: out: GPU virtual address of the buffer (valid only on success)
 *
 * The allocation is tracked on adev->dm.da_list so dm_free_gpu_mem() can
 * find it by its CPU pointer. Returns the CPU mapping, or NULL on failure.
 */
void*
dm_allocate_gpu_mem(
  struct amdgpu_device *adev,
  enum dc_gpu_mem_alloc_type type,
  size_t size,
  long long *addr)
{
 struct dal_allocation *da;
 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
  AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
 int ret;

 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
 if (!da)
  return NULL;

 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
          domain, &da->bo,
          &da->gpu_addr, &da->cpu_ptr);
 if (ret) {
  kfree(da);
  return NULL;
 }

 /* Fix: report the GPU address only after the allocation succeeded,
  * so *addr is never clobbered on the failure path.
  */
 *addr = da->gpu_addr;

 /* add da to list in dm */
 list_add(&da->list, &adev->dm.da_list);

 return da->cpu_ptr;
}

/*
 * Free a buffer previously returned by dm_allocate_gpu_mem(), looked up
 * by its CPU mapping. A pointer not found in the list is silently ignored.
 */
void
dm_free_gpu_mem(
  struct amdgpu_device *adev,
  enum dc_gpu_mem_alloc_type type,
  void *pvMem)
{
 struct dal_allocation *da;

 /* Locate the tracked allocation whose CPU mapping matches pvMem. */
 list_for_each_entry(da, &adev->dm.da_list, list) {
  if (pvMem != da->cpu_ptr)
   continue;

  amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
  list_del(&da->list);
  kfree(da);
  return;
 }
}

/*
 * Send a GPINT command to the VBIOS-loaded DMUB and poll (1 us steps, up
 * to @timeout_us) until the status bit clears, indicating the command was
 * acknowledged. Returns DMUB_STATUS_OK or DMUB_STATUS_TIMEOUT.
 *
 * NOTE(review): 0x34c0 + 0x01f8 appears to be the DMCUB GPINT data
 * register offset — confirm against the register headers before reuse.
 */
static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
     enum dmub_gpint_command command_code,
     uint16_t param,
     uint32_t timeout_us)
{
 union dmub_gpint_data_register reg, test;
 uint32_t i;

 /* Assume that VBIOS DMUB is ready to take commands */

 reg.bits.status = 1;
 reg.bits.command_code = command_code;
 reg.bits.param = param;

 cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);

 for (i = 0; i < timeout_us; ++i) {
  udelay(1);

  /* Check if our GPINT got acked */
  reg.bits.status = 0;
  test = (union dmub_gpint_data_register)
   cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);

  /* Ack = same register contents with the status bit cleared. */
  if (test.all == reg.all)
   return DMUB_STATUS_OK;
 }

 return DMUB_STATUS_TIMEOUT;
}

static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
 void *bb;
 long long addr;
 unsigned int bb_size;
 int i = 0;
 uint16_t chunk;
 enum dmub_gpint_command send_addrs[] = {
  DMUB_GPINT__SET_BB_ADDR_WORD0,
  DMUB_GPINT__SET_BB_ADDR_WORD1,
  DMUB_GPINT__SET_BB_ADDR_WORD2,
  DMUB_GPINT__SET_BB_ADDR_WORD3,
 };
 enum dmub_status ret;

 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(4, 0, 1):
  bb_size = sizeof(struct dml2_soc_bb);
  break;
 default:
  return NULL;
 }

 bb =  dm_allocate_gpu_mem(adev,
      DC_MEM_ALLOC_TYPE_GART,
      bb_size,
      &addr);
 if (!bb)
  return NULL;

 for (i = 0; i < 4; i++) {
  /* Extract 16-bit chunk */
  chunk = ((uint64_t) addr >> (i * 16)) & 0xFFFF;
  /* Send the chunk */
  ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
  if (ret != DMUB_STATUS_OK)
   goto free_bb;
 }

 /* Now ask DMUB to copy the bb */
 ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
 if (ret != DMUB_STATUS_OK)
  goto free_bb;

 return bb;

free_bb:
 dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
 return NULL;

}

static enum dmub_ips_disable_type dm_get_default_ips_mode(
 struct amdgpu_device *adev)
{
 enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;

 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(3, 5, 0):
 case IP_VERSION(3, 6, 0):
 case IP_VERSION(3, 5, 1):
  ret =  DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
  break;
 default:
  /* ASICs older than DCN35 do not have IPSs */
  if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0))
   ret = DMUB_IPS_DISABLE_ALL;
  break;
 }

 return ret;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
 struct dc_init_data init_data;
 struct dc_callback_init init_params;
 int r;

 adev->dm.ddev = adev_to_drm(adev);
 adev->dm.adev = adev;

 /* Zero all the fields */
 memset(&init_data, 0, sizeof(init_data));
 memset(&init_params, 0, sizeof(init_params));

 mutex_init(&adev->dm.dpia_aux_lock);
 mutex_init(&adev->dm.dc_lock);
 mutex_init(&adev->dm.audio_lock);

 if (amdgpu_dm_irq_init(adev)) {
  drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
  goto error;
 }

 init_data.asic_id.chip_family = adev->family;

 init_data.asic_id.pci_revision_id = adev->pdev->revision;
 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
 init_data.asic_id.chip_id = adev->pdev->device;

 init_data.asic_id.vram_width = adev->gmc.vram_width;
 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
 init_data.asic_id.atombios_base_address =
  adev->mode_info.atom_context->bios;

 init_data.driver = adev;

 /* cgs_device was created in dm_sw_init() */
 init_data.cgs_device = adev->dm.cgs_device;

 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
 case IP_VERSION(2, 1, 0):
  switch (adev->dm.dmcub_fw_version) {
  case 0: /* development */
  case 0x1: /* linux-firmware.git hash 6d9f399 */
  case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
   init_data.flags.disable_dmcu = false;
   break;
  default:
   init_data.flags.disable_dmcu = true;
  }
  break;
 case IP_VERSION(2, 0, 3):
  init_data.flags.disable_dmcu = true;
  break;
 default:
  break;
 }

 /* APU support S/G display by default except:
 * ASICs before Carrizo,
 * RAVEN1 (Users reported stability issue)
 */


 if (adev->asic_type < CHIP_CARRIZO) {
  init_data.flags.gpu_vm_support = false;
 } else if (adev->asic_type == CHIP_RAVEN) {
  if (adev->apu_flags & AMD_APU_IS_RAVEN)
   init_data.flags.gpu_vm_support = false;
  else
   init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
 } else {
  if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
   init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
  else
   init_data.flags.gpu_vm_support =
    (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
 }

 adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;

 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
  init_data.flags.fbc_support = true;

 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
  init_data.flags.multi_mon_pp_mclk_switch = true;

 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
  init_data.flags.disable_fractional_pwm = true;

 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
  init_data.flags.edp_no_power_sequencing = true;

 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
  init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
  init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

 init_data.flags.seamless_boot_edp_requested = false;

 if (amdgpu_device_seamless_boot_supported(adev)) {
  init_data.flags.seamless_boot_edp_requested = true;
  init_data.flags.allow_seamless_boot_optimization = true;
  drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
 }

 init_data.flags.enable_mipi_converter_optimization = true;

 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
 init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];

 if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
  init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
  init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
 else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
  init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
 else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
  init_data.flags.disable_ips = DMUB_IPS_ENABLE;
 else
  init_data.flags.disable_ips = dm_get_default_ips_mode(adev);

 init_data.flags.disable_ips_in_vpb = 0;

 /* DCN35 and above supports dynamic DTBCLK switch */
 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
  init_data.flags.allow_0_dtb_clk = true;

 /* Enable DWB for tested platforms only */
 if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
  init_data.num_virtual_links = 1;

 retrieve_dmi_info(&adev->dm);
 if (adev->dm.edp0_on_dp1_quirk)
  init_data.flags.support_edp0_on_dp1 = true;

 if (adev->dm.bb_from_dmub)
  init_data.bb_from_dmub = adev->dm.bb_from_dmub;
 else
  init_data.bb_from_dmub = NULL;

 /* Display Core create. */
 adev->dm.dc = dc_create(&init_data);

 if (adev->dm.dc) {
  drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
    dce_version_to_string(adev->dm.dc->ctx->dce_version));
 } else {
  drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
  goto error;
 }

 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
  adev->dm.dc->debug.force_single_disp_pipe_split = false;
  adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
 }

 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
  adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
 if (dm_should_disable_stutter(adev->pdev))
  adev->dm.dc->debug.disable_stutter = true;

 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
  adev->dm.dc->debug.disable_stutter = true;

 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
  adev->dm.dc->debug.disable_dsc = true;

 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
  adev->dm.dc->debug.disable_clock_gate = true;

 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
  adev->dm.dc->debug.force_subvp_mclk_switch = true;

 if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
  adev->dm.dc->debug.force_disable_subvp = true;
  adev->dm.dc->debug.fams2_config.bits.enable = false;
 }

 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
  adev->dm.dc->debug.using_dml2 = true;
  adev->dm.dc->debug.using_dml21 = true;
 }

 if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
  adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;

 if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
  adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;

 if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
  adev->dm.dc->debug.skip_detection_link_training = true;

 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
 adev->dm.dc->debug.ignore_cable_id = true;

 if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
  drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");

 r = dm_dmub_hw_init(adev);
 if (r) {
  drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
  goto error;
 }

 dc_hardware_init(adev->dm.dc);

 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
 if (!adev->dm.hpd_rx_offload_wq) {
  drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
  goto error;
 }

 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
  struct dc_phy_addr_space_config pa_config;

  mmhub_read_system_context(adev, &pa_config);

  // Call the DC init_memory func
  dc_setup_system_context(adev->dm.dc, &pa_config);
 }

 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
 if (!adev->dm.freesync_module) {
  drm_err(adev_to_drm(adev),
  "failed to initialize freesync_module.\n");
 } else
  drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
    adev->dm.freesync_module);

 amdgpu_dm_init_color_mod();

 if (adev->dm.dc->caps.max_links > 0) {
  adev->dm.vblank_control_workqueue =
   create_singlethread_workqueue("dm_vblank_control_workqueue");
  if (!adev->dm.vblank_control_workqueue)
   drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
 }

 if (adev->dm.dc->caps.ips_support &&
     adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
  adev->dm.idle_workqueue = idle_create_workqueue(adev);

 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
  adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

  if (!adev->dm.hdcp_workqueue)
   drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
  else
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=94 H=97 G=95

¤ Dauer der Verarbeitung: 0.25 Sekunden  (vorverarbeitet)  ¤

© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.






                                                                                                                                                                                                                                                                                                                                                                                                     


Neuigkeiten

     Aktuelles
     Motto des Tages

Software

     Produkte
     Quellcodebibliothek

Aktivitäten

     Artikel über Sicherheit
     Anleitung zur Aktivierung von SSL

Muße

     Gedichte
     Musik
     Bilder

Jenseits des Üblichen ....

Besucherstatistik

Besucherstatistik

Monitoring

Montastic status badge