/* drivers/gpu/drm/amd/amdgpu/dce_v6_0.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */


#include <linux/pci.h>

#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "dce_v6_0.h"
#include "sid.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

/* MMIO offset of each display controller's register block, indexed by CRTC id. */
static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

/* MMIO offset of each hotplug-detect (HPD) pin's register block, indexed by pin. */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

/* Register block offsets for the digital encoder (DIG) instances.  The first
 * six match the CRTC offsets; the 7th entry is a raw dword offset
 * ((0x13830 - 0x7030) >> 2) — presumably an extra DIG block past the CRTC
 * range; confirm against the DCE6 register spec.
 */
static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

/*
 * Display interrupt status register and the vblank/vline/HPD bit masks
 * within it, one entry per display controller (index == CRTC/HPD number).
 */
static const struct {
	uint32_t reg;    /* mmDISP_INTERRUPT_STATUS* register for this head */
	uint32_t vblank; /* vblank interrupt pending bit in @reg */
	uint32_t vline;  /* vline interrupt pending bit in @reg */
	uint32_t hpd;    /* HPD interrupt pending bit in @reg */

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

/* Read an AZALIA (HDMI audio) endpoint register through the shared
 * index/data register pair; the spinlock serializes concurrent users
 * of that pair.  @block_offset selects the endpoint register block.
 */
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	/* select the endpoint register, then read its value */
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

/* Write an AZALIA (HDMI audio) endpoint register through the shared
 * index/data register pair.  The WRITE_EN bit must be set in the index
 * register for the subsequent data write to take effect.
 */
static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

/* Return the hardware frame counter of @crtc; out-of-range ids read as 0. */
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc < adev->mode_info.num_crtc)
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);

	return 0;
}

/* Take a reference on the pageflip interrupt source of every CRTC. */
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	int crtc;

	for (crtc = 0; crtc < adev->mode_info.num_crtc; crtc++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, crtc);
}

/* Drop the reference on the pageflip interrupt source of every CRTC. */
static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	int crtc;

	for (crtc = 0; crtc < adev->mode_info.num_crtc; crtc++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, crtc);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * Programs the new scanout address and pitch; the write to the low
 * primary surface address register is what arms the double-buffered
 * update, which the hardware latches at h-retrace (async) or at
 * v-retrace (default).
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch (programmed in pixels, hence the divide by cpp) */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

/* Read the vblank start/end and the current scan position of @crtc.
 * Returns 0 on success, -EINVAL for an invalid CRTC id.
 */
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return -EINVAL;

	/* read order kept: blank window first, then the live position */
	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */

static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
          enum amdgpu_hpd_id hpd)
{
 bool connected = false;

 if (hpd >= adev->mode_info.num_hpd)
  return connected;

 if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
     DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
  connected = true;

 return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */

static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	bool connected;
	u32 ctrl;

	if (hpd >= adev->mode_info.num_hpd)
		return;

	/* Arm the interrupt for the opposite of the current sense state. */
	connected = dce_v6_0_hpd_sense(adev, hpd);

	ctrl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		ctrl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		ctrl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], ctrl);
}

/* Acknowledge (clear) a pending interrupt on HPD pin @hpd. */
static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
				 int hpd)
{
	u32 int_ctrl;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return;
	}

	int_ctrl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]) |
		   DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], int_ctrl);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */

static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* skip connectors without a valid HPD pin */
		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		/* power up the HPD pin */
		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imac and help (but
			 * not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		/* clear any stale interrupt, set polarity, then unmask */
		dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */

static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
 struct drm_device *dev = adev_to_drm(adev);
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 u32 tmp;

 drm_connector_list_iter_begin(dev, &iter);
 drm_for_each_connector_iter(connector, &iter) {
  struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

  if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
   continue;

  tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
  tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
  WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

  amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 }
 drm_connector_list_iter_end(&iter);
}

/* Return the MMIO register exposing the raw HPD GPIO pin states. */
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
 u32 crtc_hung = 0;
 u32 crtc_status[6];
 u32 i, j, tmp;

 for (i = 0; i < adev->mode_info.num_crtc; i++) {
  if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
   crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
   crtc_hung |= (1 << i);
  }
 }

 for (j = 0; j < 10; j++) {
  for (i = 0; i < adev->mode_info.num_crtc; i++) {
   if (crtc_hung & (1 << i)) {
    tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
    if (tmp != crtc_status[i])
     crtc_hung &= ~(1 << i);
   }
  }
  if (crtc_hung == 0)
   return false;
  udelay(100);
 }

 return true;
}

/* Only the disable path is implemented; @render == true is a no-op here. */
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 ctrl;

	if (render)
		return;

	ctrl = RREG32(mmVGA_RENDER_CONTROL);
	ctrl &= ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
	WREG32(mmVGA_RENDER_CONTROL, ctrl);
}

/* Number of display controllers on this SI variant (0 if unknown). */
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		num_crtc = 6;
		break;
	case CHIP_OLAND:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
		break;
	}

	return num_crtc;
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled CRTC, if the board has a DCE engine. */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* Disable each CRTC that is currently master-enabled. */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				/* hold the update lock while clearing master enable */
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
 struct drm_device *dev = encoder->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 int bpc = 0;
 u32 tmp = 0;
 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

 if (connector) {
  struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
  bpc = amdgpu_connector_get_monitor_bpc(connector);
  dither = amdgpu_connector->dither;
 }

 /* LVDS FMT is set up by atom */
 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
  return;

 if (bpc == 0)
  return;


 switch (bpc) {
 case 6:
  if (dither == AMDGPU_FMT_DITHER_ENABLE)
   /* XXX sort out optimal dither settings */
   tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
  else
   tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
  break;
 case 8:
  if (dither == AMDGPU_FMT_DITHER_ENABLE)
   /* XXX sort out optimal dither settings */
   tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
  else
   tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
    FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
  break;
 case 10:
 default:
  /* not needed */
  break;
 }

 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */

static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	/* decoded channel count for NOOFCHAN field values 0..8 */
	static const u32 nchan[] = { 1, 2, 4, 8, 3, 6, 10, 12, 16 };
	u32 field;

	field = (RREG32(mmMC_SHARED_CHMAP) & MC_SHARED_CHMAP__NOOFCHAN_MASK) >>
		MC_SHARED_CHMAP__NOOFCHAN__SHIFT;

	/* unknown encodings fall back to a single channel */
	if (field >= sizeof(nchan) / sizeof(nchan[0]))
		return 1;

	return nchan[field];
}

/* Input parameters for the DCE6 display watermark calculations. */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */

/* Raw DRAM bandwidth in MB/s: (yclk/1000) * (channels * 4) * 0.7. */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	fixed20_12 efficiency; /* DRAM efficiency, 0.7 */
	fixed20_12 yclk_mhz, chan_bytes, bw;
	fixed20_12 divisor;

	divisor.full = dfixed_const(1000);
	yclk_mhz.full = dfixed_const(wm->yclk);
	yclk_mhz.full = dfixed_div(yclk_mhz, divisor);
	chan_bytes.full = dfixed_const(wm->dram_channels * 4);
	divisor.full = dfixed_const(10);
	efficiency.full = dfixed_const(7);
	efficiency.full = dfixed_div(efficiency, divisor);
	bw.full = dfixed_mul(chan_bytes, yclk_mhz);
	bw.full = dfixed_mul(bw, efficiency);

	return dfixed_trunc(bw);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */

/* DRAM bandwidth the display may use in MB/s, assuming the worst-case
 * 0.3 allocation of the raw bandwidth.
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	fixed20_12 allocation; /* display share, worst case 0.3 */
	fixed20_12 yclk_mhz, chan_bytes, bw;
	fixed20_12 divisor;

	divisor.full = dfixed_const(1000);
	yclk_mhz.full = dfixed_const(wm->yclk);
	yclk_mhz.full = dfixed_div(yclk_mhz, divisor);
	chan_bytes.full = dfixed_const(wm->dram_channels * 4);
	divisor.full = dfixed_const(10);
	allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	allocation.full = dfixed_div(allocation, divisor);
	bw.full = dfixed_mul(chan_bytes, yclk_mhz);
	bw.full = dfixed_mul(bw, allocation);

	return dfixed_trunc(bw);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */

/* Display data-return bandwidth in MB/s: (sclk/1000) * 32 * 0.8. */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	fixed20_12 efficiency; /* return efficiency, 0.8 */
	fixed20_12 sclk_mhz, bw;
	fixed20_12 tmp;

	tmp.full = dfixed_const(1000);
	sclk_mhz.full = dfixed_const(wm->sclk);
	sclk_mhz.full = dfixed_div(sclk_mhz, tmp);
	tmp.full = dfixed_const(10);
	efficiency.full = dfixed_const(8);
	efficiency.full = dfixed_div(efficiency, tmp);
	tmp.full = dfixed_const(32);
	bw.full = dfixed_mul(tmp, sclk_mhz);
	bw.full = dfixed_mul(bw, efficiency);

	return dfixed_trunc(bw);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */

/* DMIF request bandwidth in MB/s: (disp_clk/1000) * 32 * 0.8. */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	fixed20_12 efficiency; /* request efficiency, 0.8 */
	fixed20_12 dclk_mhz, bw;
	fixed20_12 tmp, scaled;

	tmp.full = dfixed_const(1000);
	dclk_mhz.full = dfixed_const(wm->disp_clk);
	dclk_mhz.full = dfixed_div(dclk_mhz, tmp);
	tmp.full = dfixed_const(32);
	scaled.full = dfixed_mul(tmp, dclk_mhz);

	tmp.full = dfixed_const(10);
	efficiency.full = dfixed_const(8);
	efficiency.full = dfixed_div(efficiency, tmp);

	bw.full = dfixed_mul(scaled, efficiency);

	return dfixed_trunc(bw);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */

/* The display can momentarily (not on average) use the smallest of the
 * dram, data-return and dmif bandwidth limits.
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	u32 bw = dce_v6_0_dram_bandwidth(wm);

	bw = min(bw, dce_v6_0_data_return_bandwidth(wm));
	bw = min(bw, dce_v6_0_dmif_request_bandwidth(wm));

	return bw;
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */

/* Average bandwidth of the mode in MB/s:
 * src_width * bytes_per_pixel * vsc / line_time(us).
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	fixed20_12 bytes_pp;
	fixed20_12 line_us;
	fixed20_12 width;
	fixed20_12 bw;
	fixed20_12 thousand;

	thousand.full = dfixed_const(1000);
	line_us.full = dfixed_const(wm->active_time + wm->blank_time);
	line_us.full = dfixed_div(line_us, thousand);
	bytes_pp.full = dfixed_const(wm->bytes_per_pixel);
	width.full = dfixed_const(wm->src_width);
	bw.full = dfixed_mul(width, bytes_pp);
	bw.full = dfixed_mul(bw, wm->vsc);
	bw.full = dfixed_div(bw, line_us);

	return dfixed_trunc(bw);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */

static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	/* NOTE(review): these divisions execute before the num_heads guard
	 * below and assume available_bandwidth and disp_clk are non-zero
	 * for any configured mode — confirm against callers. */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling/filtering needs more source lines per output line */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* line-buffer fill bandwidth: bounded by per-head share of the
	 * available bandwidth, the dmif limit, and the display clock */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	/* time to fill the required source lines at that rate */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* pad the latency if the line cannot be filled within active time */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */

static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
 if (dce_v6_0_average_bandwidth(wm) <=
     (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
  return true;
 else
  return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */

static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
 if (dce_v6_0_average_bandwidth(wm) <=
     (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
  return true;
 else
  return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */

/* True when the line buffer can hide the latency watermark. */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 tolerant_lines = 2;
	u32 latency_hiding;
	fixed20_12 one;

	one.full = dfixed_const(1);
	/* downscaling or too few line-buffer partitions leave room for
	 * only a single latency-tolerant line */
	if (wm->vsc.full > one.full || lb_partitions <= (wm->vtaps + 1))
		tolerant_lines = 1;

	latency_hiding = tolerant_lines * line_time + wm->blank_time;

	return dce_v6_0_latency_watermark(wm) <= latency_hiding;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */

static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		/* active/total line times in ns, from pixel counts and the
		 * pixel clock (mode->clock is in kHz) */
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min_t(u32, line_time, 65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
		/* set for low clocks */
		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: watermark scaled by pixel clock and
		 * horizontal scale ratio, in units of 16 pixels */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same computation with the low-clock watermark */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
/**
 * dce_v6_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 * @other_mode: the display mode of another display controller
 *              that may be sharing the line buffer
 *
 * Set up the line buffer allocation for
 * the selected display controller (DCE6).
 * Returns the line buffer size in pixels.
 */

static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
       struct amdgpu_crtc *amdgpu_crtc,
       struct drm_display_mode *mode,
       struct drm_display_mode *other_mode)
{
 u32 tmp, buffer_alloc, i;
 u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
 /*
 * Line Buffer Setup
 * There are 3 line buffers, each one shared by 2 display controllers.
 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 * the display controllers.  The paritioning is done via one of four
 * preset allocations specified in bits 21:20:
 *  0 - half lb
 *  2 - whole lb, other crtc must be disabled
 */

 /* this can get tricky if we have two large displays on a paired group
 * of crtcs.  Ideally for multiple large displays we'd assign them to
 * non-linked crtcs for maximum line buffer allocation.
 */

 if (amdgpu_crtc->base.enabled && mode) {
  if (other_mode) {
   tmp = 0; /* 1/2 */
   buffer_alloc = 1;
  } else {
   tmp = 2; /* whole */
   buffer_alloc = 2;
  }
 } else {
  tmp = 0;
  buffer_alloc = 0;
 }

 WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
        (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));

 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
        (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 for (i = 0; i < adev->usec_timeout; i++) {
  if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
      PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
   break;
  udelay(1);
 }

 if (amdgpu_crtc->base.enabled && mode) {
  switch (tmp) {
  case 0:
  default:
   return 4096 * 2;
  case 2:
   return 8192 * 2;
  }
 }

 /* controller not enabled, so no lb used */
 return 0;
}


/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (DCE6).
 */

static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
 struct drm_display_mode *mode0 = NULL;
 struct drm_display_mode *mode1 = NULL;
 u32 num_heads = 0, lb_size;
 int i;

 if (!adev->mode_info.mode_config_initialized)
  return;

 amdgpu_display_update_priority(adev);

 for (i = 0; i < adev->mode_info.num_crtc; i++) {
  if (adev->mode_info.crtcs[i]->base.enabled)
   num_heads++;
 }
 for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
  mode0 = &adev->mode_info.crtcs[i]->base.mode;
  mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
  lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
  dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
  lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
  dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
 }
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	u32 reg;
	int i;

	/* a pin counts as connected when PORT_CONNECTIVITY reads zero */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		reg = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		adev->mode_info.audio.pin[i].connected =
			!REG_GET_FIELD(reg,
				       AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
				       PORT_CONNECTIVITY);
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
 int i;

 dce_v6_0_audio_get_connected_pins(adev);

 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
  if (adev->mode_info.audio.pin[i].connected)
   return &adev->mode_info.audio.pin[i];
 }
 DRM_ERROR("No connected audio pins found!\n");
 return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

 if (!dig || !dig->afmt || !dig->afmt->pin)
  return;

 WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
        REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
               dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
      struct drm_display_mode *mode)
{
 struct drm_device *dev = encoder->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 struct amdgpu_connector *amdgpu_connector = NULL;
 int interlace = 0;
 u32 tmp;

 drm_connector_list_iter_begin(dev, &iter);
 drm_for_each_connector_iter(connector, &iter) {
  if (connector->encoder == encoder) {
   amdgpu_connector = to_amdgpu_connector(connector);
   break;
  }
 }
 drm_connector_list_iter_end(&iter);

 if (!amdgpu_connector) {
  DRM_ERROR("Couldn't find encoder's connector\n");
  return;
 }

 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
  interlace = 1;

 if (connector->latency_present[interlace]) {
  tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
    VIDEO_LIPSYNC, connector->video_latency[interlace]);
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
 } else {
  tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
    VIDEO_LIPSYNC, 0);
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
    AUDIO_LIPSYNC, 0);
 }
 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
      ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
 struct drm_device *dev = encoder->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 struct amdgpu_connector *amdgpu_connector = NULL;
 u8 *sadb = NULL;
 int sad_count;
 u32 tmp;

 drm_connector_list_iter_begin(dev, &iter);
 drm_for_each_connector_iter(connector, &iter) {
  if (connector->encoder == encoder) {
   amdgpu_connector = to_amdgpu_connector(connector);
   break;
  }
 }
 drm_connector_list_iter_end(&iter);

 if (!amdgpu_connector) {
  DRM_ERROR("Couldn't find encoder's connector\n");
  return;
 }

 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
 if (sad_count < 0) {
  DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
  sad_count = 0;
 }

 /* program the speaker allocation */
 tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
   HDMI_CONNECTION, 0);
 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
   DP_CONNECTION, 0);

 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
    DP_CONNECTION, 1);
 else
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
    HDMI_CONNECTION, 1);

 if (sad_count)
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
    SPEAKER_ALLOCATION, sadb[0]);
 else
  tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
    SPEAKER_ALLOCATION, 5); /* stereo */

 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

 kfree(sadb);
}

/*
 * Translate the connector EDID's Short Audio Descriptors (SADs) into the
 * per-coding-type AZALIA audio descriptor registers of the endpoint.
 */
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	/* map each hardware audio descriptor register to the
	 * HDMI audio coding type it describes */
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	/* find the connector currently driven by this encoder */
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

	/* for each coding type, advertise the SAD with the highest channel
	 * count; for PCM, additionally accumulate the stereo frequency mask */
	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
					 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
					        (sad->byte2 <<
					 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
					        (sad->freq <<
					 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				/* NOTE(review): for non-PCM types the scan stops at the
				 * first matching SAD — mirrors the other DCE variants;
				 * verify this is the intended behavior */
				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	u32 val;

	if (!pin)
		return;

	/* gate the azalia endpoint on/off via its hot plug control */
	val = enable ?
		AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0;
	WREG32_AUDIO_ENDPT(pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, val);
}

/* register offsets of the azalia audio endpoints (AUD0..AUD6),
 * indexed by pin id */
static const u32 pin_offsets[7] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
 int i;

 if (!amdgpu_audio)
  return 0;

 adev->mode_info.audio.enabled = true;

 switch (adev->asic_type) {
 case CHIP_TAHITI:
 case CHIP_PITCAIRN:
 case CHIP_VERDE:
 default:
  adev->mode_info.audio.num_pins = 6;
  break;
 case CHIP_OLAND:
  adev->mode_info.audio.num_pins = 2;
  break;
 }

 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
  adev->mode_info.audio.pin[i].channels = -1;
  adev->mode_info.audio.pin[i].rate = -1;
  adev->mode_info.audio.pin[i].bits_per_sample = -1;
  adev->mode_info.audio.pin[i].status_bits = 0;
  adev->mode_info.audio.pin[i].category_code = 0;
  adev->mode_info.audio.pin[i].connected = false;
  adev->mode_info.audio.pin[i].offset = pin_offsets[i];
  adev->mode_info.audio.pin[i].id = i;
  /* disable audio.  it will be set up later */
  /* XXX remove once we switch to ip funcs */
  dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 }

 return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	/* nothing to tear down when audio was never brought up */
	if (!amdgpu_audio || !adev->mode_info.audio.enabled)
		return;

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
 struct drm_device *dev = encoder->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 u32 tmp;

 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

/*
 * Program the HDMI Audio Clock Regeneration (ACR) parameters: the
 * CTS/N pairs for the 32 kHz, 44.1 kHz and 48 kHz base sample rates,
 * looked up from the pixel clock via amdgpu_afmt_acr().
 */
static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	/* send ACR packets automatically; the ACR source select differs
	 * for deep color (bpc > 8) */
	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
		bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	/* 32 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	/* 44.1 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	/* 48 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

/*
 * Build the AVI infoframe for the given mode, pack it and load it into
 * the AFMT_AVI_INFO0..3 registers, then select a scanline on which the
 * audio infoframe is transmitted.
 */
static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;	/* packed bytes from offset 3 onward */
	uint8_t *header = buffer;	/* packed infoframe header bytes */
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	/* load the packed frame little-endian, 4 bytes per register;
	 * INFO3 additionally carries the second header byte in its top byte */
	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
		HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	bool is_hdmi = (em == ATOM_ENCODER_MODE_HDMI);
	u32 src;

	/*
	 * Two DTOs are available: dto0 is generally used for HDMI and dto1
	 * for DP.  [24MHz / target pixel clock] is expressed as an exact
	 * rational number: DCCG_AUDIO_DTOx_PHASE is the numerator and
	 * DCCG_AUDIO_DTOx_MODULE the denominator.
	 */
	src = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	src = REG_SET_FIELD(src, DCCG_AUDIO_DTO_SOURCE,
			    DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (is_hdmi)
		src = REG_SET_FIELD(src, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 0);
	else if (ENCODER_MODE_IS_DP(em))
		src = REG_SET_FIELD(src, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 1);
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, src);

	if (is_hdmi) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}

/* Static audio packet/channel setup for the AFMT block. */
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	/* latch updated audio infoframe contents */
	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	/* IEC 60958 channel status: channel numbers 1..8 (L, R, then 2..7) */
	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	/* allow all eight audio channels */
	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);

	/* audio packet delay and packets-per-line pacing */
	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	/* reset the FIFO when audio goes away; pick up channel status updates */
	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset = dig->afmt->offset;
	u32 val;

	/* toggle the AVMUTE flag in the HDMI general control packet */
	val = RREG32(mmHDMI_GC + offset);
	val = REG_SET_FIELD(val, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
	WREG32(mmHDMI_GC + offset, val);
}

static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset = dig->afmt->offset;
	u32 val;

	if (enable) {
		/* start continuous AVI and audio infoframe transmission */
		val = RREG32(mmHDMI_INFOFRAME_CONTROL0 + offset);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
		WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset, val);

		/* transmit the AVI infoframe on scanline 2 */
		val = RREG32(mmHDMI_INFOFRAME_CONTROL1 + offset);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
		WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset, val);

		/* start sending audio samples */
		val = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset);
		val = REG_SET_FIELD(val, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset, val);
	} else {
		/* stop infoframe transmission */
		val = RREG32(mmHDMI_INFOFRAME_CONTROL0 + offset);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
		val = REG_SET_FIELD(val, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
		WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset, val);

		/* stop sending audio samples */
		val = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset);
		val = REG_SET_FIELD(val, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset, val);
	}
}

static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset = dig->afmt->offset;
	u32 val;

	if (!enable) {
		/* clearing DP_SEC_CNTL disables the whole secondary stream */
		WREG32(mmDP_SEC_CNTL + offset, 0);
		return;
	}

	/* start sending audio samples */
	val = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset);
	val = REG_SET_FIELD(val, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset, val);

	val = RREG32(mmDP_SEC_TIMESTAMP + offset);
	val = REG_SET_FIELD(val, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
	WREG32(mmDP_SEC_TIMESTAMP + offset, val);

	/* enable the DP secondary stream: audio samples, timestamps and
	 * infoframe packets */
	val = RREG32(mmDP_SEC_CNTL + offset);
	val = REG_SET_FIELD(val, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
	val = REG_SET_FIELD(val, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
	val = REG_SET_FIELD(val, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
	val = REG_SET_FIELD(val, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
	WREG32(mmDP_SEC_CNTL + offset, val);
}

static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
      struct drm_display_mode *mode)
{
 struct drm_device *dev = encoder->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 struct drm_connector *connector;
 struct drm_connector_list_iter iter;
 struct amdgpu_connector *amdgpu_connector = NULL;
 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
 int bpc = 8;

 if (!dig || !dig->afmt)
  return;

 drm_connector_list_iter_begin(dev, &iter);
 drm_for_each_connector_iter(connector, &iter) {
  if (connector->encoder == encoder) {
   amdgpu_connector = to_amdgpu_connector(connector);
   break;
  }
 }
 drm_connector_list_iter_end(&iter);

 if (!amdgpu_connector) {
  DRM_ERROR("Couldn't find encoder's connector\n");
  return;
 }

 if (!dig->afmt->enabled)
  return;

 dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
 if (!dig->afmt->pin)
  return;

 if (encoder->crtc) {
  struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
  bpc = amdgpu_crtc->bpc;
 }

 /* disable audio before setting up hw */
 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);

 dce_v6_0_audio_set_mute(encoder, true);
 dce_v6_0_audio_write_speaker_allocation(encoder);
 dce_v6_0_audio_write_sad_regs(encoder);
 dce_v6_0_audio_write_latency_fields(encoder, mode);
 if (em == ATOM_ENCODER_MODE_HDMI) {
  dce_v6_0_audio_set_dto(encoder, mode->clock);
  dce_v6_0_audio_set_vbi_packet(encoder);
  dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
 } else if (ENCODER_MODE_IS_DP(em)) {
  dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
 }
 dce_v6_0_audio_set_packet(encoder);
 dce_v6_0_audio_select_pin(encoder);
 dce_v6_0_audio_set_avi_infoframe(encoder, mode);
 dce_v6_0_audio_set_mute(encoder, false);
 if (em == ATOM_ENCODER_MODE_HDMI) {
  dce_v6_0_audio_hdmi_enable(encoder, 1);
 } else if (ENCODER_MODE_IS_DP(em)) {
  dce_v6_0_audio_dp_enable(encoder, 1);
 }

 /* enable audio after setting up hw */
 dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
}

static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* already in the requested state: stay silent,
	 * r600_hdmi_enable will raise WARN for us */
	if (enable == dig->afmt->enabled)
		return;

	/* on disable, shut off the endpoint and drop the pin routing */
	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
 int i, j;

 for (i = 0; i < adev->mode_info.num_dig; i++)
  adev->mode_info.afmt[i] = NULL;

 /* DCE6 has audio blocks tied to DIG encoders */
 for (i = 0; i < adev->mode_info.num_dig; i++) {
  adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
  if (adev->mode_info.afmt[i]) {
   adev->mode_info.afmt[i]->offset = dig_offsets[i];
   adev->mode_info.afmt[i]->id = i;
  } else {
   for (j = 0; j < i; j++) {
    kfree(adev->mode_info.afmt[j]);
    adev->mode_info.afmt[j] = NULL;
   }
   DRM_ERROR("Out of memory allocating afmt table\n");
   return -ENOMEM;
  }
 }
 return 0;
}

static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
{
	struct amdgpu_afmt **afmt = adev->mode_info.afmt;
	int i;

	/* release every per-DIG afmt instance and clear the table */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(afmt[i]);
		afmt[i] = NULL;
	}
}

/* per-crtc VGA control registers, indexed by crtc_id */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	u32 reg = vga_control_regs[amdgpu_crtc->crtc_id];
	u32 vga_control;

	/* bit 0 toggles VGA mode; preserve the rest of the register */
	vga_control = RREG32(reg) & ~1;
	if (enable)
		vga_control |= 1;
	WREG32(reg, vga_control);
}

static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	u32 val = enable ? 1 : 0;

	/* master on/off switch for this crtc's graphics (primary) surface */
	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, val);
}

static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
         struct drm_framebuffer *fb,
         int x, int y, int atomic)
{
 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 struct drm_device *dev = crtc->dev;
 struct amdgpu_device *adev = drm_to_adev(dev);
 struct drm_framebuffer *target_fb;
 struct drm_gem_object *obj;
 struct amdgpu_bo *abo;
 uint64_t fb_location, tiling_flags;
 uint32_t fb_format, fb_pitch_pixels, pipe_config;
 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
 u32 viewport_w, viewport_h;
 int r;
 bool bypass_lut = false;

 /* no fb bound */
 if (!atomic && !crtc->primary->fb) {
  DRM_DEBUG_KMS("No FB bound\n");
  return 0;
 }

 if (atomic)
  target_fb = fb;
 else
  target_fb = crtc->primary->fb;

 /* If atomic, assume fb object is pinned & idle & fenced and
 * just update base pointers
 */

 obj = target_fb->obj[0];
 abo = gem_to_amdgpu_bo(obj);
 r = amdgpu_bo_reserve(abo, false);
 if (unlikely(r != 0))
  return r;

 if (!atomic) {
  abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
  r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
  if (unlikely(r != 0)) {
   amdgpu_bo_unreserve(abo);
   return -EINVAL;
  }
 }
 fb_location = amdgpu_bo_gpu_offset(abo);

 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 amdgpu_bo_unreserve(abo);

 switch (target_fb->format->format) {
 case DRM_FORMAT_C8:
  fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
  break;
 case DRM_FORMAT_XRGB4444:
 case DRM_FORMAT_ARGB4444:
  fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 case DRM_FORMAT_XRGB1555:
 case DRM_FORMAT_ARGB1555:
  fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 case DRM_FORMAT_BGRX5551:
 case DRM_FORMAT_BGRA5551:
  fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 case DRM_FORMAT_RGB565:
  fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 case DRM_FORMAT_XRGB8888:
 case DRM_FORMAT_ARGB8888:
  fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 case DRM_FORMAT_XRGB2101010:
 case DRM_FORMAT_ARGB2101010:
  fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
  bypass_lut = true;
  break;
 case DRM_FORMAT_BGRX1010102:
 case DRM_FORMAT_BGRA1010102:
  fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
  bypass_lut = true;
  break;
 case DRM_FORMAT_XBGR8888:
 case DRM_FORMAT_ABGR8888:
  fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
        (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
  fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
      (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
  fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
  break;
 default:
  DRM_ERROR("Unsupported screen format %p4cc\n",
     &target_fb->format->format);
  return -EINVAL;
 }

 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=97 H=99 G=97

¤ Dauer der Verarbeitung: 0.19 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.