/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <drm/drm_print.h>
#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_vrr.h"
/*
 * Per-platform color management vtable. Hooks are selected based on the
 * display hardware generation and invoked by the generic intel_color code.
 */
struct intel_color_funcs {
	/* Validate/compute the color management state for the crtc. */
	int (*color_check)(struct intel_atomic_state *state,
			   struct intel_crtc *crtc);
	/*
	 * Program non-arming double buffered color management registers
	 * before vblank evasion. The registers should then latch after
	 * the arming register is written (by color_commit_arm()) during
	 * the next vblank start, alongside any other double buffered
	 * registers involved with the same commit. This hook is optional.
	 */
	void (*color_commit_noarm)(struct intel_dsb *dsb,
				   const struct intel_crtc_state *crtc_state);
	/*
	 * Program arming double buffered color management registers
	 * during vblank evasion. The registers (and whatever other registers
	 * they arm that were written by color_commit_noarm) should then latch
	 * during the next vblank start, alongside any other double buffered
	 * registers involved with the same commit.
	 */
	void (*color_commit_arm)(struct intel_dsb *dsb,
				 const struct intel_crtc_state *crtc_state);
	/*
	 * Perform any extra tasks needed after all the
	 * double buffered registers have been latched.
	 */
	void (*color_post_update)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
	/*
	 * Read out the LUTs from the hardware into the software state.
	 * Used by eg. the hardware state checker.
	 */
	void (*read_luts)(struct intel_crtc_state *crtc_state);
	/*
	 * Compare the LUTs (as software state vs. hardware readout,
	 * for the pre-CSC or post-CSC LUT depending on is_pre_csc_lut).
	 */
	bool (*lut_equal)(const struct intel_crtc_state *crtc_state,
			  const struct drm_property_blob *blob1,
			  const struct drm_property_blob *blob2,
			  bool is_pre_csc_lut);
	/*
	 * Read out the CSCs (if any) from the hardware into the
	 * software state. Used by eg. the hardware state checker.
	 */
	void (*read_csc)(struct intel_crtc_state *crtc_state);
	/*
	 * Read config other than LUTs and CSCs, before them. Optional.
	 */
	void (*get_config)(struct intel_crtc_state *crtc_state);
};
/*
 * Userspace CTM coefficients are S31.32 sign-magnitude fixed point:
 * bit 63 is the sign, bits 62:32 the integer part, bits 31:0 the fraction.
 */
#define CTM_COEFF_SIGN (1ULL << 63)
#define CTM_COEFF_1_0 (1ULL << 32)
#define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1)
#define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1)
#define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1)
#define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1)
#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
#define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1)
/* (235-16)/255 - the full->limited range scale factor in U32.32 */
#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
/* magnitude without the sign bit */
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
/* legacy 8bit gamma LUT has 256 entries */
#define LEGACY_LUT_LENGTH 256
/*
* ILK+ csc matrix:
*
* |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
* |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
* |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
*
* ILK/SNB don't have explicit post offsets, and instead
* CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
* CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
* CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
* CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
* CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
*/
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
* number of fractional bits.
*
* We only have a 9 bits precision window which slides depending on the value
* of the CTM coefficient and we write the value from bit 3. We also round the
* value.
*/
#define ILK_CSC_COEFF_FP(coeff, fbits)	\
	(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)

/* 1.0 in the hardware's floating point coefficient format */
#define ILK_CSC_COEFF_1_0 0x7800

/* (235-16)/256 scale factor for full->limited range compression */
#define ILK_CSC_COEFF_LIMITED_RANGE ((235 - 16) << (12 - 8)) /* exponent 0 */
/* +16/256 post offset to shift black level up to limited range */
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 << (12 - 8))
/* Identity matrix, no offsets - passes pixel data through unchanged */
static const struct intel_csc_matrix ilk_csc_matrix_identity = {
	.preoff = {},
	.coeff = {
		ILK_CSC_COEFF_1_0, 0, 0,
		0, ILK_CSC_COEFF_1_0, 0,
		0, 0, ILK_CSC_COEFF_1_0,
	},
	.postoff = {},
};
/* Full range RGB -> limited range RGB matrix */
static const struct intel_csc_matrix ilk_csc_matrix_limited_range = {
	.preoff = {},
	/* scale each channel by (235-16)/256 ... */
	.coeff = {
		ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
		0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
		0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
	},
	/* ... and shift the black level up by 16/256 */
	.postoff = {
		ILK_CSC_POSTOFF_LIMITED_RANGE,
		ILK_CSC_POSTOFF_LIMITED_RANGE,
		ILK_CSC_POSTOFF_LIMITED_RANGE,
	},
};
/* BT.709 full range RGB -> limited range YCbCr matrix */
static const struct intel_csc_matrix ilk_csc_matrix_rgb_to_ycbcr = {
	.preoff = {},
	/* coefficients in the hardware's sign+exponent+mantissa format */
	.coeff = {
		0x1e08, 0x9cc0, 0xb528,
		0x2ba8, 0x09d8, 0x37e8,
		0xbce8, 0x9ad8, 0x1e08,
	},
	/* chroma channels centered at 0.5, luma black level at 16/256 */
	.postoff = {
		0x0800, 0x0100, 0x0800,
	},
};
/* Zero out all pre offsets, coefficients, and post offsets of @csc. */
static void intel_csc_clear(struct intel_csc_matrix *csc)
{
	memset(csc, 0, sizeof(*csc));
}
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
return lut && drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
}
/*
 * When using limited range, multiply the matrix given by userspace by
 * the matrix that we would use for the limited range.
 */
static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
{
	int i;

	for (i = 0; i < 9; i++) {
		u64 user_coeff = input[i];
		u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
		/*
		 * Clamp the magnitude to just under 4.0 (the hw maximum),
		 * then drop 2 bits so the U32.32 value fits in a u32
		 * (compensated by the >> 30 instead of >> 32 below).
		 */
		u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
					  CTM_COEFF_4_0 - 1) >> 2;

		/*
		 * By scaling every co-efficient with limited range (16-235)
		 * vs full range (0-255) the final o/p will be scaled down to
		 * fit in the limited range supported by the panel.
		 */
		result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
		/* reattach the original sign bit */
		result[i] |= user_coeff & CTM_COEFF_SIGN;
	}

	return result;
}
/*
 * Write the pipe CSC pre offsets, coefficients, and post offsets.
 * Two coefficients are packed per register (high/low 16 bits).
 */
static void ilk_update_pipe_csc(struct intel_dsb *dsb,
				struct intel_crtc *crtc,
				const struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_HI(pipe),
			   csc->preoff[0]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_ME(pipe),
			   csc->preoff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_LO(pipe),
			   csc->preoff[2]);

	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RY_GY(pipe),
			   csc->coeff[0] << 16 | csc->coeff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BY(pipe),
			   csc->coeff[2] << 16);

	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RU_GU(pipe),
			   csc->coeff[3] << 16 | csc->coeff[4]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BU(pipe),
			   csc->coeff[5] << 16);

	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RV_GV(pipe),
			   csc->coeff[6] << 16 | csc->coeff[7]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BV(pipe),
			   csc->coeff[8] << 16);

	/* ILK/SNB (display ver < 7) have no post offset registers */
	if (DISPLAY_VER(display) < 7)
		return;

	intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_HI(pipe),
			   csc->postoff[0]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_ME(pipe),
			   csc->postoff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_LO(pipe),
			   csc->postoff[2]);
}
/* Read the pipe CSC registers back into @csc (inverse of ilk_update_pipe_csc()). */
static void ilk_read_pipe_csc(struct intel_crtc *crtc,
			      struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(pipe));
	csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_PREOFF_ME(pipe));
	csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_PREOFF_LO(pipe));

	/* two coefficients per register, unpack high/low 16 bits */
	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RY_GY(pipe));
	csc->coeff[0] = tmp >> 16;
	csc->coeff[1] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BY(pipe));
	csc->coeff[2] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RU_GU(pipe));
	csc->coeff[3] = tmp >> 16;
	csc->coeff[4] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BU(pipe));
	csc->coeff[5] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RV_GV(pipe));
	csc->coeff[6] = tmp >> 16;
	csc->coeff[7] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BV(pipe));
	csc->coeff[8] = tmp >> 16;

	/* ILK/SNB (display ver < 7) have no post offset registers */
	if (DISPLAY_VER(display) < 7)
		return;

	csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_HI(pipe));
	csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_ME(pipe));
	csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_LO(pipe));
}
/* Read out the pipe CSC state, if enabled. */
static void ilk_read_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->csc_enable)
		return;

	ilk_read_pipe_csc(crtc, &crtc_state->csc);
}
static void skl_read_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Display WA #1184: skl,glk
	 * Wa_1406463849: icl
	 *
	 * Danger! On SKL-ICL *reads* from the CSC coeff/offset registers
	 * will disarm an already armed CSC double buffer update.
	 * So this must not be called while armed. Fortunately the state checker
	 * readout happens only after the update has already been latched.
	 *
	 * On earlier and later platforms only writes to said registers will
	 * disarm the update. This is considered normal behavior and also
	 * happens with various other hardware units.
	 */
	if (crtc_state->csc_enable)
		ilk_read_pipe_csc(crtc, &crtc_state->csc);
}
/*
 * Write the ICL+ dedicated output CSC registers. Same layout as the
 * pipe CSC, but the output CSC always has post offset registers.
 */
static void icl_update_output_csc(struct intel_dsb *dsb,
				  struct intel_crtc *crtc,
				  const struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_HI(pipe),
			   csc->preoff[0]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_ME(pipe),
			   csc->preoff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_LO(pipe),
			   csc->preoff[2]);

	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
			   csc->coeff[0] << 16 | csc->coeff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
			   csc->coeff[2] << 16);

	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
			   csc->coeff[3] << 16 | csc->coeff[4]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
			   csc->coeff[5] << 16);

	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
			   csc->coeff[6] << 16 | csc->coeff[7]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
			   csc->coeff[8] << 16);

	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe),
			   csc->postoff[0]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe),
			   csc->postoff[1]);
	intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe),
			   csc->postoff[2]);
}
/* Read the output CSC registers back into @csc (inverse of icl_update_output_csc()). */
static void icl_read_output_csc(struct intel_crtc *crtc,
				struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_HI(pipe));
	csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_ME(pipe));
	csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_LO(pipe));

	/* two coefficients per register, unpack high/low 16 bits */
	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe));
	csc->coeff[0] = tmp >> 16;
	csc->coeff[1] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BY(pipe));
	csc->coeff[2] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe));
	csc->coeff[3] = tmp >> 16;
	csc->coeff[4] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BU(pipe));
	csc->coeff[5] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe));
	csc->coeff[6] = tmp >> 16;
	csc->coeff[7] = tmp & 0xffff;
	tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BV(pipe));
	csc->coeff[8] = tmp >> 16;

	csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe));
	csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe));
	csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe));
}
static void icl_read_csc(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/*
* Wa_1406463849: icl
*
* See skl_read_csc()
*/
if (crtc_state->csc_mode & ICL_CSC_ENABLE)
ilk_read_pipe_csc(crtc, &crtc_state->csc);
if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE)
icl_read_output_csc(crtc, &crtc_state->output_csc);
}
/*
 * Does this platform/state need the pipe hardware (CSC or LUT) to
 * perform the full->limited range compression?
 */
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* icl+ have dedicated output CSC */
	if (DISPLAY_VER(display) >= 11)
		return false;

	/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
	if (DISPLAY_VER(display) < 7)
		return false;
	if (display->platform.ivybridge)
		return false;

	return crtc_state->limited_color_range;
}
/* Should the gamma LUT (rather than the CSC) do the range compression? */
static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!ilk_limited_range(crtc_state))
		return false;

	/* not possible with C8 planes */
	if (crtc_state->c8_planes)
		return false;

	if (DISPLAY_VER(display) == 10)
		return crtc_state->hw.gamma_lut;

	if (!crtc_state->hw.gamma_lut)
		return false;

	return crtc_state->hw.degamma_lut || crtc_state->hw.ctm;
}
/* Should the CSC (rather than the gamma LUT) do the range compression? */
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
{
	return ilk_limited_range(crtc_state) &&
		!ilk_lut_limited_range(crtc_state);
}
/* Copy @src to @dst, dropping the post offsets on platforms without them. */
static void ilk_csc_copy(struct intel_display *display,
			 struct intel_csc_matrix *dst,
			 const struct intel_csc_matrix *src)
{
	*dst = *src;

	/* ILK/SNB have no post offset registers */
	if (DISPLAY_VER(display) < 7)
		memset(dst->postoff, 0, sizeof(dst->postoff));
}
/*
 * Convert the userspace S31.32 CTM into the hardware's
 * sign+exponent+mantissa coefficient format, optionally folding in
 * the full->limited range compression.
 */
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
				struct intel_csc_matrix *csc,
				bool limited_color_range)
{
	struct intel_display *display = to_intel_display(crtc_state);
	const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
	const u64 *input;
	u64 temp[9];
	int i;

	/* for preoff/postoff */
	if (limited_color_range)
		ilk_csc_copy(display, csc, &ilk_csc_matrix_limited_range);
	else
		ilk_csc_copy(display, csc, &ilk_csc_matrix_identity);

	if (limited_color_range)
		input = ctm_mult_by_limited(temp, ctm->matrix);
	else
		input = ctm->matrix;

	/*
	 * Convert fixed point S31.32 input to format supported by the
	 * hardware.
	 */
	for (i = 0; i < 9; i++) {
		/* strip the sign bit to get the magnitude */
		u64 abs_coeff = ((1ULL << 63) - 1) & input[i];

		/*
		 * Clamp input value to min/max supported by
		 * hardware.
		 */
		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);

		csc->coeff[i] = 0;

		/* sign bit */
		if (CTM_COEFF_NEGATIVE(input[i]))
			csc->coeff[i] |= 1 << 15;

		/*
		 * Pick the exponent (bits 14:12) that puts the 9bit
		 * mantissa window over the most significant bits of
		 * the magnitude, then extract the mantissa.
		 */
		if (abs_coeff < CTM_COEFF_0_125)
			csc->coeff[i] |= (3 << 12) |
				ILK_CSC_COEFF_FP(abs_coeff, 12);
		else if (abs_coeff < CTM_COEFF_0_25)
			csc->coeff[i] |= (2 << 12) |
				ILK_CSC_COEFF_FP(abs_coeff, 11);
		else if (abs_coeff < CTM_COEFF_0_5)
			csc->coeff[i] |= (1 << 12) |
				ILK_CSC_COEFF_FP(abs_coeff, 10);
		else if (abs_coeff < CTM_COEFF_1_0)
			csc->coeff[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
		else if (abs_coeff < CTM_COEFF_2_0)
			csc->coeff[i] |= (7 << 12) |
				ILK_CSC_COEFF_FP(abs_coeff, 8);
		else
			csc->coeff[i] |= (6 << 12) |
				ILK_CSC_COEFF_FP(abs_coeff, 7);
	}
}
/* Compute the pipe CSC software state from the uapi color state. */
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	bool limited_color_range = ilk_csc_limited_range(crtc_state);

	if (crtc_state->hw.ctm) {
		drm_WARN_ON(display->drm, !crtc_state->csc_enable);

		ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range);
	} else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
		drm_WARN_ON(display->drm, !crtc_state->csc_enable);

		ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr);
	} else if (limited_color_range) {
		drm_WARN_ON(display->drm, !crtc_state->csc_enable);

		ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_limited_range);
	} else if (crtc_state->csc_enable) {
		/*
		 * On GLK both pipe CSC and degamma LUT are controlled
		 * by csc_enable. Hence for the cases where the degama
		 * LUT is needed but CSC is not we need to load an
		 * identity matrix.
		 */
		drm_WARN_ON(display->drm, !display->platform.geminilake);

		ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
	} else {
		intel_csc_clear(&crtc_state->csc);
	}
}
/* Program the pipe CSC from the software state, if enabled. */
static void ilk_load_csc_matrix(struct intel_dsb *dsb,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->csc_enable)
		return;

	ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc);
}
/*
 * Compute the ICL+ pipe CSC (from the CTM) and output CSC
 * (YCbCr conversion / limited range) software state.
 */
static void icl_assign_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->hw.ctm) {
		drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0);

		/* limited range is handled by the output CSC on icl+ */
		ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false);
	} else {
		drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0);

		intel_csc_clear(&crtc_state->csc);
	}

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
		drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);

		ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr);
	} else if (crtc_state->limited_color_range) {
		drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);

		ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_limited_range);
	} else {
		drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0);

		intel_csc_clear(&crtc_state->output_csc);
	}
}
static void icl_load_csc_matrix(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (crtc_state->csc_mode & ICL_CSC_ENABLE)
ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc);
if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE)
icl_update_output_csc(dsb, crtc, &crtc_state->output_csc);
}
/*
 * Convert an S31.32 sign-magnitude CTM coefficient into a two's
 * complement fixed point value with the given number of integer
 * and fractional bits, rounding to nearest and saturating.
 */
static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
{
	s64 c = CTM_COEFF_ABS(coeff);

	/* leave an extra bit for rounding */
	c >>= 32 - frac_bits - 1;

	/* round and drop the extra bit */
	c = (c + 1) >> 1;

	if (CTM_COEFF_NEGATIVE(coeff))
		c = -c;

	/* saturate to the representable range before truncating */
	c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
		  (s64)(BIT(int_bits + frac_bits - 1) - 1));

	return c & (BIT(int_bits + frac_bits) - 1);
}
/*
* VLV/CHV Wide Gamut Color Correction (WGC) CSC
* |r| | c0 c1 c2 | |r|
* |g| = | c3 c4 c5 | x |g|
* |b| | c6 c7 c8 | |b|
*
* Coefficients are two's complement s2.10.
*/
static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
struct intel_csc_matrix *csc)
{
const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
int i;
for (i = 0; i < 9; i++)
csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 2, 10);
}
/* Write the VLV/CHV WGC CSC coefficients (two per register where packed). */
static void vlv_load_wgc_csc(struct intel_crtc *crtc,
			     const struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	intel_de_write_fw(display, PIPE_WGC_C01_C00(display, pipe),
			  csc->coeff[1] << 16 | csc->coeff[0]);
	intel_de_write_fw(display, PIPE_WGC_C02(display, pipe),
			  csc->coeff[2]);

	intel_de_write_fw(display, PIPE_WGC_C11_C10(display, pipe),
			  csc->coeff[4] << 16 | csc->coeff[3]);
	intel_de_write_fw(display, PIPE_WGC_C12(display, pipe),
			  csc->coeff[5]);

	intel_de_write_fw(display, PIPE_WGC_C21_C20(display, pipe),
			  csc->coeff[7] << 16 | csc->coeff[6]);
	intel_de_write_fw(display, PIPE_WGC_C22(display, pipe),
			  csc->coeff[8]);
}
/* Read the WGC CSC registers back into @csc (inverse of vlv_load_wgc_csc()). */
static void vlv_read_wgc_csc(struct intel_crtc *crtc,
			     struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read_fw(display, PIPE_WGC_C01_C00(display, pipe));
	csc->coeff[0] = tmp & 0xffff;
	csc->coeff[1] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_WGC_C02(display, pipe));
	csc->coeff[2] = tmp & 0xffff;

	tmp = intel_de_read_fw(display, PIPE_WGC_C11_C10(display, pipe));
	csc->coeff[3] = tmp & 0xffff;
	csc->coeff[4] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_WGC_C12(display, pipe));
	csc->coeff[5] = tmp & 0xffff;

	tmp = intel_de_read_fw(display, PIPE_WGC_C21_C20(display, pipe));
	csc->coeff[6] = tmp & 0xffff;
	csc->coeff[7] = tmp >> 16;

	tmp = intel_de_read_fw(display, PIPE_WGC_C22(display, pipe));
	csc->coeff[8] = tmp & 0xffff;
}
/* Read out the WGC CSC state, if enabled. */
static void vlv_read_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->wgc_enable)
		return;

	vlv_read_wgc_csc(crtc, &crtc_state->csc);
}
/* Compute the WGC CSC software state from the uapi CTM. */
static void vlv_assign_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->hw.ctm) {
		/* wgc_enable should follow the presence of a CTM */
		drm_WARN_ON(display->drm, !crtc_state->wgc_enable);

		vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc);
	} else {
		drm_WARN_ON(display->drm, crtc_state->wgc_enable);

		intel_csc_clear(&crtc_state->csc);
	}
}
/*
* CHV Color Gamut Mapping (CGM) CSC
* |r| | c0 c1 c2 | |r|
* |g| = | c3 c4 c5 | x |g|
* |b| | c6 c7 c8 | |b|
*
* Coefficients are two's complement s4.12.
*/
static void chv_cgm_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
struct intel_csc_matrix *csc)
{
const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
int i;
for (i = 0; i < 9; i++)
csc->coeff[i] = ctm_to_twos_complement(ctm->matrix[i], 4, 12);
}
/* 1.0 in two's complement s4.12 fixed point */
#define CHV_CGM_CSC_COEFF_1_0 (1 << 12)

/* Identity matrix - used when the CGM CSC is enabled without a CTM */
static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = {
	.coeff = {
		CHV_CGM_CSC_COEFF_1_0, 0, 0,
		0, CHV_CGM_CSC_COEFF_1_0, 0,
		0, 0, CHV_CGM_CSC_COEFF_1_0,
	},
};
/* Write the CHV CGM CSC coefficients (two per register, last one alone). */
static void chv_load_cgm_csc(struct intel_crtc *crtc,
			     const struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	intel_de_write_fw(display, CGM_PIPE_CSC_COEFF01(pipe),
			  csc->coeff[1] << 16 | csc->coeff[0]);
	intel_de_write_fw(display, CGM_PIPE_CSC_COEFF23(pipe),
			  csc->coeff[3] << 16 | csc->coeff[2]);
	intel_de_write_fw(display, CGM_PIPE_CSC_COEFF45(pipe),
			  csc->coeff[5] << 16 | csc->coeff[4]);
	intel_de_write_fw(display, CGM_PIPE_CSC_COEFF67(pipe),
			  csc->coeff[7] << 16 | csc->coeff[6]);
	intel_de_write_fw(display, CGM_PIPE_CSC_COEFF8(pipe),
			  csc->coeff[8]);
}
/* Read the CGM CSC registers back into @csc (inverse of chv_load_cgm_csc()). */
static void chv_read_cgm_csc(struct intel_crtc *crtc,
			     struct intel_csc_matrix *csc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF01(pipe));
	csc->coeff[0] = tmp & 0xffff;
	csc->coeff[1] = tmp >> 16;

	tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF23(pipe));
	csc->coeff[2] = tmp & 0xffff;
	csc->coeff[3] = tmp >> 16;

	tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF45(pipe));
	csc->coeff[4] = tmp & 0xffff;
	csc->coeff[5] = tmp >> 16;

	tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF67(pipe));
	csc->coeff[6] = tmp & 0xffff;
	csc->coeff[7] = tmp >> 16;

	tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF8(pipe));
	csc->coeff[8] = tmp & 0xffff;
}
/* Read out the CGM CSC state, if enabled. */
static void chv_read_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!(crtc_state->cgm_mode & CGM_PIPE_MODE_CSC))
		return;

	chv_read_cgm_csc(crtc, &crtc_state->csc);
}
/* Compute the CGM CSC software state from the uapi CTM. */
static void chv_assign_csc(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/* CHV uses the CGM CSC, never the VLV WGC CSC */
	drm_WARN_ON(display->drm, crtc_state->wgc_enable);

	if (crtc_state->hw.ctm) {
		drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);

		chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc);
	} else {
		/*
		 * NOTE(review): both branches expect CGM_PIPE_MODE_CSC to be
		 * set, i.e. the CGM CSC appears to be kept enabled (with an
		 * identity matrix) even without a CTM - confirm this matches
		 * the cgm_mode computed during the color check phase.
		 */
		drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);

		crtc_state->csc = chv_cgm_csc_matrix_identity;
	}
}
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, int bit_precision)
{
	if (bit_precision <= 16)
		return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1),
					 (1 << bit_precision) - 1);

	/* the product may exceed 32 bits here, use 64bit arithmetic */
	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1),
				     (1 << bit_precision) - 1);
}
/* Pack one LUT entry into the 8bit palette register format. */
static u32 i9xx_lut_8(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(PALETTE_RED_MASK, drm_color_lut_extract(color->red, 8)) |
		REG_FIELD_PREP(PALETTE_GREEN_MASK, drm_color_lut_extract(color->green, 8)) |
		REG_FIELD_PREP(PALETTE_BLUE_MASK, drm_color_lut_extract(color->blue, 8));
}
/* Unpack an 8bit palette register value into a LUT entry (inverse of i9xx_lut_8()). */
static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
{
	entry->red = intel_color_lut_pack(REG_FIELD_GET(PALETTE_RED_MASK, val), 8);
	entry->green = intel_color_lut_pack(REG_FIELD_GET(PALETTE_GREEN_MASK, val), 8);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(PALETTE_BLUE_MASK, val), 8);
}
/* i8xx/i9xx+ 10bit slope format "even DW" (low 8 bits) */
static u32 _i9xx_lut_10_ldw(u16 a)
{
	/* low 8 bits of the 10bit value */
	return drm_color_lut_extract(a, 10) & 0xff;
}
/* Pack the low 8 bits of all three channels into the "even DW". */
static u32 i9xx_lut_10_ldw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_ldw(color[0].red)) |
		REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_ldw(color[0].green)) |
		REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_ldw(color[0].blue));
}
/* i8xx/i9xx+ 10bit slope format "odd DW" (high 2 bits + slope) */
static u32 _i9xx_lut_10_udw(u16 a, u16 b)
{
	unsigned int mantissa, exponent;

	a = drm_color_lut_extract(a, 10);
	b = drm_color_lut_extract(b, 10);

	/* b = a + 8 * m * 2 ^ -e */
	mantissa = clamp(b - a, 0, 0x7f);
	exponent = 3;
	/* normalize so the mantissa fits in 4 bits */
	while (mantissa > 0xf) {
		mantissa >>= 1;
		exponent--;
	}

	/* 2bit exponent, 4bit mantissa, 2 high bits of the 10bit value */
	return (exponent << 6) |
		(mantissa << 2) |
		(a >> 8);
}
/* Pack the high bits + slope of all three channels into the "odd DW". */
static u32 i9xx_lut_10_udw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_udw(color[0].red, color[1].red)) |
		REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_udw(color[0].green, color[1].green)) |
		REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_udw(color[0].blue, color[1].blue));
}
/* Reassemble the 10bit base value from the even/odd DW pair. */
static void i9xx_lut_10_pack(struct drm_color_lut *color,
			     u32 ldw, u32 udw)
{
	u16 red = REG_FIELD_GET(PALETTE_10BIT_RED_LDW_MASK, ldw) |
		REG_FIELD_GET(PALETTE_10BIT_RED_UDW_MASK, udw) << 8;
	u16 green = REG_FIELD_GET(PALETTE_10BIT_GREEN_LDW_MASK, ldw) |
		REG_FIELD_GET(PALETTE_10BIT_GREEN_UDW_MASK, udw) << 8;
	u16 blue = REG_FIELD_GET(PALETTE_10BIT_BLUE_LDW_MASK, ldw) |
		REG_FIELD_GET(PALETTE_10BIT_BLUE_UDW_MASK, udw) << 8;

	color->red = intel_color_lut_pack(red, 10);
	color->green = intel_color_lut_pack(green, 10);
	color->blue = intel_color_lut_pack(blue, 10);
}
/* Like i9xx_lut_10_pack(), but also add the decoded slope term (m * 8 * 2^-e). */
static void i9xx_lut_10_pack_slope(struct drm_color_lut *color,
				   u32 ldw, u32 udw)
{
	int r_exp = REG_FIELD_GET(PALETTE_10BIT_RED_EXP_MASK, udw);
	int r_mant = REG_FIELD_GET(PALETTE_10BIT_RED_MANT_MASK, udw);
	int g_exp = REG_FIELD_GET(PALETTE_10BIT_GREEN_EXP_MASK, udw);
	int g_mant = REG_FIELD_GET(PALETTE_10BIT_GREEN_MANT_MASK, udw);
	int b_exp = REG_FIELD_GET(PALETTE_10BIT_BLUE_EXP_MASK, udw);
	int b_mant = REG_FIELD_GET(PALETTE_10BIT_BLUE_MANT_MASK, udw);

	i9xx_lut_10_pack(color, ldw, udw);

	/* mantissa << (3 - exp) == m * 8 * 2^-e (see _i9xx_lut_10_udw()) */
	color->red += r_mant << (3 - r_exp);
	color->green += g_mant << (3 - g_exp);
	color->blue += b_mant << (3 - b_exp);
}
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
{
return REG_FIELD_PREP(PALETTE_RED_MASK, color->red & 0xff) |
REG_FIELD_PREP(PALETTE_GREEN_MASK, color->green & 0xff) |
REG_FIELD_PREP(PALETTE_BLUE_MASK, color->blue & 0xff);
}
/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
{
return REG_FIELD_PREP(PALETTE_RED_MASK, color->red >> 8) |
REG_FIELD_PREP(PALETTE_GREEN_MASK, color->green >> 8) |
REG_FIELD_PREP(PALETTE_BLUE_MASK, color->blue >> 8);
}
/* Reassemble a 16bit LUT entry from the even/odd DW pair. */
static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
	entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
		REG_FIELD_GET(PALETTE_RED_MASK, ldw);
	entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
		REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
	entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
		REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
static u16 i965_lut_11p6_max_pack(u32 val)
{
	/* PIPEGCMAX is 11.6, clamp to 10.6 */
	if (val > 0xffff)
		return 0xffff;

	return val;
}
/* Pack one LUT entry into the ilk+ 10bit precision palette format. */
static u32 ilk_lut_10(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(PREC_PALETTE_10_RED_MASK, drm_color_lut_extract(color->red, 10)) |
		REG_FIELD_PREP(PREC_PALETTE_10_GREEN_MASK, drm_color_lut_extract(color->green, 10)) |
		REG_FIELD_PREP(PREC_PALETTE_10_BLUE_MASK, drm_color_lut_extract(color->blue, 10));
}
/* Unpack a 10bit precision palette value into a LUT entry (inverse of ilk_lut_10()). */
static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
	entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_RED_MASK, val), 10);
	entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_GREEN_MASK, val), 10);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_10_BLUE_MASK, val), 10);
}
/* ilk+ "12.4" interpolated format (low 6 bits) */
static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
{
return REG_FIELD_PREP(PREC_PALETTE_12P4_RED_LDW_MASK, color->red & 0x3f) |
REG_FIELD_PREP(PREC_PALETTE_12P4_GREEN_LDW_MASK, color->green & 0x3f) |
REG_FIELD_PREP(PREC_PALETTE_12P4_BLUE_LDW_MASK, color->blue & 0x3f);
}
/* ilk+ "12.4" interpolated format (high 10 bits) */
static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
{
return REG_FIELD_PREP(PREC_PALETTE_12P4_RED_UDW_MASK, color->red >> 6) |
REG_FIELD_PREP(PREC_PALETTE_12P4_GREEN_UDW_MASK, color->green >> 6) |
REG_FIELD_PREP(PREC_PALETTE_12P4_BLUE_UDW_MASK, color->blue >> 6);
}
/* Reassemble a 16bit LUT entry from the 12.4 low/high register pair. */
static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
	entry->red = REG_FIELD_GET(PREC_PALETTE_12P4_RED_UDW_MASK, udw) << 6 |
		REG_FIELD_GET(PREC_PALETTE_12P4_RED_LDW_MASK, ldw);
	entry->green = REG_FIELD_GET(PREC_PALETTE_12P4_GREEN_UDW_MASK, udw) << 6 |
		REG_FIELD_GET(PREC_PALETTE_12P4_GREEN_LDW_MASK, ldw);
	entry->blue = REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_UDW_MASK, udw) << 6 |
		REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_LDW_MASK, ldw);
}
/* ICL+ non-arming commit: program the CSCs ahead of vblank evasion. */
static void icl_color_commit_noarm(struct intel_dsb *dsb,
				   const struct intel_crtc_state *crtc_state)
{
	/*
	 * Despite Wa_1406463849, ICL no longer suffers from the SKL
	 * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
	 * Possibly due to the extra sticky CSC arming
	 * (see icl_color_post_update()).
	 *
	 * On TGL+ all CSC arming issues have been properly fixed.
	 */
	icl_load_csc_matrix(dsb, crtc_state);
}
/* SKL non-arming commit: program the CSC early, except when PSR is enabled. */
static void skl_color_commit_noarm(struct intel_dsb *dsb,
				   const struct intel_crtc_state *crtc_state)
{
	/*
	 * Possibly related to display WA #1184, SKL CSC loses the latched
	 * CSC coeff/offset register values if the CSC registers are disarmed
	 * between DC5 exit and PSR exit. This will cause the plane(s) to
	 * output all black (until CSC_MODE is rearmed and properly latched).
	 * Once PSR exit (and proper register latching) has occurred the
	 * danger is over. Thus when PSR is enabled the CSC coeff/offset
	 * register programming will be performed from skl_color_commit_arm()
	 * which is called after PSR exit.
	 */
	if (!crtc_state->has_psr)
		ilk_load_csc_matrix(dsb, crtc_state);
}
/* ILK-BDW non-arming commit: program the pipe CSC ahead of vblank evasion. */
static void ilk_color_commit_noarm(struct intel_dsb *dsb,
				   const struct intel_crtc_state *crtc_state)
{
	ilk_load_csc_matrix(dsb, crtc_state);
}
/* GMCH arming commit: gamma mode lives in the pipe config register. */
static void i9xx_color_commit_arm(struct intel_dsb *dsb,
				  const struct intel_crtc_state *crtc_state)
{
	/* update TRANSCONF GAMMA_MODE */
	i9xx_set_pipeconf(crtc_state);
}
/* ILK/SNB arming commit: pipe config gamma mode + CSC mode. */
static void ilk_color_commit_arm(struct intel_dsb *dsb,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_display *display = to_intel_display(crtc);

	/* update TRANSCONF GAMMA_MODE */
	ilk_set_pipeconf(crtc_state);

	intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe),
			  crtc_state->csc_mode);
}
/*
 * Arm the color update on HSW/BDW: unlike ILK, GAMMA_MODE is a
 * dedicated register here rather than living in TRANSCONF.
 */
static void hsw_color_commit_arm(struct intel_dsb *dsb,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_display *display = to_intel_display(crtc);

	intel_de_write(display, GAMMA_MODE(crtc->pipe),
		       crtc_state->gamma_mode);

	/* raw write — we're inside vblank evasion here */
	intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe),
			  crtc_state->csc_mode);
}
/* Read back the current GAMMA_MODE register value for this pipe. */
static u32 hsw_read_gamma_mode(struct intel_crtc *crtc)
{
	return intel_de_read(to_intel_display(crtc), GAMMA_MODE(crtc->pipe));
}
/* Read back the current PIPE_CSC_MODE register value for this pipe. */
static u32 ilk_read_csc_mode(struct intel_crtc *crtc)
{
	return intel_de_read(to_intel_display(crtc), PIPE_CSC_MODE(crtc->pipe));
}
/*
 * Read out the gamma/CSC enable state on pre-SKL platforms, where
 * those enables live in the primary plane's DSPCNTR register.
 */
static void i9xx_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(display, DSPCNTR(display, i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	/* GMCH platforms have no pipe CSC, so no CSC enable bit to read */
	if (!HAS_GMCH(display) && tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}
/*
 * HSW/BDW readout: gamma/CSC modes from their dedicated registers,
 * enable bits still via the plane register (i9xx_get_config()).
 */
static void hsw_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
	crtc_state->csc_mode = ilk_read_csc_mode(crtc);

	i9xx_get_config(crtc_state);
}
/*
 * SKL+ readout: gamma/CSC modes from their dedicated registers;
 * the enable bits are mirrored in SKL_BOTTOM_COLOR (see
 * skl_color_commit_arm() which writes them there).
 */
static void skl_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	u32 tmp;

	crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
	crtc_state->csc_mode = ilk_read_csc_mode(crtc);

	tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));

	if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
		crtc_state->csc_enable = true;
}
/*
 * Arm the color update on SKL+: program the bottom color, then the
 * arming GAMMA_MODE/PIPE_CSC_MODE writes. With PSR the CSC coefficients
 * are loaded here instead of skl_color_commit_noarm() (see the
 * DC5/PSR workaround comment there).
 */
static void skl_color_commit_arm(struct intel_dsb *dsb,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 val = 0;

	if (crtc_state->has_psr)
		ilk_load_csc_matrix(dsb, crtc_state);

	/*
	 * We don't (yet) allow userspace to control the pipe background color,
	 * so force it to black, but apply pipe gamma and CSC appropriately
	 * so that its handling will match how we program our planes.
	 */
	if (crtc_state->gamma_enable)
		val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
	if (crtc_state->csc_enable)
		val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
	intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), val);

	intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);

	intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/*
 * Arm the color update on ICL+: the GAMMA_MODE and PIPE_CSC_MODE
 * writes latch everything at the next vblank start.
 */
static void icl_color_commit_arm(struct intel_dsb *dsb,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;

	/*
	 * We don't (yet) allow userspace to control the pipe background color,
	 * so force it to black.
	 */
	intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), 0);

	intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);

	intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/* ICL-only post-update hook: disarm the sticky CSC via a register read. */
static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Despite Wa_1406463849, ICL CSC is no longer disarmed by
	 * coeff/offset register *writes*. Instead, once CSC_MODE
	 * is armed it stays armed, even after it has been latched.
	 * Afterwards the coeff/offset registers become effectively
	 * self-arming. That self-arming must be disabled before the
	 * next icl_color_commit_noarm() tries to write the next set
	 * of coeff/offset registers. Fortunately register *reads*
	 * do still disarm the CSC. Naturally this must not be done
	 * until the previously written CSC registers have actually
	 * been latched.
	 *
	 * TGL+ no longer need this workaround.
	 */
	intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(crtc->pipe));
}
/*
 * Allocate a LUT blob with a linear (identity) ramp of @lut_size
 * entries: each channel goes 0 -> 0xffff in equal steps. Returns an
 * ERR_PTR on allocation failure.
 */
static struct drm_property_blob *
create_linear_lut(struct intel_display *display, int lut_size)
{
	struct drm_property_blob *blob;
	struct drm_color_lut *entries;
	int i;

	blob = drm_property_create_blob(display->drm,
					lut_size * sizeof(entries[0]),
					NULL);
	if (IS_ERR(blob))
		return blob;

	entries = blob->data;

	for (i = 0; i < lut_size; i++) {
		/* endpoints map exactly to 0 and 0xffff */
		u16 level = 0xffff * i / (lut_size - 1);

		entries[i].red = level;
		entries[i].green = level;
		entries[i].blue = level;
	}

	return blob;
}
/*
 * Compress a 16-bit full range value into limited (16..235 in 8-bit
 * terms, here scaled by 256) range.
 */
static u16 lut_limited_range(unsigned int value)
{
	const unsigned int lo = 16 << 8;
	const unsigned int hi = 235 << 8;

	/* 0xffff * (hi - lo) fits in 32 bits, no overflow */
	return value * (hi - lo) / 0xffff + lo;
}
/*
 * Allocate a new LUT blob with @lut_out_size entries, resampled from
 * @blob_in by nearest-entry selection (both endpoints map exactly).
 * Optionally squeezes the output into limited color range. Returns an
 * ERR_PTR on allocation failure.
 */
static struct drm_property_blob *
create_resized_lut(struct intel_display *display,
		   const struct drm_property_blob *blob_in, int lut_out_size,
		   bool limited_color_range)
{
	int lut_in_size = drm_color_lut_size(blob_in);
	const struct drm_color_lut *lut_in;
	struct drm_property_blob *blob_out;
	struct drm_color_lut *lut_out;
	int i;

	blob_out = drm_property_create_blob(display->drm,
					    lut_out_size * sizeof(lut_out[0]),
					    NULL);
	if (IS_ERR(blob_out))
		return blob_out;

	lut_in = blob_in->data;
	lut_out = blob_out->data;

	for (i = 0; i < lut_out_size; i++) {
		const struct drm_color_lut *entry =
			&lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];

		if (!limited_color_range) {
			lut_out[i] = *entry;
			continue;
		}

		lut_out[i].red = lut_limited_range(entry->red);
		lut_out[i].green = lut_limited_range(entry->green);
		lut_out[i].blue = lut_limited_range(entry->blue);
	}

	return blob_out;
}
/*
 * Program the 256 entry legacy 8-bit palette. A NULL blob is
 * tolerated and simply leaves the palette untouched.
 */
static void i9xx_load_lut_8(struct intel_crtc *crtc,
			    const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_color_lut *lut;
	enum pipe pipe = crtc->pipe;
	int i;

	if (!blob)
		return;

	lut = blob->data;

	for (i = 0; i < 256; i++)
		intel_de_write_fw(display, PALETTE(display, pipe, i),
				  i9xx_lut_8(&lut[i]));
}
/*
 * Program the gen3-style 10-bit palette: each LUT entry occupies two
 * consecutive palette registers (low/high dword). Note only the first
 * lut_size - 1 entries are written here.
 */
static void i9xx_load_lut_10(struct intel_crtc *crtc,
			     const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size - 1; i++) {
		intel_de_write_fw(display,
				  PALETTE(display, pipe, 2 * i + 0),
				  i9xx_lut_10_ldw(&lut[i]));
		intel_de_write_fw(display,
				  PALETTE(display, pipe, 2 * i + 1),
				  i9xx_lut_10_udw(&lut[i]));
	}
}
/* Load the gen2/3 pipe gamma LUT appropriate for the current gamma mode. */
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;

	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
		i9xx_load_lut_8(crtc, post_csc_lut);
	else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT)
		i9xx_load_lut_10(crtc, post_csc_lut);
	else
		MISSING_CASE(crtc_state->gamma_mode);
}
/*
 * Program the i965+ 10.6 format palette: two registers per entry for
 * the first lut_size - 1 entries. After the loop i == lut_size - 1,
 * so lut[i] is the final entry, which goes into the PIPEGCMAX
 * registers instead of the palette proper.
 */
static void i965_load_lut_10p6(struct intel_crtc *crtc,
			       const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size - 1; i++) {
		intel_de_write_fw(display,
				  PALETTE(display, pipe, 2 * i + 0),
				  i965_lut_10p6_ldw(&lut[i]));
		intel_de_write_fw(display,
				  PALETTE(display, pipe, 2 * i + 1),
				  i965_lut_10p6_udw(&lut[i]));
	}

	/* last entry -> per-channel GCMAX */
	intel_de_write_fw(display, PIPEGCMAX(display, pipe, 0), lut[i].red);
	intel_de_write_fw(display, PIPEGCMAX(display, pipe, 1), lut[i].green);
	intel_de_write_fw(display, PIPEGCMAX(display, pipe, 2), lut[i].blue);
}
/* Load the i965+ pipe gamma LUT appropriate for the current gamma mode. */
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;

	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
		i9xx_load_lut_8(crtc, post_csc_lut);
	else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT)
		i965_load_lut_10p6(crtc, post_csc_lut);
	else
		MISSING_CASE(crtc_state->gamma_mode);
}
/*
 * Write a LUT-related register, routing through the color DSB when
 * one has been prepared for this commit, otherwise via direct MMIO.
 */
static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
			  i915_reg_t reg, u32 val)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->dsb_color)
		intel_dsb_reg_write(crtc_state->dsb_color, reg, val);
	else
		intel_de_write_fw(display, reg, val);
}
/*
 * Same as ilk_lut_write(), but uses the DSB's indexed register write
 * (more efficient for back-to-back writes to the same data register).
 */
static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
				  i915_reg_t reg, u32 val)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->dsb_color)
		intel_dsb_reg_write_indexed(crtc_state->dsb_color, reg, val);
	else
		intel_de_write_fw(display, reg, val);
}
/*
 * Program the ILK+ 256 entry legacy 8-bit palette. A NULL blob is
 * tolerated and leaves the palette untouched. Entries are written
 * twice when going through the DSB (see workaround comment below).
 */
static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
			   const struct drm_property_blob *blob)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_color_lut *lut;
	enum pipe pipe = crtc->pipe;
	int i;

	if (!blob)
		return;

	lut = blob->data;

	/*
	 * DSB fails to correctly load the legacy LUT unless
	 * we either write each entry twice when using posted
	 * writes, or we use non-posted writes.
	 *
	 * If palette anti-collision is active during LUT
	 * register writes:
	 * - posted writes simply get dropped and thus the LUT
	 *   contents may not be correctly updated
	 * - non-posted writes are blocked and thus the LUT
	 *   contents are always correct, but simultaneous CPU
	 *   MMIO access will start to fail
	 *
	 * Choose the lesser of two evils and use posted writes.
	 * Using posted writes is also faster, even when having
	 * to write each register twice.
	 */
	for (i = 0; i < 256; i++) {
		ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
			      i9xx_lut_8(&lut[i]));
		if (crtc_state->dsb_color)
			ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
				      i9xx_lut_8(&lut[i]));
	}
}
/* Program the ILK 10-bit precision palette (directly addressed, no index register). */
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
			    const struct drm_property_blob *blob)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++)
		ilk_lut_write(crtc_state, PREC_PALETTE(pipe, i),
			      ilk_lut_10(&lut[i]));
}
/*
 * Load the ILK pipe gamma LUT. The single hardware LUT is fed from
 * whichever blob is present, preferring the post-CSC one.
 */
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;

	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
		ilk_load_lut_8(crtc_state, blob);
	else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT)
		ilk_load_lut_10(crtc_state, blob);
	else
		MISSING_CASE(crtc_state->gamma_mode);
}
/*
 * Number of precision palette entries addressable for the given
 * PAL_PREC_INDEX value: split gamma mode halves the 1024 entry LUT.
 */
static int ivb_lut_10_size(u32 prec_index)
{
	return (prec_index & PAL_PREC_SPLIT_MODE) ? 512 : 1024;
}
/*
* IVB/HSW Bspec / PAL_PREC_INDEX:
* "Restriction : Index auto increment mode is not
* supported and must not be enabled."
*/
/*
 * Program the IVB/HSW precision palette. The index register must be
 * rewritten for every entry since auto-increment is not supported on
 * these platforms (see the Bspec restriction quoted above).
 */
static void ivb_load_lut_10(const struct intel_crtc_state *crtc_state,
			    const struct drm_property_blob *blob,
			    u32 prec_index)
{
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++) {
		ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
			      prec_index + i);
		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_10(&lut[i]));
	}

	/*
	 * Reset the index, otherwise it prevents the legacy palette to be
	 * written properly.
	 */
	ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
		      PAL_PREC_INDEX_VALUE(0));
}
/* On BDW+ the index auto increment mode actually works */
/*
 * Program the BDW+ precision palette using index auto-increment:
 * set the start index once, then stream the data register writes.
 */
static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
			    const struct drm_property_blob *blob,
			    u32 prec_index)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	/*
	 * Set the index first without auto-increment, then enable it;
	 * NOTE(review): presumably mirrors the glk_load_degamma_lut()
	 * quirk where the hardware ignores the index bits when the
	 * auto-increment bit is set — confirm against Bspec.
	 */
	ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
		      prec_index);
	ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
		      PAL_PREC_AUTO_INCREMENT |
		      prec_index);

	for (i = 0; i < lut_size; i++)
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_10(&lut[i]));

	/*
	 * Reset the index, otherwise it prevents the legacy palette to be
	 * written properly.
	 */
	ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
		      PAL_PREC_INDEX_VALUE(0));
}
/* Program the extended-range gamma max registers to clamp at 1.0 (1 << 16). */
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;

	/* Program the max register to clamp values > 1.0. */
	ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
	ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
	ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
}
/* GLK+ second set of extended-range gamma max registers, same 1.0 clamp. */
static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;

	/* Program the max register to clamp values > 1.0. */
	ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
	ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
	ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
}
/*
 * Load the IVB/HSW pipe gamma LUT(s). In split gamma mode the lower
 * 512 entries hold the pre-CSC (degamma) curve and the upper 512 the
 * post-CSC (gamma) curve; otherwise a single LUT is programmed from
 * whichever blob is present, preferring the post-CSC one.
 */
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		ilk_load_lut_8(crtc_state, blob);
		break;
	case GAMMA_MODE_MODE_SPLIT:
		ivb_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
				PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		ivb_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
				PAL_PREC_INDEX_VALUE(512));
		break;
	case GAMMA_MODE_MODE_10BIT:
		ivb_load_lut_10(crtc_state, blob,
				PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}
}
/*
 * Load the BDW pipe gamma LUT(s). Same layout as ivb_load_luts(),
 * but using the auto-increment capable bdw_load_lut_10().
 */
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		ilk_load_lut_8(crtc_state, blob);
		break;
	case GAMMA_MODE_MODE_SPLIT:
		bdw_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
				PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
				PAL_PREC_INDEX_VALUE(512));
		break;
	case GAMMA_MODE_MODE_10BIT:
		bdw_load_lut_10(crtc_state, blob,
				PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}
}
/*
 * Total hardware degamma LUT size, including the extended-range
 * entries beyond 1.0 (131 on display ver 13+, 35 before that).
 */
static int glk_degamma_lut_size(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 13 ? 131 : 35;
}
/*
 * GLK+ degamma has a single curve for all channels; the green channel
 * is used as the representative value (see glk_load_degamma_lut()).
 */
static u32 glk_degamma_lut(const struct drm_color_lut *color)
{
	return color->green;
}
/* Convert a hardware degamma register value back into a LUT entry. */
static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
{
	/* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */
	entry->red = entry->green = entry->blue = min(val, 0xffffu);
}
/* MTL degamma register value: green channel expanded from 16 to 24 bits. */
static u32 mtl_degamma_lut(const struct drm_color_lut *color)
{
	return drm_color_lut_extract(color->green, 24);
}
/* Convert an MTL hardware degamma register value back into a LUT entry. */
static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
{
	/* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */
	entry->red = entry->green = entry->blue =
		intel_color_lut_pack(min(val, 0xffffffu), 24);
}
/*
 * Program the GLK+ pre-CSC (degamma) LUT via the auto-incrementing
 * index/data register pair, then fill the remaining extended-range
 * entries with the 1.0 clamp value.
 */
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
				 const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	/*
	 * When setting the auto-increment bit, the hardware seems to
	 * ignore the index bits, so we need to reset it to index 0
	 * separately.
	 */
	ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
		      PRE_CSC_GAMC_INDEX_VALUE(0));
	ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
		      PRE_CSC_GAMC_AUTO_INCREMENT |
		      PRE_CSC_GAMC_INDEX_VALUE(0));

	for (i = 0; i < lut_size; i++) {
		/*
		 * First lut_size entries represent range from 0 to 1.0
		 * 3 additional lut entries will represent extended range
		 * inputs 3.0 and 7.0 respectively, currently clamped
		 * at 1.0. Since the precision is 16bit, the user
		 * value can be directly filled to register.
		 * The pipe degamma table in GLK+ onwards doesn't
		 * support different values per channel, so this just
		 * programs green value which will be equal to Red and
		 * Blue into the lut registers.
		 * ToDo: Extend to max 7.0. Enable 32 bit input value
		 * as compared to just 16 to achieve this.
		 */
		ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
				      DISPLAY_VER(display) >= 14 ?
				      mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
	}

	/* Clamp values > 1.0. */
	/* 1.0 in 3.24 (MTL+) or 3.16 fixed point */
	while (i++ < glk_degamma_lut_size(display))
		ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
				      DISPLAY_VER(display) >= 14 ?
				      1 << 24 : 1 << 16);

	/* reset the index register (also clears auto-increment) */
	ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
/*
 * Load the GLK pipe LUTs: a dedicated pre-CSC degamma LUT (when
 * present) plus the post-CSC gamma LUT per the current gamma mode.
 */
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;

	if (pre_csc_lut)
		glk_load_degamma_lut(crtc_state, pre_csc_lut);

	switch (crtc_state->gamma_mode) {
	case GAMMA_MODE_MODE_8BIT:
		ilk_load_lut_8(crtc_state, post_csc_lut);
		break;
	case GAMMA_MODE_MODE_10BIT:
		bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		glk_load_lut_ext2_max(crtc_state);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}
}
static void
ivb_load_lut_max(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *blob = crtc_state->post_csc_lut;
const struct drm_color_lut *lut = blob->data;
enum pipe pipe = crtc->pipe;
int i;
/*
* Program Super Fine segment (let's call it seg1)...
*
* Super Fine segment's step is 1/(8 * 128 * 256) and it has
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT |
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_ldw(entry));
ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *blob = crtc_state->post_csc_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
enum pipe pipe = crtc->pipe;
int i;
/*
* Program Fine segment (let's call it seg2)...
*
* Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
* ... 256/(128 * 256). So in order to program fine segment of LUT we
* need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
PAL_PREC_INDEX_VALUE(0));
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT |
PAL_PREC_INDEX_VALUE(0));
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
* Coarse segment starts from index 0 and it's step is 1/256 ie 0,
* 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
* Spec is not very clear about if entries seg3[0] and seg3[1] are
* being used or not, but we still need to program these to advance
* the index.
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_ldw(entry));
ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
PAL_PREC_INDEX_VALUE(0));
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
ivb_load_lut_max(crtc_state, entry);
}
/*
 * Load the ICL+ pipe LUTs: pre-CSC degamma (when present), then the
 * post-CSC gamma in whichever mode was picked by the state check.
 * Only the mode bits of gamma_mode are dispatched on here.
 */
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;

	if (pre_csc_lut)
		glk_load_degamma_lut(crtc_state, pre_csc_lut);

	switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
	case GAMMA_MODE_MODE_8BIT:
		ilk_load_lut_8(crtc_state, post_csc_lut);
		break;
	case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
		icl_program_gamma_superfine_segment(crtc_state);
		icl_program_gamma_multi_segment(crtc_state);
		ivb_load_lut_ext_max(crtc_state);
		glk_load_lut_ext2_max(crtc_state);
		break;
	case GAMMA_MODE_MODE_10BIT:
		bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
		ivb_load_lut_ext_max(crtc_state);
		glk_load_lut_ext2_max(crtc_state);
		break;
	default:
		MISSING_CASE(crtc_state->gamma_mode);
		break;
	}
}
/* VLV: program the WGC CSC (when enabled) and the i965-style gamma LUT. */
static void vlv_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (crtc_state->wgc_enable)
		vlv_load_wgc_csc(crtc, &crtc_state->csc);

	i965_load_luts(crtc_state);
}
/* Low dword of a CHV CGM degamma entry: 14-bit green + blue. */
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, drm_color_lut_extract(color->green, 14)) |
		REG_FIELD_PREP(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, drm_color_lut_extract(color->blue, 14));
}
/* High dword of a CHV CGM degamma entry: 14-bit red. */
static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 14));
}
/* Inverse of chv_cgm_degamma_ldw()/udw(): registers back to a LUT entry. */
static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
	entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, ldw), 14);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, ldw), 14);
	entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_RED_UDW_MASK, udw), 14);
}
/* Program the CHV CGM degamma LUT, one low/high dword pair per entry. */
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
				 const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++) {
		intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0),
				  chv_cgm_degamma_ldw(&lut[i]));
		intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1),
				  chv_cgm_degamma_udw(&lut[i]));
	}
}
/* Low dword of a CHV CGM gamma entry: 10-bit green + blue. */
static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(CGM_PIPE_GAMMA_GREEN_LDW_MASK, drm_color_lut_extract(color->green, 10)) |
		REG_FIELD_PREP(CGM_PIPE_GAMMA_BLUE_LDW_MASK, drm_color_lut_extract(color->blue, 10));
}
/* High dword of a CHV CGM gamma entry: 10-bit red. */
static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
{
	return REG_FIELD_PREP(CGM_PIPE_GAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 10));
}
/* Inverse of chv_cgm_gamma_ldw()/udw(): registers back to a LUT entry. */
static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
	entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_LDW_MASK, ldw), 10);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_LDW_MASK, ldw), 10);
	entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_UDW_MASK, udw), 10);
}
/* Program the CHV CGM gamma LUT, one low/high dword pair per entry. */
static void chv_load_cgm_gamma(struct intel_crtc *crtc,
			       const struct drm_property_blob *blob)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_color_lut *lut = blob->data;
	int i, lut_size = drm_color_lut_size(blob);
	enum pipe pipe = crtc->pipe;

	for (i = 0; i < lut_size; i++) {
		intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 0),
				  chv_cgm_gamma_ldw(&lut[i]));
		intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 1),
				  chv_cgm_gamma_udw(&lut[i]));
	}
}
/*
 * Load the CHV CGM (color gamut mapping) units per cgm_mode, falling
 * back to the legacy i965 LUT when CGM gamma is not in use, and
 * finally write CGM_PIPE_MODE to enable the selected stages.
 */
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
	const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
		chv_load_cgm_csc(crtc, &crtc_state->csc);

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
		chv_load_cgm_degamma(crtc, pre_csc_lut);

	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
		chv_load_cgm_gamma(crtc, post_csc_lut);
	else
		i965_load_luts(crtc_state);

	intel_de_write_fw(display, CGM_PIPE_MODE(crtc->pipe),
			  crtc_state->cgm_mode);
}
/*
 * Load the pipe LUTs immediately via MMIO. A no-op when a color DSB
 * was prepared, since the LUT writes were already baked into it by
 * intel_color_prepare_commit().
 */
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (crtc_state->dsb_color)
		return;

	display->funcs.color->load_luts(crtc_state);
}
/* Invoke the optional platform non-arming color commit hook. */
void intel_color_commit_noarm(struct intel_dsb *dsb,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (display->funcs.color->color_commit_noarm)
		display->funcs.color->color_commit_noarm(dsb, crtc_state);
}
/* Invoke the mandatory platform arming color commit hook. */
void intel_color_commit_arm(struct intel_dsb *dsb,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	display->funcs.color->color_commit_arm(dsb, crtc_state);
}
/* Invoke the optional platform post-update hook (e.g. ICL CSC disarm). */
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (display->funcs.color->color_post_update)
		display->funcs.color->color_post_update(crtc_state);
}
/*
 * Full color programming for a modeset: LUTs plus both commit phases,
 * all via direct MMIO (dsb == NULL).
 */
void intel_color_modeset(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	intel_color_load_luts(crtc_state);
	intel_color_commit_noarm(NULL, crtc_state);
	intel_color_commit_arm(NULL, crtc_state);

	if (DISPLAY_VER(display) < 9) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
		struct intel_plane *plane = to_intel_plane(crtc->base.primary);

		/* update DSPCNTR to configure gamma/csc for pipe bottom color */
		plane->disable_arm(NULL, plane, crtc_state);
	}
}
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state)
{
return crtc_state->dsb_color;
}
/*
 * A chained DSB is used when a color DSB exists but the hardware
 * lacks double buffered LUTs (the DSB must then run during the
 * update window, chained from the main commit DSB).
 */
bool intel_color_uses_chained_dsb(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return crtc_state->dsb_color && !HAS_DOUBLE_BUFFERED_LUT(display);
}
/*
 * A gosub DSB is used when a color DSB exists and the hardware has
 * double buffered LUTs (the DSB is called as a subroutine).
 */
bool intel_color_uses_gosub_dsb(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return crtc_state->dsb_color && HAS_DOUBLE_BUFFERED_LUT(display);
}
/*
 * Pre-bake the LUT programming for this commit into a DSB, when
 * applicable (active non-modeset commit with a color update and at
 * least one LUT). On platforms with double buffered LUTs the DSB is
 * finished as a gosub subroutine, otherwise it is chained and also
 * carries the VRR push/vblank-wait/interrupt sequence.
 */
void intel_color_prepare_commit(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->hw.active ||
	    intel_crtc_needs_modeset(crtc_state))
		return;

	if (!intel_crtc_needs_color_update(crtc_state))
		return;

	if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut)
		return;

	if (HAS_DOUBLE_BUFFERED_LUT(display))
		crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 1024);
	else
		crtc_state->dsb_color = intel_dsb_prepare(state, crtc, INTEL_DSB_1, 1024);

	/* DSB allocation may fail; fall back to MMIO LUT loading */
	if (!intel_color_uses_dsb(crtc_state))
		return;

	/* the LUT writes land in the DSB via ilk_lut_write() & co. */
	display->funcs.color->load_luts(crtc_state);

	if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
		intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
		intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
		intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
		intel_dsb_interrupt(crtc_state->dsb_color);
	}

	if (intel_color_uses_gosub_dsb(crtc_state))
		intel_dsb_gosub_finish(crtc_state->dsb_color);
	else
		intel_dsb_finish(crtc_state->dsb_color);
}
/* Release the color DSB (if any) and clear the pointer. */
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->dsb_color) {
		intel_dsb_cleanup(crtc_state->dsb_color);
		crtc_state->dsb_color = NULL;
	}
}
/* Wait for the color DSB (if any) to finish executing. */
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->dsb_color)
		intel_dsb_wait(crtc_state->dsb_color);
}
/*
 * Can the new LUTs be written before vblank evasion? Only safe when
 * the old state had no LUTs active (nothing visible can change), and
 * never on platforms with double buffered LUTs (which use the DSB
 * path instead).
 */
static bool intel_can_preload_luts(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	if (HAS_DOUBLE_BUFFERED_LUT(display))
		return false;

	return !old_crtc_state->post_csc_lut &&
		!old_crtc_state->pre_csc_lut;
}
/*
 * VLV variant of intel_can_preload_luts(): also requires the WGC CSC
 * to have been disabled in the old state.
 */
static bool vlv_can_preload_luts(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	return !old_crtc_state->wgc_enable &&
		!old_crtc_state->post_csc_lut;
}
static bool chv_can_preload_luts(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
/*
* CGM_PIPE_MODE is itself single buffered. We'd have to
* somehow split it out from chv_load_luts() if we wanted
* the ability to preload the CGM LUTs/CSC without tearing.
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=97 H=95 G=95
¤ Dauer der Verarbeitung: 0.20 Sekunden
¤
*© Formatika GbR, Deutschland