/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
	/* The workaround is applied unconditionally on display version 9. */
	return DISPLAY_VER(display) == 9;
}
/* One-time SAGV state probe/sanitization at driver init. */
static void intel_sagv_init(struct intel_display *display)
{
	if (!HAS_SAGV(display))
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;

	/*
	 * Probe to see if we have working SAGV control.
	 * For icl+ this was already determined by intel_bw_init_hw().
	 */
	if (DISPLAY_VER(display) < 11)
		skl_sagv_disable(display);

	/* avoid overflow when adding with wm0 latency/etc. */
	if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
		     "Excessive SAGV block time %u, ignoring\n",
		     display->sagv.block_time_us))
		display->sagv.block_time_us = 0;

	if (!intel_has_sagv(display))
		display->sagv.block_time_us = 0;
}
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 * - <= 1 pipe enabled
 * - All planes can enable watermarks for latencies >= SAGV engine block time
 * - We're not using an interlaced display configuration
 */
static void skl_sagv_enable(struct intel_display *display)
{
	int ret;

	if (!intel_has_sagv(display))
		return;

	if (display->sagv.status == I915_SAGV_ENABLED)
		return;

	drm_dbg_kms(display->drm, "Enabling SAGV\n");
	ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_ENABLE);

	/* We don't need to wait for SAGV when enabling */

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (display->platform.skylake && ret == -ENXIO) {
		drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
		return;
	} else if (ret < 0) {
		drm_err(display->drm, "Failed to enable SAGV\n");
		return;
	}

	display->sagv.status = I915_SAGV_ENABLED;
}
static void skl_sagv_disable(struct intel_display *display)
{
	int ret;

	if (!intel_has_sagv(display))
		return;

	if (display->sagv.status == I915_SAGV_DISABLED)
		return;

	drm_dbg_kms(display->drm, "Disabling SAGV\n");
	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
				  GEN9_SAGV_DISABLE,
				  GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				  1);
	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have SAGV.
	 */
	if (display->platform.skylake && ret == -ENXIO) {
		drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
		return;
	} else if (ret < 0) {
		drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
		return;
	}

	/* NOTE(review): success path reconstructed — the original tail was lost in extraction; confirm against upstream. */
	display->sagv.status = I915_SAGV_DISABLED;
}

/* Dispatch SAGV pre-plane-update handling to the platform-specific helper. */
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	/* NOTE(review): function header reconstructed — the original was lost in extraction; confirm signature against upstream. */
	struct intel_display *display = to_intel_display(state);

	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from situation when we have SAGV but just can't
	 * afford it due to DBuf limitation - in case if SAGV is completely
	 * disabled in a BIOS, we are not even allowed to send a PCode request,
	 * as it will throw an error. So have to check it here.
	 */
	if (!intel_has_sagv(display))
		return;

	if (DISPLAY_VER(display) >= 11)
		icl_sagv_pre_plane_update(state);
	else
		skl_sagv_pre_plane_update(state);
}
/* Dispatch SAGV post-plane-update handling to the platform-specific helper. */
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
	/* NOTE(review): function header reconstructed — the original was lost in extraction; confirm signature against upstream. */
	struct intel_display *display = to_intel_display(state);

	/*
	 * Just return if we can't control SAGV or don't have it.
	 * This is different from situation when we have SAGV but just can't
	 * afford it due to DBuf limitation - in case if SAGV is completely
	 * disabled in a BIOS, we are not even allowed to send a PCode request,
	 * as it will throw an error. So have to check it here.
	 */
	if (!intel_has_sagv(display))
		return;

	if (DISPLAY_VER(display) >= 11)
		icl_sagv_post_plane_update(state);
	else
		skl_sagv_post_plane_update(state);
}
/* * All enabled planes must have enabled a common wm level that * can tolerate memory latencies higher than sagv_block_time_us
*/ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) returnfalse;
}
/* * SAGV is initially forced off because its current * state can't be queried from pcode. Allow SAGV to * be enabled upon the first real commit.
*/ if (crtc_state->inherited) returnfalse;
if (DISPLAY_VER(display) >= 12) return tgl_crtc_can_enable_sagv(crtc_state); else return skl_crtc_can_enable_sagv(crtc_state);
}
/* * Per plane DDB entry can in a really worst case be on multiple slices * but single entry is anyway contiguous.
*/ while (start_slice <= end_slice) {
slice_mask |= BIT(start_slice);
start_slice++;
}
/* * Watermark/ddb requirement highly depends upon width of the * framebuffer, So instead of allocating DDB equally among pipes * distribute DDB based on resolution/width of the display.
*/
drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
for_each_pipe(display, pipe) { int weight = dbuf_state->weight[pipe];
/* * Do not account pipes using other slice sets * luckily as of current BSpec slice sets do not partially * intersect(pipes share either same one slice or same slice set * i.e no partial intersection), so it is enough to check for * equality for now.
*/ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) continue;
out: if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
&new_dbuf_state->ddb[pipe])) return 0;
ret = intel_atomic_lock_global_state(&new_dbuf_state->base); if (ret) return ret;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state);
/* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets.
*/
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */ if (plane_id == PLANE_CURSOR) {
val = intel_de_read(display, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val); return;
}
val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
if (DISPLAY_VER(display) >= 30) {
val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
/* * Table taken from Bspec 12716 * Pipes do have some preferred DBuf slice affinity, * plus there are some hardcoded requirements on how * those should be distributed for multipipe scenarios. * For more DBuf slices algorithm can get even more messy * and less readable, so decided to use a table almost * as is from BSpec itself - that way it is at least easier * to compare, change and check.
*/ staticconststruct dbuf_slice_conf_entry icl_allowed_dbufs[] = /* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
},
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{}
};
/* * Table taken from Bspec 49255 * Pipes do have some preferred DBuf slice affinity, * plus there are some hardcoded requirements on how * those should be distributed for multipipe scenarios. * For more DBuf slices algorithm can get even more messy * and less readable, so decided to use a table almost * as is from BSpec itself - that way it is at least easier * to compare, change and check.
*/ staticconststruct dbuf_slice_conf_entry tgl_allowed_dbufs[] = /* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S2),
[PIPE_B] = BIT(DBUF_S1),
},
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_D),
.dbuf_mask = {
[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_C] = BIT(DBUF_S1),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
[PIPE_D] = BIT(DBUF_S2),
},
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
[PIPE_D] = BIT(DBUF_S2),
},
},
{}
};
/* Look up the DBuf slice mask for @pipe in a platform table, given the set of active pipes. */
static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	/* NOTE(review): function header reconstructed — the original was lost in extraction; confirm signature against upstream. */
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes &&
		    dbuf_slices[i].join_mbus == join_mbus)
			return dbuf_slices[i].dbuf_mask[pipe];
	}

	/* No matching entry: the pipe gets no DBuf slices. */
	return 0;
}

/*
 * This function finds an entry with same enabled pipe configuration and
 * returns correspondent DBuf slice mask as stated in BSpec for particular
 * platform.
 */
static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
	/*
	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
	 * required calculating "pipe ratio" in order to determine
	 * if one or two slices can be used for single pipe configurations
	 * as additional constraint to the existing table.
	 * However based on recent info, it should be not "pipe ratio"
	 * but rather ratio between pixel_rate and cdclk with additional
	 * constants, so for now we are using only table until this is
	 * clarified. Also this is the reason why crtc_state param is
	 * still here - we will need it once those additional constraints
	 * pop up.
	 */
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   icl_allowed_dbufs);
}
/* Xe3+ are auto minimum DDB capble. So don't force minimal wm0 */ return IS_DISPLAY_VER(display, 13, 20) &&
crtc_state->uapi.async_flip &&
plane->async_flip;
}
unsignedint
skl_plane_relative_data_rate(conststruct intel_crtc_state *crtc_state, struct intel_plane *plane, int width, int height, int cpp)
{ /* * We calculate extra ddb based on ratio plane rate/total data rate * in case, in some cases we should not allocate extra ddb for the plane, * so do not count its data rate, if this is the case.
*/ if (use_minimal_wm0_only(crtc_state, plane)) return 0;
if (pipe_wm->use_sagv_wm) return &wm->sagv.trans_wm;
return &wm->trans_wm;
}
/* * We only disable the watermarks for each plane if * they exceed the ddb allocation of said plane. This * is done so that we don't end up touching cursor * watermarks needlessly when some other plane reduces * our max possible watermark level. * * Bspec has this to say about the PLANE_WM enable bit: * "All the watermarks at this level for all enabled * planes must be enabled before the level will be used." * So this is actually safe to do.
*/ staticvoid
skl_check_wm_level(struct skl_wm_level *wm, conststruct skl_ddb_entry *ddb)
{ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
memset(wm, 0, sizeof(*wm));
}
staticbool skl_need_wm_copy_wa(struct intel_display *display, int level, conststruct skl_plane_wm *wm)
{ /* * Wa_1408961008:icl, ehl * Wa_14012656716:tgl, adl * Wa_14017887344:icl * Wa_14017868169:adl, tgl * Due to some power saving optimizations, different subsystems * like PSR, might still use even disabled wm level registers, * for "reference", so lets keep at least the values sane. * Considering amount of WA requiring us to do similar things, was * decided to simply do it for all of the platforms, as those wm * levels are disabled, this isn't going to do harm anyway.
*/ return level > 0 && !wm->wm[level].enable;
}
if (data_rate) {
extra = min_t(u16, iter->size,
DIV64_U64_ROUND_UP(iter->size * data_rate,
iter->data_rate));
iter->size -= extra;
iter->data_rate -= data_rate;
}
/* * Keep ddb entry of all disabled planes explicitly zeroed * to avoid skl_ddb_add_affected_planes() adding them to * the state when other planes change their allocations.
*/
size = wm->min_ddb_alloc + extra; if (size)
iter->start = skl_ddb_entry_init(ddb, iter->start,
iter->start + size);
}
/* * Find the highest watermark level for which we can satisfy the block * requirement of active planes.
*/ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) { conststruct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) { conststruct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
/* avoid the WARN later when we don't allocate any extra DDB */ if (iter.data_rate == 0)
iter.size = 0;
/* * Grant each plane the blocks it requires at the highest achievable * watermark level, plus an extra share of the leftover blocks * proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id]; struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
u16 *interim_ddb =
&crtc_state->wm.skl.plane_interim_ddb[plane_id]; conststruct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
/* * When we calculated watermark values we didn't know how high * of a level we'd actually be able to hit, so we just marked * all levels as "enabled." Go back now and disable the ones * that aren't actually possible.
*/ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) { conststruct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id]; conststruct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
/* * Go back and disable the transition and SAGV watermarks * if it turns out we don't have enough DDB blocks for them.
*/
for_each_plane_id_on_crtc(crtc, plane_id) { conststruct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id]; conststruct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
u16 *interim_ddb =
&crtc_state->wm.skl.plane_interim_ddb[plane_id]; struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
skl_check_wm_level(&wm->sagv.wm0, ddb); if (DISPLAY_VER(display) >= 30)
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
return 0;
}
/* * The max latency should be 257 (max the punit can code is 255 and we add 2us * for the read latency) and cpp should always be <= 8, so that * should allow pixel_rate up to ~2 GHz which seems sufficient since max * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/ static uint_fixed_16_16_t
skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
/* only planar format has two planes */ if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
drm_dbg_kms(display->drm, "Non planar format have single plane\n"); return -EINVAL;
}
staticint
skl_compute_plane_wm_params(conststruct intel_crtc_state *crtc_state, conststruct intel_plane_state *plane_state, struct skl_wm_params *wp, int color_plane)
{ conststruct drm_framebuffer *fb = plane_state->hw.fb; int width;
/* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&plane_state->uapi.src) >> 16;
blocks = fixed16_to_u32_round_up(selected_result); if (DISPLAY_VER(display) < 30)
blocks++;
/* * Lets have blocks at minimum equivalent to plane_blocks_per_line * as there will be at minimum one line for lines configuration. This * is a work around for FIFO underruns observed with resolutions like * 4k 60 Hz in single channel DRAM configurations. * * As per the Bspec 49325, if the ddb allocation can hold at least * one plane_blocks_per_line, we should have selected method2 in * the above logic. Assuming that modern versions have enough dbuf * and method2 guarantees blocks equivalent to at least 1 line, * select the blocks as plane_blocks_per_line. * * TODO: Revisit the logic when we have better understanding on DRAM * channels' impact on the level 0 memory latency and the relevant * wm calculations.
*/ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
if (DISPLAY_VER(display) == 9) { /* Display WA #1125: skl,bxt,kbl */ if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
/* Display WA #1126: skl,bxt,kbl */ if (level >= 1 && level <= 7) { if (wp->y_tiled) {
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
lines += wp->y_min_scanlines;
} else {
blocks++;
}
/* * Make sure result blocks for higher latency levels are * at least as high as level below the current level. * Assumption in DDB algorithm optimization for special * cases. Also covers Display WA #1125 for RC.
*/ if (result_prev->blocks > blocks)
blocks = result_prev->blocks;
}
}
if (DISPLAY_VER(display) >= 11) { if (wp->y_tiled) { int extra_lines;
if (lines > skl_wm_max_lines(display)) { /* reject it */
result->min_ddb_alloc = U16_MAX; return;
}
/* * If lines is valid, assume we can use this watermark level * for now. We'll come back and disable it after we calculate the * DDB allocation if it turns out we don't actually have enough * blocks to satisfy it.
*/
result->blocks = blocks;
result->lines = lines; /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
/* Display WA #1140: glk,cnl */ if (DISPLAY_VER(display) == 10)
trans_amount = 0; else
trans_amount = 10; /* This is configurable amount */
trans_offset = trans_min + trans_amount;
/* * The spec asks for Selected Result Blocks for wm0 (the real value), * not Result Blocks (the integer value). Pay attention to the capital * letters. The value wm_l0->blocks is actually Result Blocks, but * since Result Blocks is the ceiling of Selected Result Blocks plus 1, * and since we later will have to get the ceiling of the sum in the * transition watermarks calculation, we can just pretend Selected * Result Blocks is Result Blocks minus 1 and it should work for the * current platforms.
*/
wm0_blocks = wm0->blocks - 1;
/* * Just assume we can enable the transition watermark. After * computing the DDB we'll come back and disable it if that * assumption turns out to be false.
*/
trans_wm->blocks = blocks;
trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
trans_wm->enable = true;
}
/* uv plane watermarks must also be validated for NV12/Planar */
ret = skl_compute_plane_wm_params(crtc_state, plane_state,
&wm_params, 1); if (ret) return ret;
level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines); if (level < 0) return level;
/* * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* * based on whether we're limited by the vblank duration.
*/
crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
for (level++; level < display->wm.num_levels; level++) { enum plane_id plane_id;
/* * FIXME just clear enable or flag the entire * thing as bad via min_ddb_alloc=U16_MAX?
*/
wm->wm[level].enable = false;
wm->uv_wm[level].enable = false;
}
}
for_each_new_intel_plane_in_state(state, plane, plane_state, i) { /* * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc * instead but we don't populate that correctly for NV12 Y * planes so for now hack this.
*/ if (plane->pipe != crtc->pipe) continue;
if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state); else
ret = skl_build_plane_wm(crtc_state, plane_state); if (ret) return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.