/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD *
*/
/* Height (in lines) of the visual confirm debug bar drawn at the bottom of
 * each plane's recout: default, minimum and maximum allowed values.
 */
#define VISUAL_CONFIRM_BASE_DEFAULT 3
#define VISUAL_CONFIRM_BASE_MIN 1
#define VISUAL_CONFIRM_BASE_MAX 10

/* we choose 240 because it is a common denominator of common v addressable
 * such as 2160, 1440, 1200, 960. So we take 1/240 portion of v addressable as
 * the visual confirm dpp offset height. So visual confirm height can stay
 * relatively the same independent from timing used.
 */
#define VISUAL_CONFIRM_DPP_OFFSET_DENO 240
switch (dc_version) { #ifdefined(CONFIG_DRM_AMD_DC_SI) case DCE_VERSION_6_0:
res_pool = dce60_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_6_1:
res_pool = dce61_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_6_4:
res_pool = dce64_create_resource_pool(
init_data->num_virtual_links, dc); break; #endif case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_8_1:
res_pool = dce81_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_8_3:
res_pool = dce83_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_10_0:
res_pool = dce100_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_11_0:
res_pool = dce110_create_resource_pool(
init_data->num_virtual_links, dc,
init_data->asic_id); break; case DCE_VERSION_11_2: case DCE_VERSION_11_22:
res_pool = dce112_create_resource_pool(
init_data->num_virtual_links, dc); break; case DCE_VERSION_12_0: case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
init_data->num_virtual_links, dc); break;
#ifdefined(CONFIG_DRM_AMD_DC_FP) case DCN_VERSION_1_0: case DCN_VERSION_1_01:
res_pool = dcn10_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_0:
res_pool = dcn20_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_1:
res_pool = dcn21_create_resource_pool(init_data, dc); break; case DCN_VERSION_2_01:
res_pool = dcn201_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_0:
res_pool = dcn30_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_01:
res_pool = dcn301_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_02:
res_pool = dcn302_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_03:
res_pool = dcn303_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_1:
res_pool = dcn31_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_14:
res_pool = dcn314_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_15:
res_pool = dcn315_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_16:
res_pool = dcn316_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_2:
res_pool = dcn32_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_21:
res_pool = dcn321_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_5:
res_pool = dcn35_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_51:
res_pool = dcn351_create_resource_pool(init_data, dc); break; case DCN_VERSION_3_6:
res_pool = dcn36_create_resource_pool(init_data, dc); break; case DCN_VERSION_4_01:
res_pool = dcn401_create_resource_pool(init_data, dc); break; #endif/* CONFIG_DRM_AMD_DC_FP */ default: break;
}
if (res_pool != NULL) { if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; /* initialize with firmware data first, no all * ASIC have DCCG SW component. FPGA or * simulation need initialization of * dccg_ref_clock_inKhz, dchub_ref_clock_inKhz * with xtalin_clock_inKhz
*/
res_pool->ref_clocks.dccg_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
} else
ASSERT_CRITICAL(false);
}
return res_pool;
}
void dc_destroy_resource_pool(struct dc *dc)
{ if (dc) { if (dc->res_pool)
dc->res_pool->funcs->destroy(&dc->res_pool);
if (create_funcs->read_dce_straps)
create_funcs->read_dce_straps(dc->ctx, &straps);
pool->audio_count = 0; if (create_funcs->create_audio) { /* find the total number of streams available via the * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT * registers (one for each pin) starting from pin 1 * up to the max number of audio pins. * We stop on the first pin where * PORT_CONNECTIVITY == 1 (as instructed by HW team).
*/
update_num_audio(&straps, &num_audio, &pool->audio_support); for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i);
if (aud == NULL) {
DC_ERR("DC: failed to create audio!\n"); returnfalse;
} if (!aud->funcs->endpoint_valid(aud)) {
aud->funcs->destroy(&aud); break;
}
pool->audios[i] = aud;
pool->audio_count++;
}
}
pool->stream_enc_count = 0; if (create_funcs->create_stream_encoder) { for (i = 0; i < caps->num_stream_encoder; i++) {
pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx); if (pool->stream_enc[i] == NULL)
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
}
pool->hpo_dp_stream_enc_count = 0; if (create_funcs->create_hpo_dp_stream_encoder) { for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx); if (pool->hpo_dp_stream_enc[i] == NULL)
DC_ERR("DC: failed to create HPO DP stream encoder!\n");
pool->hpo_dp_stream_enc_count++;
}
}
pool->hpo_dp_link_enc_count = 0; if (create_funcs->create_hpo_dp_link_encoder) { for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) {
pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx); if (pool->hpo_dp_link_enc[i] == NULL)
DC_ERR("DC: failed to create HPO DP link encoder!\n");
pool->hpo_dp_link_enc_count++;
}
}
for (i = 0; i < caps->num_mpc_3dlut; i++) {
pool->mpc_lut[i] = dc_create_3dlut_func(); if (pool->mpc_lut[i] == NULL)
DC_ERR("DC: failed to create MPC 3dlut!\n");
pool->mpc_shaper[i] = dc_create_transfer_func(); if (pool->mpc_shaper[i] == NULL)
DC_ERR("DC: failed to create MPC shaper!\n");
}
dc->caps.dynamic_audio = false; if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
} for (i = 0; i < num_virtual_links; i++) {
pool->stream_enc[pool->stream_enc_count] =
virtual_stream_encoder_create(
ctx, ctx->dc_bios); if (pool->stream_enc[pool->stream_enc_count] == NULL) {
DC_ERR("DC: failed to create stream_encoder!\n"); returnfalse;
}
pool->stream_enc_count++;
}
if (horizontal_mirror)
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
staticstruct rect intersect_rec(conststruct rect *r0, conststruct rect *r1)
{ struct rect rec; int r0_x_end = r0->x + r0->width; int r1_x_end = r1->x + r1->width; int r0_y_end = r0->y + r0->height; int r1_y_end = r1->y + r1->height;
/* in case that there is no intersection */ if (rec.width < 0 || rec.height < 0)
memset(&rec, 0, sizeof(rec));
return rec;
}
staticstruct rect shift_rec(conststruct rect *rec_in, int x, int y)
{ struct rect rec_out = *rec_in;
rec_out.x += x;
rec_out.y += y;
return rec_out;
}
staticstruct rect calculate_plane_rec_in_timing_active( struct pipe_ctx *pipe_ctx, conststruct rect *rec_in)
{ /* * The following diagram shows an example where we map a 1920x1200 * desktop to a 2560x1440 timing with a plane rect in the middle * of the screen. To map a plane rect from Stream Source to Timing * Active space, we first multiply stream scaling ratios (i.e 2304/1920 * horizontal and 1440/1200 vertical) to the plane's x and y, then * we add stream destination offsets (i.e 128 horizontal, 0 vertical). * This will give us a plane rect's position in Timing Active. However * we have to remove the fractional. The rule is that we find left/right * and top/bottom positions and round the value to the adjacent integer. * * Stream Source Space * ------------ * __________________________________________________ * |Stream Source (1920 x 1200) ^ | * | y | * | <------- w --------|> | * | __________________V | * |<-- x -->|Plane//////////////| ^ | * | |(pre scale)////////| | | * | |///////////////////| | | * | |///////////////////| h | * | |///////////////////| | | * | |///////////////////| | | * | |///////////////////| V | * | | * | | * |__________________________________________________| * * * Timing Active Space * --------------------------------- * * Timing Active (2560 x 1440) * __________________________________________________ * |*****| Stteam Destination (2304 x 1440) |*****| * |*****| |*****| * |<128>| |*****| * |*****| __________________ |*****| * |*****| |Plane/////////////| |*****| * |*****| |(post scale)//////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |*****| * |*****| |*****| * |*****| |*****| * |*****|______________________________________|*****| * * So the resulting formulas are shown below: * * recout_x = 128 + round(plane_x * 2304 / 1920) * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x * recout_y = 0 + round(plane_y * 1440 / 1280) * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y * * 
NOTE: fixed point division is not error free. To reduce errors * introduced by fixed point division, we divide only after * multiplication is complete.
*/ conststruct dc_stream_state *stream = pipe_ctx->stream; struct rect rec_out = {0}; struct fixed31_32 temp;
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
mpc_rec.x -= (mpc_rec.width * mpc_slice_idx);
/* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as:
*/ if (mpc_slice_idx > epimo) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
/*
 * The function maps a plane clip from Stream Source Space to ODM Slice Space
 * and calculates the rec of the overlapping area of MPC slice of the plane
 * clip, ODM slice associated with the pipe context and stream destination rec.
 */
static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
	/*
	 * A plane clip represents the desired plane size and position in
	 * Stream Source Space. All planes of a stream are first blended in
	 * this virtual canvas, which is then scaled/repositioned as a whole
	 * to the Stream Destination in Timing Active Space (e.g. for overscan
	 * adjustment, GPU scaling or VSR). In Timing Active Space a plane
	 * clip may further be divided into MPC slices (one per DPP pipe) and
	 * each MPC slice must belong to a single ODM slice (one per OPP blend
	 * tree). The recout computed here is the portion of this pipe's MPC
	 * slice that overlaps its ODM slice, expressed relative to the ODM
	 * slice source's origin.
	 */
	struct rect plane_clip;
	struct rect mpc_slice_of_plane_clip;
	struct rect odm_slice_src;
	struct rect overlapping_area;

	plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
			&pipe_ctx->plane_state->clip_rect);
	/* guard plane clip from drawing beyond stream dst here */
	plane_clip = intersect_rec(&plane_clip,
			&pipe_ctx->stream->dst);
	mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
			pipe_ctx, &plane_clip);
	odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
	overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice_src);
	if (overlapping_area.height > 0 &&
			overlapping_area.width > 0) {
		/* shift the overlapping area so it is with respect to current
		 * ODM slice source's position
		 */
		pipe_ctx->plane_res.scl_data.recout = shift_rec(
				&overlapping_area,
				-odm_slice_src.x, -odm_slice_src.y);
		adjust_recout_for_visual_confirm(
				&pipe_ctx->plane_res.scl_data.recout,
				pipe_ctx);
	} else {
		/* if there is no overlap, zero recout */
		memset(&pipe_ctx->plane_res.scl_data.recout, 0,
				sizeof(struct rect));
	}
}
/*Swap surf_src height and width since scaling ratios are in recout rotation*/ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
swap(surf_src.height, surf_src.width);
/*
 * We completely calculate vp offset, size and inits here based entirely on scaling
 * ratios and recout for pixel perfect pipe combine.
 */
static void calculate_init_and_vp(
		bool flip_scan_dir,
		int recout_offset_within_recout_full,
		int recout_size,
		int src_size,
		int taps,
		struct fixed31_32 ratio,
		struct fixed31_32 *init,
		int *vp_offset,
		int *vp_size)
{
	struct fixed31_32 temp;
	int int_part;

	/*
	 * First of the taps starts sampling pixel number <init_int_part> corresponding to recout
	 * pixel 1. Next recout pixel samples int part of <init + scaling ratio> and so on.
	 * All following calculations are based on this logic.
	 *
	 * Init calculated according to formula:
	 *	init = (scaling_ratio + number_of_taps + 1) / 2
	 *	init_bot = init + scaling_ratio
	 *	to get pixel perfect combine add the fraction from calculating vp offset
	 */
	temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
	*vp_offset = dc_fixpt_floor(temp);
	temp.value &= 0xffffffff;
	*init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int(
			dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19);
	/*
	 * If viewport has non 0 offset and there are more taps than covered by init then
	 * we should decrease the offset and increase init so we are never sampling
	 * outside of viewport.
	 */
	int_part = dc_fixpt_floor(*init);
	if (int_part < taps) {
		int_part = taps - int_part;
		if (int_part > *vp_offset)
			int_part = *vp_offset;
		*vp_offset -= int_part;
		*init = dc_fixpt_add_int(*init, int_part);
	}
	/*
	 * If taps are sampling outside of viewport at end of recout and there are more pixels
	 * available in the surface we should increase the viewport size, regardless set vp to
	 * only what is used.
	 */
	temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
	*vp_size = dc_fixpt_floor(temp);
	if (*vp_size + *vp_offset > src_size)
		*vp_size = src_size - *vp_offset;

	/* We did all the math assuming we are scanning same direction as display does,
	 * however mirror/rotation changes how vp scans vs how it is offset. If scan direction
	 * is flipped we simply need to calculate offset from the other side of plane.
	 * Note that outside of viewport all scaling hardware works in recout space.
	 */
	if (flip_scan_dir)
		*vp_offset = src_size - *vp_offset - *vp_size;
}
staticvoid calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
{ conststruct dc_plane_state *plane_state = pipe_ctx->plane_state; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect src = plane_state->src_rect; struct rect recout_dst_in_active_timing; struct rect recout_clip_in_active_timing; struct rect recout_clip_in_recout_dst; struct rect overlap_in_active_timing; struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx); int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
/* * Work in recout rotation since that requires less transformations
*/
get_vp_scan_direction(
plane_state->rotation,
plane_state->horizontal_mirror,
&orthogonal_rotation,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
if (orthogonal_rotation) {
swap(src.width, src.height);
swap(flip_vert_scan_dir, flip_horz_scan_dir);
}
/* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. * TODO: fix in DM, stream dst is supposed to be in vactive
*/
pipe_ctx->stream->dst.x += timing->h_border_left;
pipe_ctx->stream->dst.y += timing->v_border_top;
/* Calculate H and V active size */
pipe_ctx->plane_res.scl_data.h_active = odm_slice_src.width;
pipe_ctx->plane_res.scl_data.v_active = odm_slice_src.height;
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
// Convert pipe_ctx to respective input params for SPL
translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in); /* Pass visual confirm debug information */
calculate_adjust_recout_for_visual_confirm(pipe_ctx,
&spl_in->debug.visual_confirm_base_offset,
&spl_in->debug.visual_confirm_dpp_offset); // Set SPL output parameters to dscl_prog_data to be used for hw registers
spl_out->dscl_prog_data = resource_get_dscl_prog_data(pipe_ctx); // Calculate scaler parameters from SPL
res = spl_calculate_scaler_params(spl_in, spl_out); // Convert respective out params from SPL to scaler data
translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
/* Ignore scaler failure if pipe context plane is phantom plane */ if (!res && plane_state->is_phantom)
res = true;
} else { #endif /* depends on h_active */
calculate_recout(pipe_ctx); /* depends on pixel format */
calculate_scaling_ratios(pipe_ctx);
/* * LB calculations depend on vp size, h/v_active and scaling ratios * Setting line buffer pixel depth to 24bpp yields banding * on certain displays, such as the Sharp 4k. 36bpp is needed * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc * precision on DCN display engines, but apparently not for DCE, as * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, * neither do DCE-8 at 4k resolution, or DCE-11.2 (broken identify pixel * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
*/ if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
// get TAP value with 100x100 dummy data for max scaling qualify, override // if a new scaling quality required
pipe_ctx->plane_res.scl_data.viewport.width = 100;
pipe_ctx->plane_res.scl_data.viewport.height = 100;
pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
pipe_ctx->plane_res.scl_data.viewport_c.height = 100; if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
temp = pipe_ctx->plane_res.scl_data.taps;
calculate_inits_and_viewports(pipe_ctx);
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm,
&pipe_ctx->plane_res.scl_data,
&plane_state->scaling_quality);
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp,
&pipe_ctx->plane_res.scl_data,
&plane_state->scaling_quality);
}
/* Ignore scaler failure if pipe context plane is phantom plane */ if (!res && plane_state->is_phantom)
res = true;
/* * Handle side by side and top bottom 3d recout offsets after vp calculation * since 3d is special and needs to calculate vp as if there is no recout offset * This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) {
ASSERT(plane_state->rotation == ROTATION_ANGLE_0 ||
(pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM &&
pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE)); if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height; elseif (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
/** * Disable the cursor if there's another pipe above this with a * plane that contains this pipe's viewport to prevent double cursor * and incorrect scaling artifacts.
*/ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) { struct rect r2; int r2_right, r2_bottom; // Skip invisible layer and pipe-split plane on same layer if (!test_pipe->plane_state ||
!test_pipe->plane_state->visible ||
test_pipe->plane_state->layer_index == cur_layer) continue;
/** * There is another half plane on same layer because of * pipe-split, merge together per same height.
*/ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
split_pipe = split_pipe->top_pipe) if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { struct rect r2_half;
enum dc_status resource_build_scaling_params_for_context( conststruct dc *dc, struct dc_state *context)
{ int i;
for (i = 0; i < MAX_PIPES; i++) { if (context->res_ctx.pipe_ctx[i].plane_state != NULL &&
context->res_ctx.pipe_ctx[i].stream != NULL) if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i])) return DC_FAIL_SCALING;
}
/* * We add a preferred pipe mapping to avoid the chance that * MPCCs already in use will need to be reassigned to other trees. * For example, if we went with the strict, assign backwards logic: * * (State 1) * Display A on, no surface, top pipe = 0 * Display B on, no surface, top pipe = 1 * * (State 2) * Display A on, no surface, top pipe = 0 * Display B on, surface enable, top pipe = 1, bottom pipe = 5 * * (State 3) * Display A on, surface enable, top pipe = 0, bottom pipe = 5 * Display B on, surface enable, top pipe = 1, bottom pipe = 4 * * The state 2->3 transition requires remapping MPCC 5 from display B * to display A. * * However, with the preferred pipe logic, state 2 would look like: * * (State 2) * Display A on, no surface, top pipe = 0 * Display B on, surface enable, top pipe = 1, bottom pipe = 4 * * This would then cause 2->3 to not require remapping any MPCCs.
*/ if (primary_pipe) { int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
/* * search backwards for the second pipe to keep pipe * assignment more consistent
*/ if (!secondary_pipe) for (i = pool->pipe_count - 1; i >= 0; i--) { if (res_ctx->pipe_ctx[i].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[i];
secondary_pipe->pipe_idx = i; break;
}
}
while (cur_sec_dpp) { /* find a free pipe used in current opp blend tree, * this is to avoid MPO pipe switching to different opp blending * tree
*/
new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx]; if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
free_pipe_idx = cur_sec_dpp->pipe_idx; break;
}
cur_sec_dpp = cur_sec_dpp->bottom_pipe;
}
return free_pipe_idx;
}
int recource_find_free_pipe_not_used_in_cur_res_ctx( conststruct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, conststruct resource_pool *pool)
{ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; conststruct pipe_ctx *new_pipe, *cur_pipe; int i;
for (i = 0; i < pool->pipe_count; i++) {
cur_pipe = &cur_res_ctx->pipe_ctx[i];
new_pipe = &new_res_ctx->pipe_ctx[i];
/*
 * NOTE(review): the following text is a stray German website disclaimer that
 * was accidentally appended to this source file and should be removed. It
 * translates to: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */