if ((rec->y + adjust->y) >= 0)
rec->y += adjust->y;
if ((rec->width + adjust->width) >= 1)
rec->width += adjust->width;
if ((rec->height + adjust->height) >= 1)
rec->height += adjust->height;
}
staticstruct spl_rect calculate_plane_rec_in_timing_active( struct spl_in *spl_in, conststruct spl_rect *rec_in)
{ /* * The following diagram shows an example where we map a 1920x1200 * desktop to a 2560x1440 timing with a plane rect in the middle * of the screen. To map a plane rect from Stream Source to Timing * Active space, we first multiply stream scaling ratios (i.e 2304/1920 * horizontal and 1440/1200 vertical) to the plane's x and y, then * we add stream destination offsets (i.e 128 horizontal, 0 vertical). * This will give us a plane rect's position in Timing Active. However * we have to remove the fractional. The rule is that we find left/right * and top/bottom positions and round the value to the adjacent integer. * * Stream Source Space * ------------ * __________________________________________________ * |Stream Source (1920 x 1200) ^ | * | y | * | <------- w --------|> | * | __________________V | * |<-- x -->|Plane//////////////| ^ | * | |(pre scale)////////| | | * | |///////////////////| | | * | |///////////////////| h | * | |///////////////////| | | * | |///////////////////| | | * | |///////////////////| V | * | | * | | * |__________________________________________________| * * * Timing Active Space * --------------------------------- * * Timing Active (2560 x 1440) * __________________________________________________ * |*****| Stteam Destination (2304 x 1440) |*****| * |*****| |*****| * |<128>| |*****| * |*****| __________________ |*****| * |*****| |Plane/////////////| |*****| * |*****| |(post scale)//////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |//////////////////| |*****| * |*****| |*****| * |*****| |*****| * |*****| |*****| * |*****|______________________________________|*****| * * So the resulting formulas are shown below: * * recout_x = 128 + round(plane_x * 2304 / 1920) * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x * recout_y = 0 + round(plane_y * 1440 / 1200) * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y * * 
NOTE: fixed point division is not error free. To reduce errors * introduced by fixed point division, we divide only after * multiplication is complete.
*/ conststruct spl_rect *stream_src = &spl_in->basic_out.src_rect; conststruct spl_rect *stream_dst = &spl_in->basic_out.dst_rect; struct spl_rect rec_out = {0}; struct spl_fixed31_32 temp;
/* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as:
*/ if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) {
mpc_rec.x += mpc_slice_idx - epimo - 1;
mpc_rec.width += 1;
}
staticstruct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_in)
{ int odm_slice_count = spl_in->basic_out.odm_combine_factor; int odm_slice_idx = spl_in->odm_slice_index; bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count; int h_active = spl_in->basic_out.output_size.width; int v_active = spl_in->basic_out.output_size.height; int odm_slice_width; struct spl_rect odm_rec;
if (spl_in->basic_out.odm_combine_factor > 0) {
odm_slice_width = h_active / odm_slice_count; /* * deprecated, caller must pass in odm slice rect i.e OPP input * rect in timing active for the new interface.
*/ if (spl_in->basic_out.use_two_pixels_per_container && (odm_slice_width % 2))
odm_slice_width++;
odm_rec.x = odm_slice_width * odm_slice_idx;
odm_rec.width = is_last_odm_slice ? /* last slice width is the reminder of h_active */
h_active - odm_slice_width * (odm_slice_count - 1) : /* odm slice width is the floor of h_active / count */
odm_slice_width;
odm_rec.y = 0;
odm_rec.height = v_active;
return odm_rec;
}
return spl_in->basic_out.odm_slice_rect;
}
staticvoid spl_calculate_recout(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out)
{ /* * A plane clip represents the desired plane size and position in Stream * Source Space. Stream Source is the destination where all planes are * blended (i.e. positioned, scaled and overlaid). It is a canvas where * all planes associated with the current stream are drawn together. * After Stream Source is completed, we will further scale and * reposition the entire canvas of the stream source to Stream * Destination in Timing Active Space. This could be due to display * overscan adjustment where we will need to rescale and reposition all * the planes so they can fit into a TV with overscan or downscale * upscale features such as GPU scaling or VSR. * * This two step blending is a virtual procedure in software. In * hardware there is no such thing as Stream Source. all planes are * blended once in Timing Active Space. Software virtualizes a Stream * Source space to decouple the math complicity so scaling param * calculation focuses on one step at a time. * * In the following two diagrams, user applied 10% overscan adjustment * so the Stream Source needs to be scaled down a little before mapping * to Timing Active Space. As a result the Plane Clip is also scaled * down by the same ratio, Plane Clip position (i.e. x and y) with * respect to Stream Source is also scaled down. To map it in Timing * Active Space additional x and y offsets from Stream Destination are * added to Plane Clip as well. 
* * Stream Source Space * ------------ * __________________________________________________ * |Stream Source (3840 x 2160) ^ | * | y | * | | | * | __________________V | * |<-- x -->|Plane Clip/////////| | * | |(pre scale)////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | |///////////////////| | * | | * | | * |__________________________________________________| * * * Timing Active Space (3840 x 2160) * --------------------------------- * * Timing Active * __________________________________________________ * | y_____________________________________________ | * |x |Stream Destination (3456 x 1944) | | * | | | | * | | __________________ | | * | | |Plane Clip////////| | | * | | |(post scale)//////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | |//////////////////| | | * | | | | * | | | | * | |____________________________________________| | * |__________________________________________________| * * * In Timing Active Space a plane clip could be further sliced into * pieces called MPC slices. Each Pipe Context is responsible for * processing only one MPC slice so the plane processing workload can be * distributed to multiple DPP Pipes. MPC slices could be blended * together to a single ODM slice. Each ODM slice is responsible for * processing a portion of Timing Active divided horizontally so the * output pixel processing workload can be distributed to multiple OPP * pipes. All ODM slices are mapped together in ODM block so all MPC * slices belong to different ODM slices could be pieced together to * form a single image in Timing Active. MPC slices must belong to * single ODM slice. If an MPC slice goes across ODM slice boundary, it * needs to be divided into two MPC slices one for each ODM slice. * * In the following diagram the output pixel processing workload is * divided horizontally into two ODM slices one for each OPP blend tree. 
* OPP0 blend tree is responsible for processing left half of Timing * Active, while OPP2 blend tree is responsible for processing right * half. * * The plane has two MPC slices. However since the right MPC slice goes * across ODM boundary, two DPP pipes are needed one for each OPP blend * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree). * * Assuming that we have a Pipe Context associated with OPP0 and DPP1 * working on processing the plane in the diagram. We want to know the * width and height of the shaded rectangle and its relative position * with respect to the ODM slice0. This is called the recout of the pipe * context. * * Planes can be at arbitrary size and position and there could be an * arbitrary number of MPC and ODM slices. The algorithm needs to take * all scenarios into account. * * Timing Active Space (3840 x 2160) * --------------------------------- * * Timing Active * __________________________________________________ * |OPP0(ODM slice0)^ |OPP2(ODM slice1) | * | y | | * | | <- w -> | * | _____V________|____ | * | |DPP0 ^ |DPP1 |DPP2| | * |<------ x |-----|->|/////| | | * | | | |/////| | | * | | h |/////| | | * | | | |/////| | | * | |_____V__|/////|____| | * | | | * | | | * | | | * |_________________________|________________________| * *
*/ struct spl_rect plane_clip; struct spl_rect mpc_slice_of_plane_clip; struct spl_rect odm_slice; struct spl_rect overlapping_area;
if (overlapping_area.height > 0 &&
overlapping_area.width > 0) { /* shift the overlapping area so it is with respect to current * ODM slice's position
*/
spl_scratch->scl_data.recout = shift_rec(
&overlapping_area,
-odm_slice.x, -odm_slice.y);
spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_base_offset;
spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_dpp_offset;
} else /* if there is no overlap, zero recout */
memset(&spl_scratch->scl_data.recout, 0, sizeof(struct spl_rect));
}
/*Swap surf_src height and width since scaling ratios are in recout rotation*/ if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270)
spl_swap(surf_src.height, surf_src.width);
/* * Coefficient table and some registers are different based on ratio * that is output/input. Currently we calculate input/output * Store 1/ratio in recip_ratio for those lookups
*/
spl_scratch->scl_data.recip_ratios.horz = spl_fixpt_recip(
spl_scratch->scl_data.ratios.horz);
spl_scratch->scl_data.recip_ratios.vert = spl_fixpt_recip(
spl_scratch->scl_data.ratios.vert);
spl_scratch->scl_data.recip_ratios.horz_c = spl_fixpt_recip(
spl_scratch->scl_data.ratios.horz_c);
spl_scratch->scl_data.recip_ratios.vert_c = spl_fixpt_recip(
spl_scratch->scl_data.ratios.vert_c);
}
if (horizontal_mirror)
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
/*
 * We completely calculate vp offset, size and inits here based entirely on scaling
 * ratios and recout for pixel perfect pipe combine.
 */
static void spl_calculate_init_and_vp(bool flip_scan_dir,
		int recout_offset_within_recout_full,
		int recout_size,
		int src_size,
		int taps,
		struct spl_fixed31_32 ratio,
		struct spl_fixed31_32 init_adj,
		struct spl_fixed31_32 *init,
		int *vp_offset,
		int *vp_size)
{
	struct spl_fixed31_32 temp;
	int int_part;

	/*
	 * First of the taps starts sampling pixel number <init_int_part> corresponding to recout
	 * pixel 1. Next recout pixel samples int part of <init + scaling ratio> and so on.
	 * All following calculations are based on this logic.
	 *
	 * Init calculated according to formula:
	 *	init = (scaling_ratio + number_of_taps + 1) / 2
	 *	init_bot = init + scaling_ratio
	 * To get pixel perfect combine, add the fraction from calculating vp offset.
	 */
	temp = spl_fixpt_mul_int(ratio, recout_offset_within_recout_full);
	*vp_offset = spl_fixpt_floor(temp);
	/* Keep only the fractional part of the offset product (given the
	 * type name, 31.32 fixed point, the low 32 bits are the fraction —
	 * TODO confirm against spl_fixed31_32 definition) and fold it into
	 * init so combined pipes stay pixel aligned.
	 */
	temp.value &= 0xffffffff;
	*init = spl_fixpt_add(spl_fixpt_div_int(spl_fixpt_add_int(ratio, taps + 1), 2), temp);
	*init = spl_fixpt_add(*init, init_adj);
	/* NOTE(review): truncation to 19 fractional bits presumably matches
	 * the hw init register's fraction width — confirm with hw spec.
	 */
	*init = spl_fixpt_truncate(*init, 19);

	/*
	 * If viewport has non 0 offset and there are more taps than covered by init then
	 * we should decrease the offset and increase init so we are never sampling
	 * outside of viewport.
	 */
	int_part = spl_fixpt_floor(*init);
	if (int_part < taps) {
		int_part = taps - int_part;
		/* Cannot shift further left than the viewport offset allows. */
		if (int_part > *vp_offset)
			int_part = *vp_offset;
		*vp_offset -= int_part;
		*init = spl_fixpt_add_int(*init, int_part);
	}

	/*
	 * If taps are sampling outside of viewport at end of recout and there are more pixels
	 * available in the surface we should increase the viewport size, regardless set vp to
	 * only what is used.
	 */
	temp = spl_fixpt_add(*init, spl_fixpt_mul_int(ratio, recout_size - 1));
	*vp_size = spl_fixpt_floor(temp);
	if (*vp_size + *vp_offset > src_size)
		*vp_size = src_size - *vp_offset;

	/*
	 * We did all the math assuming we are scanning same direction as display does,
	 * however mirror/rotation changes how vp scans vs how it is offset. If scan direction
	 * is flipped we simply need to calculate offset from the other side of plane.
	 * Note that outside of viewport all scaling hardware works in recout space.
	 */
	if (flip_scan_dir)
		*vp_offset = src_size - *vp_offset - *vp_size;
}
recout_clip_in_active_timing = shift_rec(
&spl_scratch->scl_data.recout, odm_slice.x, odm_slice.y);
recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
spl_in, &spl_in->basic_in.dst_rect);
overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
&recout_dst_in_active_timing); if (overlap_in_active_timing.width > 0 &&
overlap_in_active_timing.height > 0)
recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing,
-recout_dst_in_active_timing.x,
-recout_dst_in_active_timing.y); else
memset(&recout_clip_in_recout_dst, 0, sizeof(struct spl_rect)); /* * Work in recout rotation since that requires less transformations
*/
spl_get_vp_scan_direction(
spl_in->basic_in.rotation,
spl_in->basic_in.horizontal_mirror,
&orthogonal_rotation,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
if (spl_is_subsampled_format(spl_in->basic_in.format)) { /* this gives the direction of the cositing (negative will move * left, right otherwise)
*/ int h_sign = flip_horz_scan_dir ? -1 : 1; int v_sign = flip_vert_scan_dir ? -1 : 1;
/*
 * Apply side-by-side / top-bottom 3D recout offsets after vp calculation.
 * 3D is special: the viewport is calculated as if there were no recout
 * offset, so the per-eye shift is applied here instead. This may break
 * with rotation — good thing hw rotation and 3d aren't mixed.
 */
static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
{
	/* Only the second horizontal MPC slice (the "other eye") is shifted. */
	if (!spl_in->basic_in.mpc_h_slice_index)
		return;

	SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
		   (spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
		    spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));

	if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
		recout->y += recout->height;
	else if (spl_in->basic_out.view_format == SPL_VIEW_3D_SIDE_BY_SIDE)
		recout->x += recout->width;
}
/*
 * Enforce a minimum viewport width and height. A min_viewport_size of 0
 * selects the default MIN_VIEWPORT_SIZE.
 */
static void spl_clamp_viewport(struct spl_rect *viewport, int min_viewport_size)
{
	int min_size = (min_viewport_size == 0) ? MIN_VIEWPORT_SIZE : min_viewport_size;

	if (viewport->width < min_size)
		viewport->width = min_size;
	if (viewport->height < min_size)
		viewport->height = min_size;
}
/* Bypass if ratio is 1:1 with no ISHARP or force scale on */ if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !spl_in->basic_out.always_scale
&& !enable_isharp) return SCL_MODE_SCALING_444_BYPASS;
if (!spl_is_subsampled_format(pixel_format)) { if (spl_is_video_format(pixel_format)) return SCL_MODE_SCALING_444_YCBCR_ENABLE; else return SCL_MODE_SCALING_444_RGB_ENABLE;
}
/* * Bypass YUV if Y is 1:1 with no ISHARP * Do not bypass UV at 1:1 for cositing to be applied
*/ if (!enable_isharp) { if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale) return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
/*
 * Pick the linear-light scaling preference from the pixel format:
 * subsampled formats get LLS_PREF_NO, RGB/YUV444 get LLS_PREF_YES.
 */
static void spl_choose_lls_policy(enum spl_pixel_format format,
		enum linear_light_scaling *lls_pref)
{
	*lls_pref = spl_is_subsampled_format(format) ? LLS_PREF_NO : LLS_PREF_YES;
}
/*
 * Decide whether EASF is used for this scale.
 * NOTE: despite the name, the return value is a SKIP flag — true means
 * EASF will NOT be enabled (callers assign it to skip_easf).
 */
static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
{
	bool skip = spl_in->disable_easf ? true : false;
	int vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
	int hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);

	/* EASF supports upscaling and downscaling only up to 2:1;
	 * no EASF for downscaling beyond 2:1.
	 */
	if (vratio > 2 || hratio > 2)
		skip = true;

	/* Resolve LLS_PREF_DONT_CARE from the pixel format so the
	 * linear-scaling check below has a concrete preference.
	 */
	if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
		spl_choose_lls_policy(spl_in->basic_in.format,
				      &spl_in->lls_pref);

	/* EASF runs only with linear scaling preferred or when the caller
	 * explicitly prefers EASF.
	 */
	if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
		skip = true;

	return skip;
}
/* Check if video is in fullscreen mode */ staticbool spl_is_video_fullscreen(struct spl_in *spl_in)
{ if (spl_is_video_format(spl_in->basic_in.format) && spl_in->is_fullscreen) returntrue; returnfalse;
}
staticbool spl_get_isharp_en(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
{ bool enable_isharp = false; int vratio = 0; int hratio = 0; struct spl_taps taps = spl_scratch->scl_data.taps; bool fullscreen = spl_is_video_fullscreen(spl_in);
/* Return if adaptive sharpness is disabled */ if (spl_in->adaptive_sharpness.enable == false) return enable_isharp;
vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
/* No iSHARP support for downscaling */ if (vratio > 1 || hratio > 1) return enable_isharp;
// Scaling is up to 1:1 (no scaling) or upscaling
/* * Apply sharpness to RGB and YUV (NV12/P010) * surfaces based on policy setting
*/ if (!spl_is_video_format(spl_in->basic_in.format) &&
(spl_in->sharpen_policy == SHARPEN_YUV)) return enable_isharp; elseif ((spl_is_video_format(spl_in->basic_in.format) && !fullscreen) &&
(spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) return enable_isharp; elseif (!spl_in->is_fullscreen &&
spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL) return enable_isharp;
/* Calculate number of tap with adaptive scaling off */ staticvoid spl_get_taps_non_adaptive_scaler( struct spl_scratch *spl_scratch, conststruct spl_taps *in_taps, bool is_subsampled)
{ bool check_max_downscale = false;
/* Disable adaptive scaler and sharpener when integer scaling is enabled */ if (spl_in->scaling_quality.integer_scaling) {
spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false; returntrue;
}
/* Check if we are using EASF or not */
skip_easf = enable_easf(spl_in, spl_scratch);
/* * Set default taps if none are provided * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling
*/ if (skip_easf) {
spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled);
} else { if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
spl_scratch->scl_data.taps.v_taps = 6;
spl_scratch->scl_data.taps.h_taps_c = 4;
spl_scratch->scl_data.taps.v_taps_c = 4;
} else { /* RGB */
spl_scratch->scl_data.taps.h_taps = 6;
spl_scratch->scl_data.taps.v_taps = 6;
spl_scratch->scl_data.taps.h_taps_c = 6;
spl_scratch->scl_data.taps.v_taps_c = 6;
}
}
/*Ensure we can support the requested number of vtaps*/
min_taps_y = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ if (spl_is_yuv420(spl_in->basic_in.format))
lb_config = LB_MEMORY_CONFIG_3; else
lb_config = LB_MEMORY_CONFIG_0; // Determine max vtap support by calculating how much line buffer can fit
spl_in->callbacks.spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
lb_config, &num_part_y, &num_part_c); /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2) if ((spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2) > num_part_y)
max_taps_y = 0; else
max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2); else
max_taps_y = num_part_y;
if (max_taps_y < min_taps_y) returnfalse; elseif (max_taps_c < min_taps_c) returnfalse;
if (spl_scratch->scl_data.taps.v_taps > max_taps_y)
spl_scratch->scl_data.taps.v_taps = max_taps_y;
if (spl_scratch->scl_data.taps.v_taps_c > max_taps_c)
spl_scratch->scl_data.taps.v_taps_c = max_taps_c;
if (!skip_easf) { /* * RGB ( L + NL ) and Linear HDR support 6x6, 6x4, 6x3, 4x4, 4x3 * NL YUV420 only supports 6x6, 6x4 for Y and 4x4 for UV * * If LB does not support 3, 4, or 6 taps, then disable EASF_V * and only enable EASF_H. So for RGB, support 6x2, 4x2 * and for NL YUV420, support 6x2 for Y and 4x2 for UV * * All other cases, have to disable EASF_V and EASF_H * * If optimal no of taps is 5, then set it to 4 * If optimal no of taps is 7 or 8, then fine since max tap is 6 *
*/ if (spl_scratch->scl_data.taps.v_taps == 5)
spl_scratch->scl_data.taps.v_taps = 4;
if (spl_scratch->scl_data.taps.v_taps_c == 5)
spl_scratch->scl_data.taps.v_taps_c = 4;
if (spl_scratch->scl_data.taps.h_taps == 5)
spl_scratch->scl_data.taps.h_taps = 4;
if (spl_scratch->scl_data.taps.h_taps_c == 5)
spl_scratch->scl_data.taps.h_taps_c = 4;
/* Sharpener requires scaler to be enabled, including for 1:1 * Check if ISHARP can be enabled * If ISHARP is not enabled, set taps to 1 if ratio is 1:1 * except for chroma taps. Keep previous taps so it can * handle cositing
*/
*enable_isharp = spl_get_isharp_en(spl_in, spl_scratch); if (!*enable_isharp && !spl_in->basic_out.always_scale) { if ((IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1; if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
// Set values for recout
dscl_prog_data->recout = spl_scratch->scl_data.recout; // Set values for MPC Size
dscl_prog_data->mpc_size.width = spl_scratch->scl_data.h_active;
dscl_prog_data->mpc_size.height = spl_scratch->scl_data.v_active;
// SCL_MODE - Set SCL_MODE data
dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data, enable_isharp,
enable_easf);
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
0xF; // 4-bit, BF2 calculation mode /* 2-bit, BF3 chroma mode correction calculation mode */
dscl_prog_data->easf_h_bf3_mode = spl_get_h_bf3_mode(
spl_scratch->scl_data.recip_ratios.horz); /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */
dscl_prog_data->easf_h_ringest_eventap_reduceg1 =
spl_get_reducer_gain4(spl_scratch->scl_data.taps.h_taps,
spl_scratch->scl_data.recip_ratios.horz); /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */
dscl_prog_data->easf_h_ringest_eventap_reduceg2 =
spl_get_reducer_gain6(spl_scratch->scl_data.taps.h_taps,
spl_scratch->scl_data.recip_ratios.horz); /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain1 =
spl_get_gainRing4(spl_scratch->scl_data.taps.h_taps,
spl_scratch->scl_data.recip_ratios.horz); /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain2 =
spl_get_gainRing6(spl_scratch->scl_data.taps.h_taps,
spl_scratch->scl_data.recip_ratios.horz);
dscl_prog_data->easf_h_bf_maxa = 63; //Horz Max BF value A in U0.6 format.Selected if H_FCNTL==0
dscl_prog_data->easf_h_bf_maxb = 63; //Horz Max BF value B in U0.6 format.Selected if H_FCNTL==1
dscl_prog_data->easf_h_bf_mina = 0; //Horz Min BF value B in U0.6 format.Selected if H_FCNTL==0
dscl_prog_data->easf_h_bf_minb = 0; //Horz Min BF value B in U0.6 format.Selected if H_FCNTL==1 if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.