/* * Copyright 2011 The LibYuv Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
#include"libyuv/planar_functions.h"
#include <assert.h> #include <string.h> // for memset()
#include"libyuv/cpu_id.h" #include"libyuv/row.h" #include"libyuv/scale_row.h"// for ScaleRowDown2
// Copy a plane of 16 bit pixels by delegating to the 8 bit plane copy.
// A row of N uint16_t pixels occupies exactly 2*N bytes, so doubling the
// byte width and strides makes the 8 bit copy move the data unchanged.
// Height (including the negative-height invert convention) passes through.
LIBYUV_API
void CopyPlane_16(const uint16_t* src_y,
                  int src_stride_y,
                  uint16_t* dst_y,
                  int dst_stride_y,
                  int width,
                  int height) {
  const uint8_t* src_bytes = (const uint8_t*)src_y;
  uint8_t* dst_bytes = (uint8_t*)dst_y;
  CopyPlane(src_bytes, src_stride_y * 2, dst_bytes, dst_stride_y * 2,
            width * 2, height);
}
// Convert a plane of 16 bit data to 8 bit
LIBYUV_API void Convert16To8Plane(const uint16_t* src_y, int src_stride_y,
uint8_t* dst_y, int dst_stride_y, int scale, // 16384 for 10 bits int width, int height) { int y; void (*Convert16To8Row)(const uint16_t* src_y, uint8_t* dst_y, int scale, int width) = Convert16To8Row_C;
if (width <= 0 || height == 0) { return;
} // Negative height means invert the image. if (height < 0) {
height = -height;
dst_y = dst_y + (height - 1) * dst_stride_y;
dst_stride_y = -dst_stride_y;
} // Coalesce rows. if (src_stride_y == width && dst_stride_y == width) {
width *= height;
height = 1;
src_stride_y = dst_stride_y = 0;
} #ifdefined(HAS_CONVERT16TO8ROW_NEON) if (TestCpuFlag(kCpuHasNEON)) {
Convert16To8Row = Convert16To8Row_Any_NEON; if (IS_ALIGNED(width, 16)) {
Convert16To8Row = Convert16To8Row_NEON;
}
} #endif #ifdefined(HAS_CONVERT16TO8ROW_SME) if (TestCpuFlag(kCpuHasSME)) {
Convert16To8Row = Convert16To8Row_SME;
} #endif #ifdefined(HAS_CONVERT16TO8ROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) {
Convert16To8Row = Convert16To8Row_Any_SSSE3; if (IS_ALIGNED(width, 16)) {
Convert16To8Row = Convert16To8Row_SSSE3;
}
} #endif #ifdefined(HAS_CONVERT16TO8ROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) {
Convert16To8Row = Convert16To8Row_Any_AVX2; if (IS_ALIGNED(width, 32)) {
Convert16To8Row = Convert16To8Row_AVX2;
}
} #endif #ifdefined(HAS_CONVERT16TO8ROW_AVX512BW) if (TestCpuFlag(kCpuHasAVX512BW)) {
Convert16To8Row = Convert16To8Row_Any_AVX512BW; if (IS_ALIGNED(width, 64)) {
Convert16To8Row = Convert16To8Row_AVX512BW;
}
} #endif
// Convert a plane of 8 bit data to 16 bit
LIBYUV_API void Convert8To16Plane(const uint8_t* src_y, int src_stride_y,
uint16_t* dst_y, int dst_stride_y, int scale, // 1024 for 10 bits int width, int height) { int y; void (*Convert8To16Row)(const uint8_t* src_y, uint16_t* dst_y, int scale, int width) = Convert8To16Row_C;
// Copy I422.
LIBYUV_API int I422Copy(const uint8_t* src_y, int src_stride_y, const uint8_t* src_u, int src_stride_u, const uint8_t* src_v, int src_stride_v,
uint8_t* dst_y, int dst_stride_y,
uint8_t* dst_u, int dst_stride_u,
uint8_t* dst_v, int dst_stride_v, int width, int height) { int halfwidth = (width + 1) >> 1;
// Copy I210.
LIBYUV_API int I210Copy(const uint16_t* src_y, int src_stride_y, const uint16_t* src_u, int src_stride_u, const uint16_t* src_v, int src_stride_v,
uint16_t* dst_y, int dst_stride_y,
uint16_t* dst_u, int dst_stride_u,
uint16_t* dst_v, int dst_stride_v, int width, int height) { int halfwidth = (width + 1) >> 1;
// Copy NV21. Supports inverting.
// NV21 and NV12 share an identical memory layout (a Y plane followed by one
// interleaved two-byte-per-pixel chroma plane); only the channel naming
// differs, so a plain NV12 copy moves the bytes correctly.
LIBYUV_API
int NV21Copy(const uint8_t* src_y,
             int src_stride_y,
             const uint8_t* src_vu,
             int src_stride_vu,
             uint8_t* dst_y,
             int dst_stride_y,
             uint8_t* dst_vu,
             int dst_stride_vu,
             int width,
             int height) {
  return NV12Copy(src_y, src_stride_y, src_vu, src_stride_vu, dst_y,
                  dst_stride_y, dst_vu, dst_stride_vu, width, height);
}
// Support function for NV12 etc UV channels.
// Width and height are plane sizes (typically half pixel width).
// De-interleaves an interleaved UV plane into separate U and V planes.
// Negative height inverts the output vertically.
LIBYUV_API
void SplitUVPlane(const uint8_t* src_uv,
                  int src_stride_uv,
                  uint8_t* dst_u,
                  int dst_stride_u,
                  uint8_t* dst_v,
                  int dst_stride_v,
                  int width,
                  int height) {
  int row;
  void (*SplitUVRow)(const uint8_t* src_uv, uint8_t* dst_u, uint8_t* dst_v,
                     int width) = SplitUVRow_C;
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_u += (height - 1) * dst_stride_u;
    dst_v += (height - 1) * dst_stride_v;
    dst_stride_u = -dst_stride_u;
    dst_stride_v = -dst_stride_v;
  }
  // If source and both destinations are contiguous, treat the whole plane as
  // one long row.
  if (src_stride_uv == width * 2 && dst_stride_u == width &&
      dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_uv = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_SPLITUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    SplitUVRow = SplitUVRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      SplitUVRow = SplitUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    SplitUVRow = SplitUVRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      SplitUVRow = SplitUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SplitUVRow = SplitUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      SplitUVRow = SplitUVRow_NEON;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    SplitUVRow = SplitUVRow_Any_MSA;
    if (IS_ALIGNED(width, 32)) {
      SplitUVRow = SplitUVRow_MSA;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    SplitUVRow = SplitUVRow_Any_LSX;
    if (IS_ALIGNED(width, 32)) {
      SplitUVRow = SplitUVRow_LSX;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_RVV)
  if (TestCpuFlag(kCpuHasRVV)) {
    SplitUVRow = SplitUVRow_RVV;
  }
#endif
  for (row = 0; row < height; ++row) {
    // De-interleave one row of UV into separate U and V rows.
    SplitUVRow(src_uv, dst_u, dst_v, width);
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
    src_uv += src_stride_uv;
  }
}
// Interleave separate U and V planes into one UV plane (NV12 chroma layout).
// Width and height are chroma plane sizes. Negative height inverts output.
LIBYUV_API
void MergeUVPlane(const uint8_t* src_u,
                  int src_stride_u,
                  const uint8_t* src_v,
                  int src_stride_v,
                  uint8_t* dst_uv,
                  int dst_stride_uv,
                  int width,
                  int height) {
  int row;
  void (*MergeUVRow)(const uint8_t* src_u, const uint8_t* src_v,
                     uint8_t* dst_uv, int width) = MergeUVRow_C;
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_uv += (height - 1) * dst_stride_uv;
    dst_stride_uv = -dst_stride_uv;
  }
  // If all three planes are contiguous, process the whole plane as one row.
  if (src_stride_u == width && src_stride_v == width &&
      dst_stride_uv == width * 2) {
    width *= height;
    height = 1;
    src_stride_u = src_stride_v = dst_stride_uv = 0;
  }
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      MergeUVRow = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(width, 16)) {
      MergeUVRow = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX512BW)
  if (TestCpuFlag(kCpuHasAVX512BW)) {
    MergeUVRow = MergeUVRow_Any_AVX512BW;
    if (IS_ALIGNED(width, 32)) {
      MergeUVRow = MergeUVRow_AVX512BW;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      MergeUVRow = MergeUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SME)
  if (TestCpuFlag(kCpuHasSME)) {
    MergeUVRow = MergeUVRow_SME;
  }
#endif
#if defined(HAS_MERGEUVROW_MSA)
  if (TestCpuFlag(kCpuHasMSA)) {
    MergeUVRow = MergeUVRow_Any_MSA;
    if (IS_ALIGNED(width, 16)) {
      MergeUVRow = MergeUVRow_MSA;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_LSX)
  if (TestCpuFlag(kCpuHasLSX)) {
    MergeUVRow = MergeUVRow_Any_LSX;
    if (IS_ALIGNED(width, 16)) {
      MergeUVRow = MergeUVRow_LSX;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_RVV)
  if (TestCpuFlag(kCpuHasRVV)) {
    MergeUVRow = MergeUVRow_RVV;
  }
#endif
  for (row = 0; row < height; ++row) {
    // Interleave one row of U and V into a row of UV.
    MergeUVRow(src_u, src_v, dst_uv, width);
    src_u += src_stride_u;
    src_v += src_stride_v;
    dst_uv += dst_stride_uv;
  }
}
// Support function for P010 etc UV channels.
// Width and height are plane sizes (typically half pixel width).
// De-interleaves a 16 bit UV plane into separate 16 bit U and V planes.
// depth is the bit depth of the samples (row functions use it to position
// the significant bits). Negative height inverts the output vertically.
LIBYUV_API
void SplitUVPlane_16(const uint16_t* src_uv,
                     int src_stride_uv,
                     uint16_t* dst_u,
                     int dst_stride_u,
                     uint16_t* dst_v,
                     int dst_stride_v,
                     int width,
                     int height,
                     int depth) {
  int y;
  void (*SplitUVRow_16)(const uint16_t* src_uv, uint16_t* dst_u,
                        uint16_t* dst_v, int depth, int width) =
      SplitUVRow_16_C;
  // Validate depth range, consistent with MergeUVPlane_16.
  assert(depth >= 8);
  assert(depth <= 16);
  if (width <= 0 || height == 0) {
    return;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_u = dst_u + (height - 1) * dst_stride_u;
    dst_v = dst_v + (height - 1) * dst_stride_v;
    dst_stride_u = -dst_stride_u;
    dst_stride_v = -dst_stride_v;
  }
  // Coalesce rows when all three planes are contiguous.
  if (src_stride_uv == width * 2 && dst_stride_u == width &&
      dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_uv = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_SPLITUVROW_16_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    SplitUVRow_16 = SplitUVRow_16_Any_AVX2;
    if (IS_ALIGNED(width, 16)) {
      SplitUVRow_16 = SplitUVRow_16_AVX2;
    }
  }
#endif
#if defined(HAS_SPLITUVROW_16_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SplitUVRow_16 = SplitUVRow_16_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      SplitUVRow_16 = SplitUVRow_16_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    // De-interleave one row of UV into separate U and V rows.
    SplitUVRow_16(src_uv, dst_u, dst_v, depth, width);
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
    src_uv += src_stride_uv;
  }
}
// Interleave separate 16 bit U and V planes into one UV plane (P010 chroma
// layout). depth is the sample bit depth, validated to [8, 16].
// Negative height inverts the output vertically.
LIBYUV_API
void MergeUVPlane_16(const uint16_t* src_u,
                     int src_stride_u,
                     const uint16_t* src_v,
                     int src_stride_v,
                     uint16_t* dst_uv,
                     int dst_stride_uv,
                     int width,
                     int height,
                     int depth) {
  int row;
  void (*MergeUVRow_16)(const uint16_t* src_u, const uint16_t* src_v,
                        uint16_t* dst_uv, int depth, int width) =
      MergeUVRow_16_C;
  assert(depth >= 8);
  assert(depth <= 16);
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_uv += (height - 1) * dst_stride_uv;
    dst_stride_uv = -dst_stride_uv;
  }
  // If all three planes are contiguous, process the whole plane as one row.
  if (src_stride_u == width && src_stride_v == width &&
      dst_stride_uv == width * 2) {
    width *= height;
    height = 1;
    src_stride_u = src_stride_v = dst_stride_uv = 0;
  }
#if defined(HAS_MERGEUVROW_16_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_16 = MergeUVRow_16_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      MergeUVRow_16 = MergeUVRow_16_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_16_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_16 = MergeUVRow_16_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      MergeUVRow_16 = MergeUVRow_16_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_16_SME)
  if (TestCpuFlag(kCpuHasSME)) {
    MergeUVRow_16 = MergeUVRow_16_SME;
  }
#endif
  for (row = 0; row < height; ++row) {
    // Interleave one row of U and V into a row of UV.
    MergeUVRow_16(src_u, src_v, dst_uv, depth, width);
    src_u += src_stride_u;
    src_v += src_stride_v;
    dst_uv += dst_stride_uv;
  }
}
// Convert plane from lsb to msb
LIBYUV_API void ConvertToMSBPlane_16(const uint16_t* src_y, int src_stride_y,
uint16_t* dst_y, int dst_stride_y, int width, int height, int depth) { int y; int scale = 1 << (16 - depth); void (*MultiplyRow_16)(const uint16_t* src_y, uint16_t* dst_y, int scale, int width) = MultiplyRow_16_C; if (width <= 0 || height == 0) { return;
} // Negative height means invert the image. if (height < 0) {
height = -height;
dst_y = dst_y + (height - 1) * dst_stride_y;
dst_stride_y = -dst_stride_y;
} // Coalesce rows. if (src_stride_y == width && dst_stride_y == width) {
width *= height;
height = 1;
src_stride_y = dst_stride_y = 0;
}
#ifdefined(HAS_MULTIPLYROW_16_AVX2) if (TestCpuFlag(kCpuHasAVX2)) {
MultiplyRow_16 = MultiplyRow_16_Any_AVX2; if (IS_ALIGNED(width, 32)) {
MultiplyRow_16 = MultiplyRow_16_AVX2;
}
} #endif #ifdefined(HAS_MULTIPLYROW_16_NEON) if (TestCpuFlag(kCpuHasNEON)) {
MultiplyRow_16 = MultiplyRow_16_Any_NEON; if (IS_ALIGNED(width, 16)) {
MultiplyRow_16 = MultiplyRow_16_NEON;
}
} #endif #ifdefined(HAS_MULTIPLYROW_16_SME) if (TestCpuFlag(kCpuHasSME)) {
MultiplyRow_16 = MultiplyRow_16_SME;
} #endif
// Swap U and V channels in interleaved UV plane.
// Reads UV pairs and writes them as VU pairs; width is in pixel pairs.
// Negative height inverts by reading the source bottom-up.
LIBYUV_API
void SwapUVPlane(const uint8_t* src_uv,
                 int src_stride_uv,
                 uint8_t* dst_vu,
                 int dst_stride_vu,
                 int width,
                 int height) {
  int row;
  void (*SwapUVRow)(const uint8_t* src_uv, uint8_t* dst_vu, int width) =
      SwapUVRow_C;
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: read source rows bottom-up to invert the image.
    height = -height;
    src_uv += (height - 1) * src_stride_uv;
    src_stride_uv = -src_stride_uv;
  }
  // If both planes are contiguous, process the whole plane as one row.
  if (src_stride_uv == width * 2 && dst_stride_vu == width * 2) {
    width *= height;
    height = 1;
    src_stride_uv = dst_stride_vu = 0;
  }
#if defined(HAS_SWAPUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    SwapUVRow = SwapUVRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      SwapUVRow = SwapUVRow_SSSE3;
    }
  }
#endif
#if defined(HAS_SWAPUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    SwapUVRow = SwapUVRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      SwapUVRow = SwapUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_SWAPUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SwapUVRow = SwapUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      SwapUVRow = SwapUVRow_NEON;
    }
  }
#endif
  for (row = 0; row < height; ++row) {
    SwapUVRow(src_uv, dst_vu, width);
    src_uv += src_stride_uv;
    dst_vu += dst_stride_vu;
  }
}
// Convert NV21 to NV12.
LIBYUV_API int NV21ToNV12(const uint8_t* src_y, int src_stride_y, const uint8_t* src_vu, int src_stride_vu,
uint8_t* dst_y, int dst_stride_y,
uint8_t* dst_uv, int dst_stride_uv, int width, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1;
// Test if tile_height is a power of 2 (16 or 32) #define IS_POWEROFTWO(x) (!((x) & ((x)-1)))
// Detile a plane of data.
// tile width is 16 and assumed.
// tile_height is 16 or 32 for MM21.
// src_stride_y is bytes per row of source ignoring tiling. e.g. 640
// Returns 0 on success, -1 on invalid arguments.
// TODO: More detile row functions.
LIBYUV_API
int DetilePlane(const uint8_t* src_y,
                int src_stride_y,
                uint8_t* dst_y,
                int dst_stride_y,
                int width,
                int height,
                int tile_height) {
  const ptrdiff_t src_tile_stride = 16 * tile_height;
  int row;
  void (*DetileRow)(const uint8_t* src, ptrdiff_t src_tile_stride,
                    uint8_t* dst, int width) = DetileRow_C;
  if (!src_y || !dst_y || width <= 0 || height == 0 ||
      !IS_POWEROFTWO(tile_height)) {
    return -1;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_y += (height - 1) * dst_stride_y;
    dst_stride_y = -dst_stride_y;
  }
#if defined(HAS_DETILEROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    DetileRow = DetileRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      DetileRow = DetileRow_SSE2;
    }
  }
#endif
#if defined(HAS_DETILEROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    DetileRow = DetileRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      DetileRow = DetileRow_NEON;
    }
  }
#endif
  for (row = 0; row < height; ++row) {
    DetileRow(src_y, src_tile_stride, dst_y, width);
    dst_y += dst_stride_y;
    src_y += 16;
    // At the bottom row of a tile band, step over the band to the next one.
    if ((row & (tile_height - 1)) == (tile_height - 1)) {
      src_y = src_y - src_tile_stride + src_stride_y * tile_height;
    }
  }
  return 0;
}
// Convert a plane of 16 bit tiles of 16 x H to linear.
// tile width is 16 and assumed.
// tile_height is 16 or 32 for MT2T.
// Returns 0 on success, -1 on invalid arguments.
LIBYUV_API
int DetilePlane_16(const uint16_t* src_y,
                   int src_stride_y,
                   uint16_t* dst_y,
                   int dst_stride_y,
                   int width,
                   int height,
                   int tile_height) {
  const ptrdiff_t src_tile_stride = 16 * tile_height;
  int row;
  void (*DetileRow_16)(const uint16_t* src, ptrdiff_t src_tile_stride,
                       uint16_t* dst, int width) = DetileRow_16_C;
  if (!src_y || !dst_y || width <= 0 || height == 0 ||
      !IS_POWEROFTWO(tile_height)) {
    return -1;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_y += (height - 1) * dst_stride_y;
    dst_stride_y = -dst_stride_y;
  }
#if defined(HAS_DETILEROW_16_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    DetileRow_16 = DetileRow_16_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      DetileRow_16 = DetileRow_16_SSE2;
    }
  }
#endif
#if defined(HAS_DETILEROW_16_AVX)
  if (TestCpuFlag(kCpuHasAVX)) {
    DetileRow_16 = DetileRow_16_Any_AVX;
    if (IS_ALIGNED(width, 16)) {
      DetileRow_16 = DetileRow_16_AVX;
    }
  }
#endif
#if defined(HAS_DETILEROW_16_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    DetileRow_16 = DetileRow_16_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      DetileRow_16 = DetileRow_16_NEON;
    }
  }
#endif
  for (row = 0; row < height; ++row) {
    DetileRow_16(src_y, src_tile_stride, dst_y, width);
    dst_y += dst_stride_y;
    src_y += 16;
    // At the bottom row of a tile band, step over the band to the next one.
    if ((row & (tile_height - 1)) == (tile_height - 1)) {
      src_y = src_y - src_tile_stride + src_stride_y * tile_height;
    }
  }
  return 0;
}
LIBYUV_API void DetileSplitUVPlane(const uint8_t* src_uv, int src_stride_uv,
uint8_t* dst_u, int dst_stride_u,
uint8_t* dst_v, int dst_stride_v, int width, int height, int tile_height) { const ptrdiff_t src_tile_stride = 16 * tile_height; int y; void (*DetileSplitUVRow)(const uint8_t* src, ptrdiff_t src_tile_stride,
uint8_t* dst_u, uint8_t* dst_v, int width) =
DetileSplitUVRow_C;
assert(src_stride_uv >= 0);
assert(tile_height > 0);
assert(src_stride_uv > 0);
// Support function for NV12 etc RGB channels.
// Width and height are plane sizes (typically half pixel width).
// De-interleaves a packed 3-byte-per-pixel RGB plane into separate R, G and
// B planes. Negative height inverts the output vertically.
LIBYUV_API
void SplitRGBPlane(const uint8_t* src_rgb,
                   int src_stride_rgb,
                   uint8_t* dst_r,
                   int dst_stride_r,
                   uint8_t* dst_g,
                   int dst_stride_g,
                   uint8_t* dst_b,
                   int dst_stride_b,
                   int width,
                   int height) {
  int row;
  void (*SplitRGBRow)(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g,
                      uint8_t* dst_b, int width) = SplitRGBRow_C;
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_r += (height - 1) * dst_stride_r;
    dst_g += (height - 1) * dst_stride_g;
    dst_b += (height - 1) * dst_stride_b;
    dst_stride_r = -dst_stride_r;
    dst_stride_g = -dst_stride_g;
    dst_stride_b = -dst_stride_b;
  }
  // If all four planes are contiguous, process the whole plane as one row.
  if (src_stride_rgb == width * 3 && dst_stride_r == width &&
      dst_stride_g == width && dst_stride_b == width) {
    width *= height;
    height = 1;
    src_stride_rgb = dst_stride_r = dst_stride_g = dst_stride_b = 0;
  }
#if defined(HAS_SPLITRGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    SplitRGBRow = SplitRGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      SplitRGBRow = SplitRGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_SPLITRGBROW_SSE41)
  if (TestCpuFlag(kCpuHasSSE41)) {
    SplitRGBRow = SplitRGBRow_Any_SSE41;
    if (IS_ALIGNED(width, 16)) {
      SplitRGBRow = SplitRGBRow_SSE41;
    }
  }
#endif
#if defined(HAS_SPLITRGBROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    SplitRGBRow = SplitRGBRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      SplitRGBRow = SplitRGBRow_AVX2;
    }
  }
#endif
#if defined(HAS_SPLITRGBROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SplitRGBRow = SplitRGBRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      SplitRGBRow = SplitRGBRow_NEON;
    }
  }
#endif
#if defined(HAS_SPLITRGBROW_RVV)
  if (TestCpuFlag(kCpuHasRVV)) {
    SplitRGBRow = SplitRGBRow_RVV;
  }
#endif
  for (row = 0; row < height; ++row) {
    // De-interleave one row of RGB into separate R, G and B rows.
    SplitRGBRow(src_rgb, dst_r, dst_g, dst_b, width);
    dst_r += dst_stride_r;
    dst_g += dst_stride_g;
    dst_b += dst_stride_b;
    src_rgb += src_stride_rgb;
  }
}
// Interleave separate R, G and B planes into one packed 3-byte-per-pixel RGB
// plane. Negative height inverts the output vertically.
LIBYUV_API
void MergeRGBPlane(const uint8_t* src_r,
                   int src_stride_r,
                   const uint8_t* src_g,
                   int src_stride_g,
                   const uint8_t* src_b,
                   int src_stride_b,
                   uint8_t* dst_rgb,
                   int dst_stride_rgb,
                   int width,
                   int height) {
  int row;
  void (*MergeRGBRow)(const uint8_t* src_r, const uint8_t* src_g,
                      const uint8_t* src_b, uint8_t* dst_rgb, int width) =
      MergeRGBRow_C;
  if (width <= 0 || height == 0) {
    return;
  }
  if (height < 0) {
    // Negative height: write destination rows bottom-up to invert the image.
    height = -height;
    dst_rgb += (height - 1) * dst_stride_rgb;
    dst_stride_rgb = -dst_stride_rgb;
  }
  // If all four planes are contiguous, process the whole plane as one row.
  if (src_stride_r == width && src_stride_g == width && src_stride_b == width &&
      dst_stride_rgb == width * 3) {
    width *= height;
    height = 1;
    src_stride_r = src_stride_g = src_stride_b = dst_stride_rgb = 0;
  }
#if defined(HAS_MERGERGBROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    MergeRGBRow = MergeRGBRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      MergeRGBRow = MergeRGBRow_SSSE3;
    }
  }
#endif
#if defined(HAS_MERGERGBROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeRGBRow = MergeRGBRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      MergeRGBRow = MergeRGBRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGERGBROW_RVV)
  if (TestCpuFlag(kCpuHasRVV)) {
    MergeRGBRow = MergeRGBRow_RVV;
  }
#endif
  for (row = 0; row < height; ++row) {
    // Interleave one row of R, G and B into a row of RGB.
    MergeRGBRow(src_r, src_g, src_b, dst_rgb, width);
    src_r += src_stride_r;
    src_g += src_stride_g;
    src_b += src_stride_b;
    dst_rgb += dst_stride_rgb;
  }
}
LIBYUV_NOINLINE staticvoid SplitARGBPlaneAlpha(const uint8_t* src_argb, int src_stride_argb,
uint8_t* dst_r, int dst_stride_r,
uint8_t* dst_g, int dst_stride_g,
uint8_t* dst_b, int dst_stride_b,
uint8_t* dst_a, int dst_stride_a, int width, int height) { int y; void (*SplitARGBRow)(const uint8_t* src_rgb, uint8_t* dst_r, uint8_t* dst_g,
uint8_t* dst_b, uint8_t* dst_a, int width) =
SplitARGBRow_C;
// TODO(yuan): Support 2 bit alpha channel.
LIBYUV_API void MergeXR30Plane(const uint16_t* src_r, int src_stride_r, const uint16_t* src_g, int src_stride_g, const uint16_t* src_b, int src_stride_b,
uint8_t* dst_ar30, int dst_stride_ar30, int width, int height, int depth) { int y; void (*MergeXR30Row)(const uint16_t* src_r, const uint16_t* src_g, const uint16_t* src_b, uint8_t* dst_ar30, int depth, int width) = MergeXR30Row_C;
LIBYUV_NOINLINE staticvoid MergeAR64PlaneOpaque(const uint16_t* src_r, int src_stride_r, const uint16_t* src_g, int src_stride_g, const uint16_t* src_b, int src_stride_b,
uint16_t* dst_ar64, int dst_stride_ar64, int width, int height, int depth) { int y; void (*MergeXR64Row)(const uint16_t* src_r, const uint16_t* src_g, const uint16_t* src_b, uint16_t* dst_argb, int depth, int width) = MergeXR64Row_C;
LIBYUV_NOINLINE staticvoid MergeARGB16To8PlaneOpaque(const uint16_t* src_r, int src_stride_r, const uint16_t* src_g, int src_stride_g, const uint16_t* src_b, int src_stride_b,
uint8_t* dst_argb, int dst_stride_argb, int width, int height, int depth) { int y; void (*MergeXRGB16To8Row)(const uint16_t* src_r, const uint16_t* src_g, const uint16_t* src_b, uint8_t* dst_argb, int depth, int width) = MergeXRGB16To8Row_C;
// Convert UYVY to I422.
LIBYUV_API int UYVYToI422(const uint8_t* src_uyvy, int src_stride_uyvy,
uint8_t* dst_y, int dst_stride_y,
uint8_t* dst_u, int dst_stride_u,
uint8_t* dst_v, int dst_stride_v, int width, int height) { int y; void (*UYVYToUV422Row)(const uint8_t* src_uyvy, uint8_t* dst_u,
uint8_t* dst_v, int width) = UYVYToUV422Row_C;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.27 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.