aports/main/libyuv/fix-loongarch64-build.patch


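This patch enables the LoongArch SIMD code paths and fixes the LSX/LASX ARGBAttenuateRow kernels:

* CMakeLists.txt: detect loongarch64 targets, probe the compiler with check_cxx_source_compiles for LSX ("vadd.w") and LASX ("xvadd.w") support, and append -mlsx / -mlasx to CMAKE_CXX_FLAGS only when the corresponding probe succeeds.
* source/convert_argb.cc: hook up the ARGBAttenuateRow_Any_LSX / ARGBAttenuateRow_LSX and ARGBAttenuateRow_Any_LASX / ARGBAttenuateRow_LASX fast paths in the I*AlphaToARGBMatrix* converters, following the existing per-architecture dispatch pattern (width multiples of 8 for LSX, 16 for LASX).
* source/row_lsx.cc, source/row_lasx.cc: correct ARGBAttenuateRow_LSX/_LASX. The previous code packed each channel with itself and shifted the products right by 24; the new code widens each channel against a zero vector, multiply-accumulates onto a 255 rounding constant, and narrows with a right shift of 8, i.e. (component * alpha + 255) >> 8 per channel.

A minimal scalar sketch of the per-pixel math the fixed kernels are expected to reproduce (illustrative only, not part of the patch; the helper name is invented, and alpha pass-through is assumed to match libyuv's scalar attenuate path):

    #include <stdint.h>

    /* One BGRA pixel: multiply B, G and R by alpha with +255 rounding,
     * keep alpha as-is -- the same per-channel result the LSX/LASX
     * kernels compute 8 (LSX) or 16 (LASX) pixels at a time. */
    static void attenuate_pixel_sketch(const uint8_t src[4], uint8_t dst[4]) {
      uint32_t a = src[3];
      dst[0] = (uint8_t)((src[0] * a + 255) >> 8); /* B */
      dst[1] = (uint8_t)((src[1] * a + 255) >> 8); /* G */
      dst[2] = (uint8_t)((src[2] * a + 255) >> 8); /* R */
      dst[3] = (uint8_t)a;                         /* A */
    }
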
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9abfa74..f129d52 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,6 +26,40 @@ if(MSVC)
ADD_DEFINITIONS ( -D_CRT_SECURE_NO_WARNINGS )
endif()
+# Automatically add "-mlsx" and "-mlasx" to enable SIMD optimization on the loongarch platform
+string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" SYSPROC)
+set(LOONGARCH64_ALIASES loongarch64)
+list(FIND LOONGARCH64_ALIASES "${SYSPROC}" LOONGARCH64MATCH)
+
+if(LOONGARCH64MATCH GREATER "-1")
+ set(LOONGARCH64 1)
+endif()
+
+if(LOONGARCH64)
+ include(CheckCXXSourceCompiles)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-narrowing")
+ check_cxx_source_compiles("
+ int main(int argc, char **argv) {
+ __asm__ volatile (
+ \"vadd.w $vr0, $vr1, $vr1\"
+ );
+ return 0; }" SUPPORTS_LSX)
+
+ check_cxx_source_compiles("
+ int main(int argc, char **argv) {
+ __asm__ volatile (
+ \"xvadd.w $xr0, $xr1, $xr1\"
+ );
+ return 0; }" SUPPORTS_LASX)
+
+ if(SUPPORTS_LSX)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mlsx")
+ endif()
+ if(SUPPORTS_LASX)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mlasx")
+ endif()
+endif()
+
# this creates the static library (.a)
ADD_LIBRARY ( ${ly_lib_static} STATIC ${ly_source_files} )
diff --git a/source/convert_argb.cc b/source/convert_argb.cc
index 3655e30..3dcc68d 100644
--- a/source/convert_argb.cc
+++ b/source/convert_argb.cc
@@ -2101,7 +2101,22 @@ int I420AlphaToARGBMatrix(const uint8_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I422AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -2244,7 +2259,22 @@ int I422AlphaToARGBMatrix(const uint8_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I422AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -2369,7 +2399,22 @@ int I444AlphaToARGBMatrix(const uint8_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I444AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -2602,7 +2647,22 @@ int I010AlphaToARGBMatrix(const uint16_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I210AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -2708,7 +2768,22 @@ int I210AlphaToARGBMatrix(const uint16_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I210AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -2812,7 +2887,22 @@ int I410AlphaToARGBMatrix(const uint16_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
for (y = 0; y < height; ++y) {
I410AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants,
width);
@@ -7081,7 +7171,22 @@ static int I420AlphaToARGBMatrixBilinear(
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
#if defined(HAS_SCALEROWUP2_BILINEAR_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
Scale2RowUp_Bilinear = ScaleRowUp2_Bilinear_Any_SSE2;
@@ -7291,7 +7396,22 @@ static int I422AlphaToARGBMatrixLinear(const uint8_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
#if defined(HAS_SCALEROWUP2_LINEAR_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ScaleRowUp2_Linear = ScaleRowUp2_Linear_Any_SSE2;
@@ -7437,7 +7557,22 @@ static int I010AlphaToARGBMatrixBilinear(
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
#if defined(HAS_SCALEROWUP2_BILINEAR_12_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
Scale2RowUp_Bilinear_12 = ScaleRowUp2_Bilinear_12_Any_SSSE3;
@@ -7605,7 +7740,22 @@ static int I210AlphaToARGBMatrixLinear(const uint16_t* src_y,
ARGBAttenuateRow = ARGBAttenuateRow_RVV;
}
#endif
-
+#if defined(HAS_ARGBATTENUATEROW_LSX)
+ if (TestCpuFlag(kCpuHasLSX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LSX;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LSX;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_LASX)
+ if (TestCpuFlag(kCpuHasLASX)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_LASX;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_LASX;
+ }
+ }
+#endif
#if defined(HAS_SCALEROWUP2_LINEAR_12_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
ScaleRowUp2_Linear = ScaleRowUp2_Linear_12_Any_SSSE3;
diff --git a/source/row_lasx.cc b/source/row_lasx.cc
index be85022..3613b0a 100644
--- a/source/row_lasx.cc
+++ b/source/row_lasx.cc
@@ -1148,24 +1148,26 @@ void ARGBAttenuateRow_LASX(const uint8_t* src_argb,
__m256i b, g, r, a, dst0, dst1;
__m256i control = {0x0005000100040000, 0x0007000300060002, 0x0005000100040000,
0x0007000300060002};
+ __m256i zero = __lasx_xvldi(0);
+ __m256i const_add = __lasx_xvldi(0x8ff);
for (x = 0; x < len; x++) {
DUP2_ARG2(__lasx_xvld, src_argb, 0, src_argb, 32, src0, src1);
tmp0 = __lasx_xvpickev_b(src1, src0);
tmp1 = __lasx_xvpickod_b(src1, src0);
- b = __lasx_xvpackev_b(tmp0, tmp0);
- r = __lasx_xvpackod_b(tmp0, tmp0);
- g = __lasx_xvpackev_b(tmp1, tmp1);
- a = __lasx_xvpackod_b(tmp1, tmp1);
- reg0 = __lasx_xvmulwev_w_hu(b, a);
- reg1 = __lasx_xvmulwod_w_hu(b, a);
- reg2 = __lasx_xvmulwev_w_hu(r, a);
- reg3 = __lasx_xvmulwod_w_hu(r, a);
- reg4 = __lasx_xvmulwev_w_hu(g, a);
- reg5 = __lasx_xvmulwod_w_hu(g, a);
- reg0 = __lasx_xvssrani_h_w(reg1, reg0, 24);
- reg2 = __lasx_xvssrani_h_w(reg3, reg2, 24);
- reg4 = __lasx_xvssrani_h_w(reg5, reg4, 24);
+ b = __lasx_xvpackev_b(zero, tmp0);
+ r = __lasx_xvpackod_b(zero, tmp0);
+ g = __lasx_xvpackev_b(zero, tmp1);
+ a = __lasx_xvpackod_b(zero, tmp1);
+ reg0 = __lasx_xvmaddwev_w_hu(const_add, b, a);
+ reg1 = __lasx_xvmaddwod_w_hu(const_add, b, a);
+ reg2 = __lasx_xvmaddwev_w_hu(const_add, r, a);
+ reg3 = __lasx_xvmaddwod_w_hu(const_add, r, a);
+ reg4 = __lasx_xvmaddwev_w_hu(const_add, g, a);
+ reg5 = __lasx_xvmaddwod_w_hu(const_add, g, a);
+ reg0 = __lasx_xvssrani_h_w(reg1, reg0, 8);
+ reg2 = __lasx_xvssrani_h_w(reg3, reg2, 8);
+ reg4 = __lasx_xvssrani_h_w(reg5, reg4, 8);
reg0 = __lasx_xvshuf_h(control, reg0, reg0);
reg2 = __lasx_xvshuf_h(control, reg2, reg2);
reg4 = __lasx_xvshuf_h(control, reg4, reg4);
diff --git a/source/row_lsx.cc b/source/row_lsx.cc
index fa088c9..10546a9 100644
--- a/source/row_lsx.cc
+++ b/source/row_lsx.cc
@@ -1102,24 +1102,26 @@ void ARGBAttenuateRow_LSX(const uint8_t* src_argb,
__m128i reg0, reg1, reg2, reg3, reg4, reg5;
__m128i b, g, r, a, dst0, dst1;
__m128i control = {0x0005000100040000, 0x0007000300060002};
+ __m128i zero = __lsx_vldi(0);
+ __m128i const_add = __lsx_vldi(0x8ff);
for (x = 0; x < len; x++) {
DUP2_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src0, src1);
tmp0 = __lsx_vpickev_b(src1, src0);
tmp1 = __lsx_vpickod_b(src1, src0);
- b = __lsx_vpackev_b(tmp0, tmp0);
- r = __lsx_vpackod_b(tmp0, tmp0);
- g = __lsx_vpackev_b(tmp1, tmp1);
- a = __lsx_vpackod_b(tmp1, tmp1);
- reg0 = __lsx_vmulwev_w_hu(b, a);
- reg1 = __lsx_vmulwod_w_hu(b, a);
- reg2 = __lsx_vmulwev_w_hu(r, a);
- reg3 = __lsx_vmulwod_w_hu(r, a);
- reg4 = __lsx_vmulwev_w_hu(g, a);
- reg5 = __lsx_vmulwod_w_hu(g, a);
- reg0 = __lsx_vssrani_h_w(reg1, reg0, 24);
- reg2 = __lsx_vssrani_h_w(reg3, reg2, 24);
- reg4 = __lsx_vssrani_h_w(reg5, reg4, 24);
+ b = __lsx_vpackev_b(zero, tmp0);
+ r = __lsx_vpackod_b(zero, tmp0);
+ g = __lsx_vpackev_b(zero, tmp1);
+ a = __lsx_vpackod_b(zero, tmp1);
+ reg0 = __lsx_vmaddwev_w_hu(const_add, b, a);
+ reg1 = __lsx_vmaddwod_w_hu(const_add, b, a);
+ reg2 = __lsx_vmaddwev_w_hu(const_add, r, a);
+ reg3 = __lsx_vmaddwod_w_hu(const_add, r, a);
+ reg4 = __lsx_vmaddwev_w_hu(const_add, g, a);
+ reg5 = __lsx_vmaddwod_w_hu(const_add, g, a);
+ reg0 = __lsx_vssrani_h_w(reg1, reg0, 8);
+ reg2 = __lsx_vssrani_h_w(reg3, reg2, 8);
+ reg4 = __lsx_vssrani_h_w(reg5, reg4, 8);
reg0 = __lsx_vshuf_h(control, reg0, reg0);
reg2 = __lsx_vshuf_h(control, reg2, reg2);
reg4 = __lsx_vshuf_h(control, reg4, reg4);