From 91a70d1572d34ab0fb22b9082fd24b49499e3dcc Mon Sep 17 00:00:00 2001
From: Leigh Scott
Date: Sun, 31 May 2020 08:25:37 +0100
Subject: [PATCH] Update to 3.4

---
 ...ct512-is-needed-on-all-architectures.patch |  35 --
 sources                                       |   2 +-
 x265-2.8-asm-primitives.patch                 | 347 ------
 x265-arm-cflags.patch                         |  45 +--
 x265-detect_cpu_armhfp.patch                  |  44 ---
 x265.spec                                     |  10 +-
 6 files changed, 31 insertions(+), 452 deletions(-)
 delete mode 100644 0003-detect512-is-needed-on-all-architectures.patch
 delete mode 100644 x265-2.8-asm-primitives.patch

diff --git a/0003-detect512-is-needed-on-all-architectures.patch b/0003-detect512-is-needed-on-all-architectures.patch
deleted file mode 100644
index 16aab24..0000000
--- a/0003-detect512-is-needed-on-all-architectures.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: Adam Sampson
-Date: Sun, 14 Oct 2018 14:04:18 +0200
-Subject: detect512 is needed on all architectures
-
----
- source/common/cpu.cpp | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
-diff --git a/source/common/cpu.cpp b/source/common/cpu.cpp
-index 0681ff5..fa687da 100644
---- a/source/common/cpu.cpp
-+++ b/source/common/cpu.cpp
-@@ -110,6 +110,11 @@ const cpu_name_t cpu_names[] =
- { "", 0 },
- };
- 
-+bool detect512()
-+{
-+ return(enable512);
-+}
-+
- #if X265_ARCH_X86
- 
- extern "C" {
-@@ -123,10 +128,6 @@ uint64_t PFX(cpu_xgetbv)(int xcr);
- #pragma warning(disable: 4309) // truncation of constant value
- #endif
- 
--bool detect512()
--{
-- return(enable512);
--}
- uint32_t cpu_detect(bool benableavx512 )
- {
- 
diff --git a/sources b/sources
index a1063e5..e336356 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (x265_3.3.tar.gz) = 3d56900ecd58e83d2ecc93d956524e3b0e622dbe3c74a137c7b21f98599245547403401e9ec7f679996fd09fca788aa10272bf17e58a68fce449c8cb555ad7c0
+SHA512 (x265_3.4.tar.gz) = 576b18711935e7da8433b2170d24ed159eb12ff1a18399360afa1b2132db33b463145c65ed918f667528ee954bbdfb5c69e5480f1c1df801515cefc592f3206e
diff --git a/x265-2.8-asm-primitives.patch b/x265-2.8-asm-primitives.patch
deleted file mode 100644
index 89b42f2..0000000
--- a/x265-2.8-asm-primitives.patch
+++ /dev/null
@@ -1,347 +0,0 @@
---- ./source/common/arm/asm-primitives.cpp.orig 2018-05-21 02:33:10.000000000 -0600
-+++ ./source/common/arm/asm-primitives.cpp 2018-05-28 20:38:37.302378303 -0600
-@@ -48,77 +48,77 @@ void setupAssemblyPrimitives(EncoderPrim
- p.ssim_4x4x2_core = PFX(ssim_4x4x2_core_neon);
- 
- // addAvg
-- p.pu[LUMA_4x4].addAvg = PFX(addAvg_4x4_neon);
-- p.pu[LUMA_4x8].addAvg = PFX(addAvg_4x8_neon);
-- p.pu[LUMA_4x16].addAvg = PFX(addAvg_4x16_neon);
-- p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_neon);
-- p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_neon);
-- p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_neon);
-- p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_neon);
-- p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_neon);
-- p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_neon);
-- p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_neon);
-- p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_neon);
-- p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_neon);
-- p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_neon);
-- p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_neon);
-- p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_neon);
-- p.pu[LUMA_32x8].addAvg = PFX(addAvg_32x8_neon);
-- p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_neon);
-- p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_neon);
-- p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_neon);
-- p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_neon);
-- p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_neon);
-- p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_neon);
-- p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_neon);
-- p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_neon);
-- p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_neon);
-+ p.pu[LUMA_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
-+ p.pu[LUMA_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
-+ p.pu[LUMA_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
-+ p.pu[LUMA_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
-+ p.pu[LUMA_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
-+ p.pu[LUMA_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
-+ p.pu[LUMA_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
-+ p.pu[LUMA_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
-+ p.pu[LUMA_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
-+ p.pu[LUMA_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
-+ p.pu[LUMA_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
-+ p.pu[LUMA_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
-+ p.pu[LUMA_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
-+ p.pu[LUMA_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
-+ p.pu[LUMA_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
-+ p.pu[LUMA_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
-+ p.pu[LUMA_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
-+ p.pu[LUMA_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
-+ p.pu[LUMA_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
-+ p.pu[LUMA_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
-+ p.pu[LUMA_48x64].addAvg[ALIGNED] = PFX(addAvg_48x64_neon);
-+ p.pu[LUMA_64x16].addAvg[ALIGNED] = PFX(addAvg_64x16_neon);
-+ p.pu[LUMA_64x32].addAvg[ALIGNED] = PFX(addAvg_64x32_neon);
-+ p.pu[LUMA_64x48].addAvg[ALIGNED] = PFX(addAvg_64x48_neon);
-+ p.pu[LUMA_64x64].addAvg[ALIGNED] = PFX(addAvg_64x64_neon);
- 
- // chroma addAvg
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg = PFX(addAvg_4x2_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg = PFX(addAvg_4x4_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg = PFX(addAvg_4x8_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg = PFX(addAvg_4x16_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg = PFX(addAvg_6x8_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg = PFX(addAvg_8x2_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg = PFX(addAvg_8x6_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg = PFX(addAvg_8x8_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg = PFX(addAvg_8x16_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg = PFX(addAvg_8x32_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg = PFX(addAvg_16x4_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg = PFX(addAvg_16x8_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg = PFX(addAvg_24x32_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg = PFX(addAvg_32x8_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_neon);
-- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_neon);
--
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg = PFX(addAvg_4x8_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg = PFX(addAvg_4x16_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg = PFX(addAvg_4x32_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg = PFX(addAvg_6x16_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_neon);
-- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg[ALIGNED] = PFX(addAvg_4x2_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg[ALIGNED] = PFX(addAvg_6x8_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg[ALIGNED] = PFX(addAvg_8x2_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg[ALIGNED] = PFX(addAvg_8x6_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
-+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
-+
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg[ALIGNED] = PFX(addAvg_4x32_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg[ALIGNED] = PFX(addAvg_6x16_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg[ALIGNED] = PFX(addAvg_8x12_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg[ALIGNED] = PFX(addAvg_8x64_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg[ALIGNED] = PFX(addAvg_12x32_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg[ALIGNED] = PFX(addAvg_16x24_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg[ALIGNED] = PFX(addAvg_24x64_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg[ALIGNED] = PFX(addAvg_32x48_neon);
-+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
- 
- // quant
- p.quant = PFX(quant_neon);
-@@ -402,7 +402,7 @@ void setupAssemblyPrimitives(EncoderPrim
- p.scale2D_64to32 = PFX(scale2D_64to32_neon);
- 
- // scale1D_128to64
-- p.scale1D_128to64 = PFX(scale1D_128to64_neon);
-+ p.scale1D_128to64[ALIGNED] = PFX(scale1D_128to64_neon);
- 
- // copy_count
- p.cu[BLOCK_4x4].copy_cnt = PFX(copy_cnt_4_neon);
-@@ -411,37 +411,37 @@ void setupAssemblyPrimitives(EncoderPrim
- p.cu[BLOCK_32x32].copy_cnt = PFX(copy_cnt_32_neon);
- 
- // filterPixelToShort
-- p.pu[LUMA_4x4].convert_p2s = PFX(filterPixelToShort_4x4_neon);
-- p.pu[LUMA_4x8].convert_p2s = PFX(filterPixelToShort_4x8_neon);
-- p.pu[LUMA_4x16].convert_p2s = PFX(filterPixelToShort_4x16_neon);
-- p.pu[LUMA_8x4].convert_p2s = PFX(filterPixelToShort_8x4_neon);
-- p.pu[LUMA_8x8].convert_p2s = PFX(filterPixelToShort_8x8_neon);
-- p.pu[LUMA_8x16].convert_p2s = PFX(filterPixelToShort_8x16_neon);
-- p.pu[LUMA_8x32].convert_p2s = PFX(filterPixelToShort_8x32_neon);
-- p.pu[LUMA_12x16].convert_p2s = PFX(filterPixelToShort_12x16_neon);
-- p.pu[LUMA_16x4].convert_p2s = PFX(filterPixelToShort_16x4_neon);
-- p.pu[LUMA_16x8].convert_p2s = PFX(filterPixelToShort_16x8_neon);
-- p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_neon);
-- p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_neon);
-- p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_neon);
-- p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_neon);
-- p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_neon);
-- p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_neon);
-- p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_neon);
-- p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_neon);
-- p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_neon);
-- p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_neon);
-- p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_neon);
-- p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_neon);
-- p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_neon);
-- p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_neon);
-- p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_neon);
-+ p.pu[LUMA_4x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x4_neon);
-+ p.pu[LUMA_4x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x8_neon);
-+ p.pu[LUMA_4x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x16_neon);
-+ p.pu[LUMA_8x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x4_neon);
-+ p.pu[LUMA_8x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x8_neon);
-+ p.pu[LUMA_8x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x16_neon);
-+ p.pu[LUMA_8x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x32_neon);
-+ p.pu[LUMA_12x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_12x16_neon);
-+ p.pu[LUMA_16x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x4_neon);
-+ p.pu[LUMA_16x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x8_neon);
-+ p.pu[LUMA_16x12].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x12_neon);
-+ p.pu[LUMA_16x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x16_neon);
-+ p.pu[LUMA_16x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x32_neon);
-+ p.pu[LUMA_16x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x64_neon);
-+ p.pu[LUMA_24x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_24x32_neon);
-+ p.pu[LUMA_32x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x8_neon);
-+ p.pu[LUMA_32x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x16_neon);
-+ p.pu[LUMA_32x24].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x24_neon);
-+ p.pu[LUMA_32x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x32_neon);
-+ p.pu[LUMA_32x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x64_neon);
-+ p.pu[LUMA_48x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_48x64_neon);
-+ p.pu[LUMA_64x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x16_neon);
-+ p.pu[LUMA_64x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x32_neon);
-+ p.pu[LUMA_64x48].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x48_neon);
-+ p.pu[LUMA_64x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x64_neon);
- 
- // Block_fill
-- p.cu[BLOCK_4x4].blockfill_s = PFX(blockfill_s_4x4_neon);
-- p.cu[BLOCK_8x8].blockfill_s = PFX(blockfill_s_8x8_neon);
-- p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_neon);
-- p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_neon);
-+ p.cu[BLOCK_4x4].blockfill_s[ALIGNED] = PFX(blockfill_s_4x4_neon);
-+ p.cu[BLOCK_8x8].blockfill_s[ALIGNED] = PFX(blockfill_s_8x8_neon);
-+ p.cu[BLOCK_16x16].blockfill_s[ALIGNED] = PFX(blockfill_s_16x16_neon);
-+ p.cu[BLOCK_32x32].blockfill_s[ALIGNED] = PFX(blockfill_s_32x32_neon);
- 
- // Blockcopy_ss
- p.cu[BLOCK_4x4].copy_ss = PFX(blockcopy_ss_4x4_neon);
-@@ -495,21 +495,21 @@ void setupAssemblyPrimitives(EncoderPrim
- p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_neon);
- 
- // pixel_add_ps
-- p.cu[BLOCK_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
-- p.cu[BLOCK_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
-- p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
-- p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
-- p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_neon);
-+ p.cu[BLOCK_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
-+ p.cu[BLOCK_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
-+ p.cu[BLOCK_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
-+ p.cu[BLOCK_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
-+ p.cu[BLOCK_64x64].add_ps[ALIGNED] = PFX(pixel_add_ps_64x64_neon);
- 
- // chroma add_ps
-- p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
-- p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
-- p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
-- p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
-- p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps = PFX(pixel_add_ps_4x8_neon);
-- p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps = PFX(pixel_add_ps_8x16_neon);
-- p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_neon);
-- p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_neon);
-+ p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
-+ p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
-+ p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
-+ p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
-+ p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps[ALIGNED] = PFX(pixel_add_ps_4x8_neon);
-+ p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps[ALIGNED] = PFX(pixel_add_ps_8x16_neon);
-+ p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps[ALIGNED] = PFX(pixel_add_ps_16x32_neon);
-+ p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps[ALIGNED] = PFX(pixel_add_ps_32x64_neon);
- 
- // cpy2Dto1D_shr
- p.cu[BLOCK_4x4].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_4x4_neon);
-@@ -518,10 +518,10 @@ void setupAssemblyPrimitives(EncoderPrim
- p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32x32_neon);
- 
- // ssd_s
-- p.cu[BLOCK_4x4].ssd_s = PFX(pixel_ssd_s_4x4_neon);
-- p.cu[BLOCK_8x8].ssd_s = PFX(pixel_ssd_s_8x8_neon);
-- p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16x16_neon);
-- p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32x32_neon);
-+ p.cu[BLOCK_4x4].ssd_s[ALIGNED] = PFX(pixel_ssd_s_4x4_neon);
-+ p.cu[BLOCK_8x8].ssd_s[ALIGNED] = PFX(pixel_ssd_s_8x8_neon);
-+ p.cu[BLOCK_16x16].ssd_s[ALIGNED] = PFX(pixel_ssd_s_16x16_neon);
-+ p.cu[BLOCK_32x32].ssd_s[ALIGNED] = PFX(pixel_ssd_s_32x32_neon);
- 
- // sse_ss
- p.cu[BLOCK_4x4].sse_ss = PFX(pixel_sse_ss_4x4_neon);
-@@ -548,10 +548,10 @@ void setupAssemblyPrimitives(EncoderPrim
- p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_neon);
- 
- // calc_Residual
-- p.cu[BLOCK_4x4].calcresidual = PFX(getResidual4_neon);
-- p.cu[BLOCK_8x8].calcresidual = PFX(getResidual8_neon);
-- p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_neon);
-- p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_neon);
-+ p.cu[BLOCK_4x4].calcresidual[ALIGNED] = PFX(getResidual4_neon);
-+ p.cu[BLOCK_8x8].calcresidual[ALIGNED] = PFX(getResidual8_neon);
-+ p.cu[BLOCK_16x16].calcresidual[ALIGNED] = PFX(getResidual16_neon);
-+ p.cu[BLOCK_32x32].calcresidual[ALIGNED] = PFX(getResidual32_neon);
- 
- // sse_pp
- p.cu[BLOCK_4x4].sse_pp = PFX(pixel_sse_pp_4x4_neon);
-@@ -722,31 +722,31 @@ void setupAssemblyPrimitives(EncoderPrim
- p.pu[LUMA_64x64].sad_x4 = PFX(sad_x4_64x64_neon);
- 
- // pixel_avg_pp
-- p.pu[LUMA_4x4].pixelavg_pp = PFX(pixel_avg_pp_4x4_neon);
-- p.pu[LUMA_4x8].pixelavg_pp = PFX(pixel_avg_pp_4x8_neon);
-- p.pu[LUMA_4x16].pixelavg_pp = PFX(pixel_avg_pp_4x16_neon);
-- p.pu[LUMA_8x4].pixelavg_pp = PFX(pixel_avg_pp_8x4_neon);
-- p.pu[LUMA_8x8].pixelavg_pp = PFX(pixel_avg_pp_8x8_neon);
-- p.pu[LUMA_8x16].pixelavg_pp = PFX(pixel_avg_pp_8x16_neon);
-- p.pu[LUMA_8x32].pixelavg_pp = PFX(pixel_avg_pp_8x32_neon);
-- p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_pp_12x16_neon);
-- p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_pp_16x4_neon);
-- p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_pp_16x8_neon);
-- p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_pp_16x12_neon);
-- p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_pp_16x16_neon);
-- p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_pp_16x32_neon);
-- p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_pp_16x64_neon);
-- p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_pp_24x32_neon);
-- p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_pp_32x8_neon);
-- p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_pp_32x16_neon);
-- p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_pp_32x24_neon);
-- p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_pp_32x32_neon);
-- p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_pp_32x64_neon);
-- p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_pp_48x64_neon);
-- p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_pp_64x16_neon);
-- p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_pp_64x32_neon);
-- p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_pp_64x48_neon);
-- p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_pp_64x64_neon);
-+ p.pu[LUMA_4x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x4_neon);
-+ p.pu[LUMA_4x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x8_neon);
-+ p.pu[LUMA_4x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x16_neon);
-+ p.pu[LUMA_8x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x4_neon);
-+ p.pu[LUMA_8x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x8_neon);
-+ p.pu[LUMA_8x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x16_neon);
-+ p.pu[LUMA_8x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x32_neon);
-+ p.pu[LUMA_12x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_12x16_neon);
-+ p.pu[LUMA_16x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x4_neon);
-+ p.pu[LUMA_16x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x8_neon);
-+ p.pu[LUMA_16x12].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x12_neon);
-+ p.pu[LUMA_16x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x16_neon);
-+ p.pu[LUMA_16x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x32_neon);
-+ p.pu[LUMA_16x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x64_neon);
-+ p.pu[LUMA_24x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_24x32_neon);
-+ p.pu[LUMA_32x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x8_neon);
-+ p.pu[LUMA_32x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x16_neon);
-+ p.pu[LUMA_32x24].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x24_neon);
-+ p.pu[LUMA_32x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x32_neon);
-+ p.pu[LUMA_32x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x64_neon);
-+ p.pu[LUMA_48x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_48x64_neon);
-+ p.pu[LUMA_64x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x16_neon);
-+ p.pu[LUMA_64x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x32_neon);
-+ p.pu[LUMA_64x48].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x48_neon);
-+ p.pu[LUMA_64x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x64_neon);
- 
- // planecopy
- p.planecopy_cp = PFX(pixel_planecopy_cp_neon);
diff --git a/x265-arm-cflags.patch b/x265-arm-cflags.patch
index 30d213a..8cd5c86 100644
--- a/x265-arm-cflags.patch
+++ b/x265-arm-cflags.patch
@@ -1,26 +1,35 @@
-diff -up x265_2.7/source/CMakeLists.txt.cflags x265_2.7/source/CMakeLists.txt
---- x265_2.7/source/CMakeLists.txt.cflags 2018-02-27 20:19:33.328932385 +0100
-+++ x265_2.7/source/CMakeLists.txt 2018-02-27 20:29:14.808956583 +0100
-@@ -233,18 +233,6 @@ if(GCC)
+--- x265_3.4/source/CMakeLists.txt.cflags
++++ x265_3.4/source/CMakeLists.txt
+@@ -238,28 +238,6 @@
 endif()
 endif()
 endif()
 - if(ARM AND CROSS_COMPILE_ARM)
-- set(ARM_ARGS -march=armv6 -mfloat-abi=soft -mfpu=vfp -marm -fPIC)
+- if(ARM64)
+- set(ARM_ARGS -fPIC)
+- else()
+- set(ARM_ARGS -march=armv6 -mfloat-abi=soft -mfpu=vfp -marm -fPIC)
+- endif()
+- message(STATUS "cross compile arm")
 - elseif(ARM)
-- find_package(Neon)
-- if(CPU_HAS_NEON)
-- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=neon -marm -fPIC)
+- if(ARM64)
+- set(ARM_ARGS -fPIC)
 - add_definitions(-DHAVE_NEON)
 - else()
-- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=vfp -marm)
+- find_package(Neon)
+- if(CPU_HAS_NEON)
+- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=neon -marm -fPIC)
+- add_definitions(-DHAVE_NEON)
+- else()
+- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=vfp -marm)
+- endif()
 - endif()
 - endif()
 - add_definitions(${ARM_ARGS})
 if(FPROFILE_GENERATE)
 if(INTEL_CXX)
 add_definitions(-prof-gen -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
-@@ -517,7 +505,7 @@ if((MSVC_IDE OR XCODE OR GCC) AND ENABLE
+@@ -546,7 +524,7 @@
 add_custom_command(
 OUTPUT ${ASM}.${SUFFIX}
 COMMAND ${CMAKE_CXX_COMPILER}
@@ -29,10 +38,10 @@ diff -up x265_2.7/source/CMakeLists.txt.cflags x265_2.7/source/CMakeLists.txt
 DEPENDS ${ASM_SRC})
 endforeach()
 elseif(X86)
-diff -up x265_2.7/source/dynamicHDR10/CMakeLists.txt.cflags x265_2.7/source/dynamicHDR10/CMakeLists.txt
---- x265_2.7/source/dynamicHDR10/CMakeLists.txt.cflags 2018-02-21 09:55:56.000000000 +0100
-+++ x265_2.7/source/dynamicHDR10/CMakeLists.txt 2018-02-27 20:29:45.377062994 +0100
-@@ -42,18 +42,6 @@ if(GCC)
+
+--- x265_3.4/source/dynamicHDR10/CMakeLists.txt.cflags
++++ x265_3.4/source/dynamicHDR10/CMakeLists.txt
+@@ -42,18 +42,6 @@
 endif()
 endif()
 endif()
@@ -51,10 +60,4 @@ diff -up x265_2.7/source/dynamicHDR10/CMakeLists.txt.cflags x265_2.7/source/dyna
 if(FPROFILE_GENERATE)
 if(INTEL_CXX)
 add_definitions(-prof-gen -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
-@@ -150,4 +138,4 @@ set(BIN_INSTALL_DIR bin CACHE STRING "In
- option(ENABLE_SHARED "Build shared library" OFF)
- 
- install(FILES hdr10plus.h DESTINATION include)
--endif()
-\ No newline at end of file
-+endif()
+
diff --git a/x265-detect_cpu_armhfp.patch b/x265-detect_cpu_armhfp.patch
index c9a92b8..797ad04 100644
--- a/x265-detect_cpu_armhfp.patch
+++ b/x265-detect_cpu_armhfp.patch
@@ -1,47 +1,3 @@
-diff -up x265_2.2/source/CMakeLists.txt.orig x265_2.2/source/CMakeLists.txt
---- x265_2.2/source/CMakeLists.txt.orig 2016-12-23 06:57:39.000000000 +0100
-+++ x265_2.2/source/CMakeLists.txt 2017-01-03 11:18:34.773738470 +0100
-@@ -41,9 +41,11 @@ SET(CMAKE_MODULE_PATH "${PROJECT_SOURCE_
- # System architecture detection
- string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" SYSPROC)
- set(X86_ALIASES x86 i386 i686 x86_64 amd64)
--set(ARM_ALIASES armv6l armv7l)
-+set(ARMv6_ALIASES armv6l)
-+set(ARMv7_ALIASES armv7l)
- list(FIND X86_ALIASES "${SYSPROC}" X86MATCH)
--list(FIND ARM_ALIASES "${SYSPROC}" ARMMATCH)
-+list(FIND ARMv6_ALIASES "${SYSPROC}" ARMv6MATCH)
-+list(FIND ARMv7_ALIASES "${SYSPROC}" ARMv7MATCH)
- set(POWER_ALIASES ppc64 ppc64le)
- list(FIND POWER_ALIASES "${SYSPROC}" POWERMATCH)
- if("${SYSPROC}" STREQUAL "" OR X86MATCH GREATER "-1")
-@@ -65,15 +67,24 @@ elseif(POWERMATCH GREATER "-1")
- add_definitions(-DPPC64=1)
- message(STATUS "Detected POWER PPC64 target processor")
- endif()
--elseif(ARMMATCH GREATER "-1")
-+elseif(ARMv6MATCH GREATER "-1")
- if(CROSS_COMPILE_ARM)
- message(STATUS "Cross compiling for ARM arch")
- else()
- set(CROSS_COMPILE_ARM 0)
- endif()
-- message(STATUS "Detected ARM target processor")
-+ message(STATUS "Detected ARMv6 target processor")
- set(ARM 1)
- add_definitions(-DX265_ARCH_ARM=1 -DHAVE_ARMV6=1)
-+elseif(ARMv7MATCH GREATER "-1")
-+ if(CROSS_COMPILE_ARM)
-+ message(STATUS "Cross compiling for ARM arch")
-+ else()
-+ set(CROSS_COMPILE_ARM 0)
-+ endif()
-+ message(STATUS "Detected ARMv7 target processor")
-+ set(ARM 1)
-+ add_definitions(-DX265_ARCH_ARM=1 -DHAVE_ARMV7=1)
- else()
- message(STATUS "CMAKE_SYSTEM_PROCESSOR value `${CMAKE_SYSTEM_PROCESSOR}` is unknown")
- message(STATUS "Please add this value near ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE}")
 --- x265_v2.6/source/test/testharness.h.orig 2017-12-30 22:27:49.827620181 +0000
 +++ x265_v2.6/source/test/testharness.h 2017-12-30 22:30:53.239500941 +0000
 @@ -70,9 +70,10 @@ protected:
diff --git a/x265.spec b/x265.spec
index 76ccd41..76c2086 100644
--- a/x265.spec
+++ b/x265.spec
@@ -1,9 +1,9 @@
-%global _so_version 188
+%global _so_version 192
 
 Summary: H.265/HEVC encoder
 Name: x265
-Version: 3.3
-Release: 2%{?dist}
+Version: 3.4
+Release: 1%{?dist}
 URL: http://x265.org/
 # source/Lib/TLibCommon - BSD
 # source/Lib/TLibEncoder - BSD
@@ -17,7 +17,6 @@ Patch1: x265-high-bit-depth-soname.patch
 Patch2: x265-detect_cpu_armhfp.patch
 Patch3: x265-arm-cflags.patch
 Patch4: x265-pkgconfig_path_fix.patch
-Patch5: x265-2.8-asm-primitives.patch
 
 BuildRequires: gcc-c++
 BuildRequires: cmake3
@@ -139,6 +138,9 @@ done
 %{_libdir}/pkgconfig/x265.pc
 
 %changelog
+* Sun May 31 2020 Leigh Scott - 3.4-1
+- Update to 3.4
+
 * Wed Mar 11 2020 Nicolas Chauvet - 3.3-2
 - Rebuilt for i686