diff --git a/.gitignore b/.gitignore
index 594d208..f3a1c5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@ x265_1.6.tar.gz
 /x265_2.5.tar.gz
 /x265_2.6.tar.gz
 /x265_2.7.tar.gz
+/x265_2.8.tar.gz
diff --git a/sources b/sources
index 7181119..2c38b2f 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-b0d7d20da2a418fa4f53a559946ea079  x265_2.7.tar.gz
+b68dcd4e8a495e53e53034a11fec5eb9  x265_2.8.tar.gz
diff --git a/x265-2.8-asm-primitives.patch b/x265-2.8-asm-primitives.patch
new file mode 100644
index 0000000..89b42f2
--- /dev/null
+++ b/x265-2.8-asm-primitives.patch
@@ -0,0 +1,347 @@
+--- ./source/common/arm/asm-primitives.cpp.orig	2018-05-21 02:33:10.000000000 -0600
++++ ./source/common/arm/asm-primitives.cpp	2018-05-28 20:38:37.302378303 -0600
+@@ -48,77 +48,77 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.ssim_4x4x2_core = PFX(ssim_4x4x2_core_neon);
+ 
+     // addAvg
+-    p.pu[LUMA_4x4].addAvg = PFX(addAvg_4x4_neon);
+-    p.pu[LUMA_4x8].addAvg = PFX(addAvg_4x8_neon);
+-    p.pu[LUMA_4x16].addAvg = PFX(addAvg_4x16_neon);
+-    p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_neon);
+-    p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_neon);
+-    p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_neon);
+-    p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_neon);
+-    p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_neon);
+-    p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_neon);
+-    p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_neon);
+-    p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_neon);
+-    p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_neon);
+-    p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_neon);
+-    p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_neon);
+-    p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_neon);
+-    p.pu[LUMA_32x8].addAvg = PFX(addAvg_32x8_neon);
+-    p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_neon);
+-    p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_neon);
+-    p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_neon);
+-    p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_neon);
+-    p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_neon);
+-    p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_neon);
+-    p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_neon);
+-    p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_neon);
+-    p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_neon);
++    p.pu[LUMA_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
++    p.pu[LUMA_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
++    p.pu[LUMA_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
++    p.pu[LUMA_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
++    p.pu[LUMA_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
++    p.pu[LUMA_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
++    p.pu[LUMA_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
++    p.pu[LUMA_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
++    p.pu[LUMA_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
++    p.pu[LUMA_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
++    p.pu[LUMA_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
++    p.pu[LUMA_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
++    p.pu[LUMA_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
++    p.pu[LUMA_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
++    p.pu[LUMA_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
++    p.pu[LUMA_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
++    p.pu[LUMA_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
++    p.pu[LUMA_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
++    p.pu[LUMA_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
++    p.pu[LUMA_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
++    p.pu[LUMA_48x64].addAvg[ALIGNED] = PFX(addAvg_48x64_neon);
++    p.pu[LUMA_64x16].addAvg[ALIGNED] = PFX(addAvg_64x16_neon);
++    p.pu[LUMA_64x32].addAvg[ALIGNED] = PFX(addAvg_64x32_neon);
++    p.pu[LUMA_64x48].addAvg[ALIGNED] = PFX(addAvg_64x48_neon);
++    p.pu[LUMA_64x64].addAvg[ALIGNED] = PFX(addAvg_64x64_neon);
+ 
+     // chroma addAvg
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg = PFX(addAvg_4x2_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg = PFX(addAvg_4x4_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg = PFX(addAvg_4x8_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg = PFX(addAvg_4x16_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg = PFX(addAvg_6x8_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg = PFX(addAvg_8x2_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg = PFX(addAvg_8x6_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg = PFX(addAvg_8x8_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg = PFX(addAvg_8x16_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg = PFX(addAvg_8x32_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg = PFX(addAvg_16x4_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg = PFX(addAvg_16x8_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg = PFX(addAvg_24x32_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg = PFX(addAvg_32x8_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_neon);
+-    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_neon);
+-
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg = PFX(addAvg_4x8_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg = PFX(addAvg_4x16_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg = PFX(addAvg_4x32_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg = PFX(addAvg_6x16_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_neon);
+-    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg[ALIGNED] = PFX(addAvg_4x2_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg[ALIGNED] = PFX(addAvg_6x8_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg[ALIGNED] = PFX(addAvg_8x2_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg[ALIGNED] = PFX(addAvg_8x6_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
++    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
++
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg[ALIGNED] = PFX(addAvg_4x32_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg[ALIGNED] = PFX(addAvg_6x16_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg[ALIGNED] = PFX(addAvg_8x12_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg[ALIGNED] = PFX(addAvg_8x64_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg[ALIGNED] = PFX(addAvg_12x32_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg[ALIGNED] = PFX(addAvg_16x24_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg[ALIGNED] = PFX(addAvg_24x64_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg[ALIGNED] = PFX(addAvg_32x48_neon);
++    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
+ 
+     // quant
+     p.quant = PFX(quant_neon);
+@@ -402,7 +402,7 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.scale2D_64to32 = PFX(scale2D_64to32_neon);
+ 
+     // scale1D_128to64
+-    p.scale1D_128to64 = PFX(scale1D_128to64_neon);
++    p.scale1D_128to64[ALIGNED] = PFX(scale1D_128to64_neon);
+ 
+     // copy_count
+     p.cu[BLOCK_4x4].copy_cnt = PFX(copy_cnt_4_neon);
+@@ -411,37 +411,37 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.cu[BLOCK_32x32].copy_cnt = PFX(copy_cnt_32_neon);
+ 
+     // filterPixelToShort
+-    p.pu[LUMA_4x4].convert_p2s = PFX(filterPixelToShort_4x4_neon);
+-    p.pu[LUMA_4x8].convert_p2s = PFX(filterPixelToShort_4x8_neon);
+-    p.pu[LUMA_4x16].convert_p2s = PFX(filterPixelToShort_4x16_neon);
+-    p.pu[LUMA_8x4].convert_p2s = PFX(filterPixelToShort_8x4_neon);
+-    p.pu[LUMA_8x8].convert_p2s = PFX(filterPixelToShort_8x8_neon);
+-    p.pu[LUMA_8x16].convert_p2s = PFX(filterPixelToShort_8x16_neon);
+-    p.pu[LUMA_8x32].convert_p2s = PFX(filterPixelToShort_8x32_neon);
+-    p.pu[LUMA_12x16].convert_p2s = PFX(filterPixelToShort_12x16_neon);
+-    p.pu[LUMA_16x4].convert_p2s = PFX(filterPixelToShort_16x4_neon);
+-    p.pu[LUMA_16x8].convert_p2s = PFX(filterPixelToShort_16x8_neon);
+-    p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_neon);
+-    p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_neon);
+-    p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_neon);
+-    p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_neon);
+-    p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_neon);
+-    p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_neon);
+-    p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_neon);
+-    p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_neon);
+-    p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_neon);
+-    p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_neon);
+-    p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_neon);
+-    p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_neon);
+-    p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_neon);
+-    p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_neon);
+-    p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_neon);
++    p.pu[LUMA_4x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x4_neon);
++    p.pu[LUMA_4x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x8_neon);
++    p.pu[LUMA_4x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x16_neon);
++    p.pu[LUMA_8x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x4_neon);
++    p.pu[LUMA_8x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x8_neon);
++    p.pu[LUMA_8x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x16_neon);
++    p.pu[LUMA_8x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x32_neon);
++    p.pu[LUMA_12x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_12x16_neon);
++    p.pu[LUMA_16x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x4_neon);
++    p.pu[LUMA_16x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x8_neon);
++    p.pu[LUMA_16x12].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x12_neon);
++    p.pu[LUMA_16x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x16_neon);
++    p.pu[LUMA_16x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x32_neon);
++    p.pu[LUMA_16x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x64_neon);
++    p.pu[LUMA_24x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_24x32_neon);
++    p.pu[LUMA_32x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x8_neon);
++    p.pu[LUMA_32x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x16_neon);
++    p.pu[LUMA_32x24].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x24_neon);
++    p.pu[LUMA_32x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x32_neon);
++    p.pu[LUMA_32x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x64_neon);
++    p.pu[LUMA_48x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_48x64_neon);
++    p.pu[LUMA_64x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x16_neon);
++    p.pu[LUMA_64x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x32_neon);
++    p.pu[LUMA_64x48].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x48_neon);
++    p.pu[LUMA_64x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x64_neon);
+ 
+     // Block_fill
+-    p.cu[BLOCK_4x4].blockfill_s = PFX(blockfill_s_4x4_neon);
+-    p.cu[BLOCK_8x8].blockfill_s = PFX(blockfill_s_8x8_neon);
+-    p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_neon);
+-    p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_neon);
++    p.cu[BLOCK_4x4].blockfill_s[ALIGNED] = PFX(blockfill_s_4x4_neon);
++    p.cu[BLOCK_8x8].blockfill_s[ALIGNED] = PFX(blockfill_s_8x8_neon);
++    p.cu[BLOCK_16x16].blockfill_s[ALIGNED] = PFX(blockfill_s_16x16_neon);
++    p.cu[BLOCK_32x32].blockfill_s[ALIGNED] = PFX(blockfill_s_32x32_neon);
+ 
+     // Blockcopy_ss
+     p.cu[BLOCK_4x4].copy_ss = PFX(blockcopy_ss_4x4_neon);
+@@ -495,21 +495,21 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_neon);
+ 
+     // pixel_add_ps
+-    p.cu[BLOCK_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
+-    p.cu[BLOCK_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
+-    p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
+-    p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
+-    p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_neon);
++    p.cu[BLOCK_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
++    p.cu[BLOCK_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
++    p.cu[BLOCK_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
++    p.cu[BLOCK_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
++    p.cu[BLOCK_64x64].add_ps[ALIGNED] = PFX(pixel_add_ps_64x64_neon);
+ 
+     // chroma add_ps
+-    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
+-    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
+-    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
+-    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
+-    p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps = PFX(pixel_add_ps_4x8_neon);
+-    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps = PFX(pixel_add_ps_8x16_neon);
+-    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_neon);
+-    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_neon);
++    p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
++    p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
++    p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
++    p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
++    p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps[ALIGNED] = PFX(pixel_add_ps_4x8_neon);
++    p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps[ALIGNED] = PFX(pixel_add_ps_8x16_neon);
++    p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps[ALIGNED] = PFX(pixel_add_ps_16x32_neon);
++    p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps[ALIGNED] = PFX(pixel_add_ps_32x64_neon);
+ 
+     // cpy2Dto1D_shr
+     p.cu[BLOCK_4x4].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_4x4_neon);
+@@ -518,10 +518,10 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32x32_neon);
+ 
+     // ssd_s
+-    p.cu[BLOCK_4x4].ssd_s = PFX(pixel_ssd_s_4x4_neon);
+-    p.cu[BLOCK_8x8].ssd_s = PFX(pixel_ssd_s_8x8_neon);
+-    p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16x16_neon);
+-    p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32x32_neon);
++    p.cu[BLOCK_4x4].ssd_s[ALIGNED] = PFX(pixel_ssd_s_4x4_neon);
++    p.cu[BLOCK_8x8].ssd_s[ALIGNED] = PFX(pixel_ssd_s_8x8_neon);
++    p.cu[BLOCK_16x16].ssd_s[ALIGNED] = PFX(pixel_ssd_s_16x16_neon);
++    p.cu[BLOCK_32x32].ssd_s[ALIGNED] = PFX(pixel_ssd_s_32x32_neon);
+ 
+     // sse_ss
+     p.cu[BLOCK_4x4].sse_ss = PFX(pixel_sse_ss_4x4_neon);
+@@ -548,10 +548,10 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_neon);
+ 
+     // calc_Residual
+-    p.cu[BLOCK_4x4].calcresidual = PFX(getResidual4_neon);
+-    p.cu[BLOCK_8x8].calcresidual = PFX(getResidual8_neon);
+-    p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_neon);
+-    p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_neon);
++    p.cu[BLOCK_4x4].calcresidual[ALIGNED] = PFX(getResidual4_neon);
++    p.cu[BLOCK_8x8].calcresidual[ALIGNED] = PFX(getResidual8_neon);
++    p.cu[BLOCK_16x16].calcresidual[ALIGNED] = PFX(getResidual16_neon);
++    p.cu[BLOCK_32x32].calcresidual[ALIGNED] = PFX(getResidual32_neon);
+ 
+     // sse_pp
+     p.cu[BLOCK_4x4].sse_pp = PFX(pixel_sse_pp_4x4_neon);
+@@ -722,31 +722,31 @@ void setupAssemblyPrimitives(EncoderPrim
+     p.pu[LUMA_64x64].sad_x4 = PFX(sad_x4_64x64_neon);
+ 
+     // pixel_avg_pp
+-    p.pu[LUMA_4x4].pixelavg_pp = PFX(pixel_avg_pp_4x4_neon);
+-    p.pu[LUMA_4x8].pixelavg_pp = PFX(pixel_avg_pp_4x8_neon);
+-    p.pu[LUMA_4x16].pixelavg_pp = PFX(pixel_avg_pp_4x16_neon);
+-    p.pu[LUMA_8x4].pixelavg_pp = PFX(pixel_avg_pp_8x4_neon);
+-    p.pu[LUMA_8x8].pixelavg_pp = PFX(pixel_avg_pp_8x8_neon);
+-    p.pu[LUMA_8x16].pixelavg_pp = PFX(pixel_avg_pp_8x16_neon);
+-    p.pu[LUMA_8x32].pixelavg_pp = PFX(pixel_avg_pp_8x32_neon);
+-    p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_pp_12x16_neon);
+-    p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_pp_16x4_neon);
+-    p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_pp_16x8_neon);
+-    p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_pp_16x12_neon);
+-    p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_pp_16x16_neon);
+-    p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_pp_16x32_neon);
+-    p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_pp_16x64_neon);
+-    p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_pp_24x32_neon);
+-    p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_pp_32x8_neon);
+-    p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_pp_32x16_neon);
+-    p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_pp_32x24_neon);
+-    p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_pp_32x32_neon);
+-    p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_pp_32x64_neon);
+-    p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_pp_48x64_neon);
+-    p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_pp_64x16_neon);
+-    p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_pp_64x32_neon);
+-    p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_pp_64x48_neon);
+-    p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_pp_64x64_neon);
++    p.pu[LUMA_4x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x4_neon);
++    p.pu[LUMA_4x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x8_neon);
++    p.pu[LUMA_4x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x16_neon);
++    p.pu[LUMA_8x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x4_neon);
++    p.pu[LUMA_8x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x8_neon);
++    p.pu[LUMA_8x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x16_neon);
++    p.pu[LUMA_8x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x32_neon);
++    p.pu[LUMA_12x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_12x16_neon);
++    p.pu[LUMA_16x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x4_neon);
++    p.pu[LUMA_16x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x8_neon);
++    p.pu[LUMA_16x12].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x12_neon);
++    p.pu[LUMA_16x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x16_neon);
++    p.pu[LUMA_16x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x32_neon);
++    p.pu[LUMA_16x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x64_neon);
++    p.pu[LUMA_24x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_24x32_neon);
++    p.pu[LUMA_32x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x8_neon);
++    p.pu[LUMA_32x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x16_neon);
++    p.pu[LUMA_32x24].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x24_neon);
++    p.pu[LUMA_32x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x32_neon);
++    p.pu[LUMA_32x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x64_neon);
++    p.pu[LUMA_48x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_48x64_neon);
++    p.pu[LUMA_64x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x16_neon);
++    p.pu[LUMA_64x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x32_neon);
++    p.pu[LUMA_64x48].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x48_neon);
++    p.pu[LUMA_64x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x64_neon);
+ 
+     // planecopy
+     p.planecopy_cp = PFX(pixel_planecopy_cp_neon);
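For readers of the patch above: x265 2.8 changed several entry points in its primitive function tables (addAvg, convert_p2s, blockfill_s, add_ps, ssd_s, calcresidual, scale1D_128to64, pixelavg_pp) from single function pointers into small arrays indexed by buffer alignment. That is why every 2.7-era scalar assignment in the ARM backend stopped compiling, and why each one is resolved here to the ALIGNED slot. Below is a minimal compilable sketch of the shape of that change; the NONALIGNED and NUM_ALIGNMENT_TYPES names, the struct name, and the simplified signature are illustrative assumptions, and only the ALIGNED index and the addAvg[...] assignment shape are taken from the patch itself.

    #include <cstdint>

    // Alignment classes for primitive tables (names assumed for this sketch).
    enum { NONALIGNED = 0, ALIGNED = 1, NUM_ALIGNMENT_TYPES = 2 };

    // Simplified stand-in; the real addAvg_t takes two sources and strides.
    typedef void (*addAvg_t)(const int16_t* src0, const int16_t* src1,
                             uint8_t* dst, intptr_t dstStride);

    struct PUPrimitives // hypothetical name for the per-PU function table
    {
        // 2.7 declared a single pointer (addAvg_t addAvg;), so plain
        // assignments compiled; 2.8 declares one slot per alignment class.
        addAvg_t addAvg[NUM_ALIGNMENT_TYPES];
    };

    static void addAvg_4x4_neon(const int16_t*, const int16_t*, uint8_t*, intptr_t) {}

    int main()
    {
        PUPrimitives pu = {};
        // pu.addAvg = addAvg_4x4_neon;       // 2.7 style: no longer compiles
        pu.addAvg[ALIGNED] = addAvg_4x4_neon; // 2.8 style, as in the patch
        return pu.addAvg[ALIGNED] ? 0 : 1;
    }

Filling only the ALIGNED slot leaves the remaining entries to x265's C fallbacks; per the changelog below, the point of the patch is simply to restore the ARM build.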
diff --git a/x265-4504219210793536d921ee4e0b3058698c630bf4.diff b/x265-4504219210793536d921ee4e0b3058698c630bf4.diff
new file mode 100644
index 0000000..30fe38e
--- /dev/null
+++ b/x265-4504219210793536d921ee4e0b3058698c630bf4.diff
@@ -0,0 +1,19 @@
+# HG changeset patch
+# User Jayashree
+# Date 1527224165 -19800
+# Node ID 4504219210793536d921ee4e0b3058698c630bf4
+# Parent cc2c5e46f3c87d27e3602af30b06ba6a0fbe2704
+Fix build error on ppc64le
+
+diff --git a/source/common/param.cpp b/source/common/param.cpp
+--- a/source/common/param.cpp
++++ b/source/common/param.cpp
+@@ -633,7 +633,7 @@
+         if (bValueWasNull)
+             p->cpuid = atobool(value);
+         else
+-            p->cpuid = parseCpuName(value, bError);
++            p->cpuid = parseCpuName(value, bError, false);
+ #endif
+     }
+     OPT("fps")
diff --git a/x265-detect_cpu_armhfp.patch b/x265-detect_cpu_armhfp.patch
index 3dfc10d..c9a92b8 100644
--- a/x265-detect_cpu_armhfp.patch
+++ b/x265-detect_cpu_armhfp.patch
@@ -50,7 +50,7 @@ diff -up x265_2.2/source/CMakeLists.txt.orig x265_2.2/source/CMakeLists.txt
  #include <intrin.h>
 -#elif ( !defined(__APPLE__) && defined (__GNUC__) && defined(__ARM_NEON__))
 -#include <arm_neon.h>
- #elif defined(__GNUC__)
+ #elif defined(__GNUC__) && (!defined(__clang__) || __clang_major__ < 4)
 +#if ( !defined(__APPLE__) && defined(__ARM_NEON__))
 +#include <arm_neon.h>
 +#endif
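Context for the one-line param.cpp fix above: x265 2.8 added a third parameter to parseCpuName() (2.8 introduced AVX-512 support, and the flag appears to gate it), but this call site, which ppc64le builds still compile, kept the old two-argument form, so the build broke there. The upstream changeset passes false, presumably leaving non-x86 behavior as it was in 2.7. A self-contained sketch with stand-in definitions follows; the function body and the bEnableAvx512 parameter name are assumptions, and only the three-argument call shape parseCpuName(value, bError, false) is taken from the patch.

    #include <iostream>
    #include <string>

    // Stand-in for x265's parseCpuName(); only the three-argument shape is
    // taken from the patch, the body and flag name are illustrative.
    static int parseCpuName(const std::string& value, bool& bError, bool bEnableAvx512)
    {
        bError = value.empty();
        if (bError || value == "none")
            return 0;
        (void)bEnableAvx512; // non-x86 paths can simply ignore the new flag
        return 1;            // pretend some SIMD level was recognized
    }

    int main()
    {
        bool bError = false;
        // The patched call site passes the new third argument explicitly.
        int cpuid = parseCpuName("neon", bError, /*bEnableAvx512=*/false);
        std::cout << "cpuid=" << cpuid << " error=" << bError << "\n";
        return bError ? 1 : 0;
    }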
diff --git a/x265.spec b/x265.spec
index 5cbc16c..be9618a 100644
--- a/x265.spec
+++ b/x265.spec
@@ -1,9 +1,9 @@
-%global _so_version 151
+%global _so_version 160
 
 Summary: H.265/HEVC encoder
 Name: x265
-Version: 2.7
-Release: 5%{?dist}
+Version: 2.8
+Release: 1%{?dist}
 URL: http://x265.org/
 # source/Lib/TLibCommon - BSD
 # source/Lib/TLibEncoder - BSD
@@ -18,7 +18,10 @@ Patch1: x265-high-bit-depth-soname.patch
 Patch2: x265-detect_cpu_armhfp.patch
 Patch3: x265-arm-cflags.patch
 Patch4: x265-pkgconfig_path_fix.patch
+Patch5: x265-4504219210793536d921ee4e0b3058698c630bf4.diff
+Patch6: x265-2.8-asm-primitives.patch
 
+BuildRequires: gcc-c++
 BuildRequires: cmake3
 BuildRequires: nasm
 BuildRequires: ninja-build
@@ -137,6 +140,11 @@ done
 %{_libdir}/pkgconfig/x265.pc
 
 %changelog
+* Thu Oct 04 2018 Sérgio Basto - 2.8-1
+- Update to 2.8, plus 2 patches to fix builds on non-x86 and ARM:
+  https://bitbucket.org/multicoreware/x265/issues/404/28-fails-to-build-on-ppc64le-gnu-linux
+  https://bitbucket.org/multicoreware/x265/issues/406/arm-assembly-fail-to-compile-on-18
+
 * Sun Aug 19 2018 Leigh Scott - 2.7-5
 - Rebuilt for Fedora 29 Mass Rebuild binutils issue