commit 985fd38e6f
@@ -0,0 +1 @@
SOURCES/x265-3.1.2.tar.gz
@@ -0,0 +1 @@
3557778827b3839468c40d6563754cee6f33bc75 SOURCES/x265-3.1.2.tar.gz
@@ -0,0 +1,35 @@
From: Adam Sampson <unknown@bitbucket>
Date: Sun, 14 Oct 2018 14:04:18 +0200
Subject: detect512 is needed on all architectures

---
source/common/cpu.cpp | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/source/common/cpu.cpp b/source/common/cpu.cpp
index 0681ff5..fa687da 100644
--- a/source/common/cpu.cpp
+++ b/source/common/cpu.cpp
@@ -110,6 +110,11 @@ const cpu_name_t cpu_names[] =
{ "", 0 },
};

+bool detect512()
+{
+ return(enable512);
+}
+
#if X265_ARCH_X86

extern "C" {
@@ -123,10 +128,6 @@ uint64_t PFX(cpu_xgetbv)(int xcr);
#pragma warning(disable: 4309) // truncation of constant value
#endif

-bool detect512()
-{
- return(enable512);
-}
uint32_t cpu_detect(bool benableavx512 )
{
@@ -0,0 +1,347 @@
--- ./source/common/arm/asm-primitives.cpp.orig 2018-05-21 02:33:10.000000000 -0600
+++ ./source/common/arm/asm-primitives.cpp 2018-05-28 20:38:37.302378303 -0600
@@ -48,77 +48,77 @@ void setupAssemblyPrimitives(EncoderPrim
p.ssim_4x4x2_core = PFX(ssim_4x4x2_core_neon);

// addAvg
- p.pu[LUMA_4x4].addAvg = PFX(addAvg_4x4_neon);
- p.pu[LUMA_4x8].addAvg = PFX(addAvg_4x8_neon);
- p.pu[LUMA_4x16].addAvg = PFX(addAvg_4x16_neon);
- p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_neon);
- p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_neon);
- p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_neon);
- p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_neon);
- p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_neon);
- p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_neon);
- p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_neon);
- p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_neon);
- p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_neon);
- p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_neon);
- p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_neon);
- p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_neon);
- p.pu[LUMA_32x8].addAvg = PFX(addAvg_32x8_neon);
- p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_neon);
- p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_neon);
- p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_neon);
- p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_neon);
- p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_neon);
- p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_neon);
- p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_neon);
- p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_neon);
- p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_neon);
+ p.pu[LUMA_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
+ p.pu[LUMA_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
+ p.pu[LUMA_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
+ p.pu[LUMA_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
+ p.pu[LUMA_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
+ p.pu[LUMA_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
+ p.pu[LUMA_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
+ p.pu[LUMA_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
+ p.pu[LUMA_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
+ p.pu[LUMA_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
+ p.pu[LUMA_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
+ p.pu[LUMA_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+ p.pu[LUMA_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+ p.pu[LUMA_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
+ p.pu[LUMA_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
+ p.pu[LUMA_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
+ p.pu[LUMA_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+ p.pu[LUMA_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
+ p.pu[LUMA_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+ p.pu[LUMA_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
+ p.pu[LUMA_48x64].addAvg[ALIGNED] = PFX(addAvg_48x64_neon);
+ p.pu[LUMA_64x16].addAvg[ALIGNED] = PFX(addAvg_64x16_neon);
+ p.pu[LUMA_64x32].addAvg[ALIGNED] = PFX(addAvg_64x32_neon);
+ p.pu[LUMA_64x48].addAvg[ALIGNED] = PFX(addAvg_64x48_neon);
+ p.pu[LUMA_64x64].addAvg[ALIGNED] = PFX(addAvg_64x64_neon);

// chroma addAvg
- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg = PFX(addAvg_4x2_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg = PFX(addAvg_4x4_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg = PFX(addAvg_4x8_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg = PFX(addAvg_4x16_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg = PFX(addAvg_6x8_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg = PFX(addAvg_8x2_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg = PFX(addAvg_8x6_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg = PFX(addAvg_8x8_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg = PFX(addAvg_8x16_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg = PFX(addAvg_8x32_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg = PFX(addAvg_16x4_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg = PFX(addAvg_16x8_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg = PFX(addAvg_24x32_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg = PFX(addAvg_32x8_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_neon);
- p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_neon);
-
- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg = PFX(addAvg_4x8_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg = PFX(addAvg_4x16_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg = PFX(addAvg_4x32_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg = PFX(addAvg_6x16_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_neon);
- p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg[ALIGNED] = PFX(addAvg_4x2_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg[ALIGNED] = PFX(addAvg_4x4_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg[ALIGNED] = PFX(addAvg_6x8_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg[ALIGNED] = PFX(addAvg_8x2_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg[ALIGNED] = PFX(addAvg_8x6_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg[ALIGNED] = PFX(addAvg_16x4_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg[ALIGNED] = PFX(addAvg_32x8_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg[ALIGNED] = PFX(addAvg_4x8_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg[ALIGNED] = PFX(addAvg_4x16_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg[ALIGNED] = PFX(addAvg_4x32_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg[ALIGNED] = PFX(addAvg_6x16_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg[ALIGNED] = PFX(addAvg_8x4_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg[ALIGNED] = PFX(addAvg_8x8_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg[ALIGNED] = PFX(addAvg_8x12_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg[ALIGNED] = PFX(addAvg_8x16_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg[ALIGNED] = PFX(addAvg_8x32_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg[ALIGNED] = PFX(addAvg_8x64_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg[ALIGNED] = PFX(addAvg_12x32_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg[ALIGNED] = PFX(addAvg_16x8_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg[ALIGNED] = PFX(addAvg_16x24_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg[ALIGNED] = PFX(addAvg_24x64_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg[ALIGNED] = PFX(addAvg_32x48_neon);
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);

// quant
p.quant = PFX(quant_neon);
@@ -402,7 +402,7 @@ void setupAssemblyPrimitives(EncoderPrim
p.scale2D_64to32 = PFX(scale2D_64to32_neon);

// scale1D_128to64
- p.scale1D_128to64 = PFX(scale1D_128to64_neon);
+ p.scale1D_128to64[ALIGNED] = PFX(scale1D_128to64_neon);

// copy_count
p.cu[BLOCK_4x4].copy_cnt = PFX(copy_cnt_4_neon);
@@ -411,37 +411,37 @@ void setupAssemblyPrimitives(EncoderPrim
p.cu[BLOCK_32x32].copy_cnt = PFX(copy_cnt_32_neon);

// filterPixelToShort
- p.pu[LUMA_4x4].convert_p2s = PFX(filterPixelToShort_4x4_neon);
- p.pu[LUMA_4x8].convert_p2s = PFX(filterPixelToShort_4x8_neon);
- p.pu[LUMA_4x16].convert_p2s = PFX(filterPixelToShort_4x16_neon);
- p.pu[LUMA_8x4].convert_p2s = PFX(filterPixelToShort_8x4_neon);
- p.pu[LUMA_8x8].convert_p2s = PFX(filterPixelToShort_8x8_neon);
- p.pu[LUMA_8x16].convert_p2s = PFX(filterPixelToShort_8x16_neon);
- p.pu[LUMA_8x32].convert_p2s = PFX(filterPixelToShort_8x32_neon);
- p.pu[LUMA_12x16].convert_p2s = PFX(filterPixelToShort_12x16_neon);
- p.pu[LUMA_16x4].convert_p2s = PFX(filterPixelToShort_16x4_neon);
- p.pu[LUMA_16x8].convert_p2s = PFX(filterPixelToShort_16x8_neon);
- p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_neon);
- p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_neon);
- p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_neon);
- p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_neon);
- p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_neon);
- p.pu[LUMA_32x8].convert_p2s = PFX(filterPixelToShort_32x8_neon);
- p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_neon);
- p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_neon);
- p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_neon);
- p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_neon);
- p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_neon);
- p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_neon);
- p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_neon);
- p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_neon);
- p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_neon);
+ p.pu[LUMA_4x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x4_neon);
+ p.pu[LUMA_4x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x8_neon);
+ p.pu[LUMA_4x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_4x16_neon);
+ p.pu[LUMA_8x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x4_neon);
+ p.pu[LUMA_8x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x8_neon);
+ p.pu[LUMA_8x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x16_neon);
+ p.pu[LUMA_8x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_8x32_neon);
+ p.pu[LUMA_12x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_12x16_neon);
+ p.pu[LUMA_16x4].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x4_neon);
+ p.pu[LUMA_16x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x8_neon);
+ p.pu[LUMA_16x12].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x12_neon);
+ p.pu[LUMA_16x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x16_neon);
+ p.pu[LUMA_16x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x32_neon);
+ p.pu[LUMA_16x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x64_neon);
+ p.pu[LUMA_24x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_24x32_neon);
+ p.pu[LUMA_32x8].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x8_neon);
+ p.pu[LUMA_32x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x16_neon);
+ p.pu[LUMA_32x24].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x24_neon);
+ p.pu[LUMA_32x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x32_neon);
+ p.pu[LUMA_32x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x64_neon);
+ p.pu[LUMA_48x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_48x64_neon);
+ p.pu[LUMA_64x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x16_neon);
+ p.pu[LUMA_64x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x32_neon);
+ p.pu[LUMA_64x48].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x48_neon);
+ p.pu[LUMA_64x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x64_neon);

// Block_fill
- p.cu[BLOCK_4x4].blockfill_s = PFX(blockfill_s_4x4_neon);
- p.cu[BLOCK_8x8].blockfill_s = PFX(blockfill_s_8x8_neon);
- p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_neon);
- p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_neon);
+ p.cu[BLOCK_4x4].blockfill_s[ALIGNED] = PFX(blockfill_s_4x4_neon);
+ p.cu[BLOCK_8x8].blockfill_s[ALIGNED] = PFX(blockfill_s_8x8_neon);
+ p.cu[BLOCK_16x16].blockfill_s[ALIGNED] = PFX(blockfill_s_16x16_neon);
+ p.cu[BLOCK_32x32].blockfill_s[ALIGNED] = PFX(blockfill_s_32x32_neon);

// Blockcopy_ss
p.cu[BLOCK_4x4].copy_ss = PFX(blockcopy_ss_4x4_neon);
@@ -495,21 +495,21 @@ void setupAssemblyPrimitives(EncoderPrim
p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_neon);

// pixel_add_ps
- p.cu[BLOCK_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
- p.cu[BLOCK_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
- p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
- p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
- p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_neon);
+ p.cu[BLOCK_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
+ p.cu[BLOCK_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
+ p.cu[BLOCK_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
+ p.cu[BLOCK_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
+ p.cu[BLOCK_64x64].add_ps[ALIGNED] = PFX(pixel_add_ps_64x64_neon);

// chroma add_ps
- p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps = PFX(pixel_add_ps_4x4_neon);
- p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps = PFX(pixel_add_ps_8x8_neon);
- p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
- p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
- p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps = PFX(pixel_add_ps_4x8_neon);
- p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps = PFX(pixel_add_ps_8x16_neon);
- p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_neon);
- p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_neon);
+ p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps[ALIGNED] = PFX(pixel_add_ps_4x4_neon);
+ p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps[ALIGNED] = PFX(pixel_add_ps_8x8_neon);
+ p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
+ p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
+ p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps[ALIGNED] = PFX(pixel_add_ps_4x8_neon);
+ p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps[ALIGNED] = PFX(pixel_add_ps_8x16_neon);
+ p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps[ALIGNED] = PFX(pixel_add_ps_16x32_neon);
+ p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps[ALIGNED] = PFX(pixel_add_ps_32x64_neon);

// cpy2Dto1D_shr
p.cu[BLOCK_4x4].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_4x4_neon);
@@ -518,10 +518,10 @@ void setupAssemblyPrimitives(EncoderPrim
p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32x32_neon);

// ssd_s
- p.cu[BLOCK_4x4].ssd_s = PFX(pixel_ssd_s_4x4_neon);
- p.cu[BLOCK_8x8].ssd_s = PFX(pixel_ssd_s_8x8_neon);
- p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16x16_neon);
- p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32x32_neon);
+ p.cu[BLOCK_4x4].ssd_s[ALIGNED] = PFX(pixel_ssd_s_4x4_neon);
+ p.cu[BLOCK_8x8].ssd_s[ALIGNED] = PFX(pixel_ssd_s_8x8_neon);
+ p.cu[BLOCK_16x16].ssd_s[ALIGNED] = PFX(pixel_ssd_s_16x16_neon);
+ p.cu[BLOCK_32x32].ssd_s[ALIGNED] = PFX(pixel_ssd_s_32x32_neon);

// sse_ss
p.cu[BLOCK_4x4].sse_ss = PFX(pixel_sse_ss_4x4_neon);
@@ -548,10 +548,10 @@ void setupAssemblyPrimitives(EncoderPrim
p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_neon);

// calc_Residual
- p.cu[BLOCK_4x4].calcresidual = PFX(getResidual4_neon);
- p.cu[BLOCK_8x8].calcresidual = PFX(getResidual8_neon);
- p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_neon);
- p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_neon);
+ p.cu[BLOCK_4x4].calcresidual[ALIGNED] = PFX(getResidual4_neon);
+ p.cu[BLOCK_8x8].calcresidual[ALIGNED] = PFX(getResidual8_neon);
+ p.cu[BLOCK_16x16].calcresidual[ALIGNED] = PFX(getResidual16_neon);
+ p.cu[BLOCK_32x32].calcresidual[ALIGNED] = PFX(getResidual32_neon);

// sse_pp
p.cu[BLOCK_4x4].sse_pp = PFX(pixel_sse_pp_4x4_neon);
@@ -722,31 +722,31 @@ void setupAssemblyPrimitives(EncoderPrim
p.pu[LUMA_64x64].sad_x4 = PFX(sad_x4_64x64_neon);

// pixel_avg_pp
- p.pu[LUMA_4x4].pixelavg_pp = PFX(pixel_avg_pp_4x4_neon);
- p.pu[LUMA_4x8].pixelavg_pp = PFX(pixel_avg_pp_4x8_neon);
- p.pu[LUMA_4x16].pixelavg_pp = PFX(pixel_avg_pp_4x16_neon);
- p.pu[LUMA_8x4].pixelavg_pp = PFX(pixel_avg_pp_8x4_neon);
- p.pu[LUMA_8x8].pixelavg_pp = PFX(pixel_avg_pp_8x8_neon);
- p.pu[LUMA_8x16].pixelavg_pp = PFX(pixel_avg_pp_8x16_neon);
- p.pu[LUMA_8x32].pixelavg_pp = PFX(pixel_avg_pp_8x32_neon);
- p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_pp_12x16_neon);
- p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_pp_16x4_neon);
- p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_pp_16x8_neon);
- p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_pp_16x12_neon);
- p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_pp_16x16_neon);
- p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_pp_16x32_neon);
- p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_pp_16x64_neon);
- p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_pp_24x32_neon);
- p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_pp_32x8_neon);
- p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_pp_32x16_neon);
- p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_pp_32x24_neon);
- p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_pp_32x32_neon);
- p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_pp_32x64_neon);
- p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_pp_48x64_neon);
- p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_pp_64x16_neon);
- p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_pp_64x32_neon);
- p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_pp_64x48_neon);
- p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_pp_64x64_neon);
+ p.pu[LUMA_4x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x4_neon);
+ p.pu[LUMA_4x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x8_neon);
+ p.pu[LUMA_4x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_4x16_neon);
+ p.pu[LUMA_8x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x4_neon);
+ p.pu[LUMA_8x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x8_neon);
+ p.pu[LUMA_8x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x16_neon);
+ p.pu[LUMA_8x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_8x32_neon);
+ p.pu[LUMA_12x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_12x16_neon);
+ p.pu[LUMA_16x4].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x4_neon);
+ p.pu[LUMA_16x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x8_neon);
+ p.pu[LUMA_16x12].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x12_neon);
+ p.pu[LUMA_16x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x16_neon);
+ p.pu[LUMA_16x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x32_neon);
+ p.pu[LUMA_16x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x64_neon);
+ p.pu[LUMA_24x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_24x32_neon);
+ p.pu[LUMA_32x8].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x8_neon);
+ p.pu[LUMA_32x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x16_neon);
+ p.pu[LUMA_32x24].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x24_neon);
+ p.pu[LUMA_32x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x32_neon);
+ p.pu[LUMA_32x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x64_neon);
+ p.pu[LUMA_48x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_48x64_neon);
+ p.pu[LUMA_64x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x16_neon);
+ p.pu[LUMA_64x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x32_neon);
+ p.pu[LUMA_64x48].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x48_neon);
+ p.pu[LUMA_64x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x64_neon);

// planecopy
p.planecopy_cp = PFX(pixel_planecopy_cp_neon);
@@ -0,0 +1,60 @@
diff -up x265_2.7/source/CMakeLists.txt.cflags x265_2.7/source/CMakeLists.txt
--- x265_2.7/source/CMakeLists.txt.cflags 2018-02-27 20:19:33.328932385 +0100
+++ x265_2.7/source/CMakeLists.txt 2018-02-27 20:29:14.808956583 +0100
@@ -233,18 +233,6 @@ if(GCC)
endif()
endif()
endif()
- if(ARM AND CROSS_COMPILE_ARM)
- set(ARM_ARGS -march=armv6 -mfloat-abi=soft -mfpu=vfp -marm -fPIC)
- elseif(ARM)
- find_package(Neon)
- if(CPU_HAS_NEON)
- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=neon -marm -fPIC)
- add_definitions(-DHAVE_NEON)
- else()
- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=vfp -marm)
- endif()
- endif()
- add_definitions(${ARM_ARGS})
if(FPROFILE_GENERATE)
if(INTEL_CXX)
add_definitions(-prof-gen -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
@@ -517,7 +505,7 @@ if((MSVC_IDE OR XCODE OR GCC) AND ENABLE
add_custom_command(
OUTPUT ${ASM}.${SUFFIX}
COMMAND ${CMAKE_CXX_COMPILER}
- ARGS ${ARM_ARGS} -c ${ASM_SRC} -o ${ASM}.${SUFFIX}
+ ARGS ${CFLAGS} -c ${ASM_SRC} -o ${ASM}.${SUFFIX}
DEPENDS ${ASM_SRC})
endforeach()
elseif(X86)
diff -up x265_2.7/source/dynamicHDR10/CMakeLists.txt.cflags x265_2.7/source/dynamicHDR10/CMakeLists.txt
--- x265_2.7/source/dynamicHDR10/CMakeLists.txt.cflags 2018-02-21 09:55:56.000000000 +0100
+++ x265_2.7/source/dynamicHDR10/CMakeLists.txt 2018-02-27 20:29:45.377062994 +0100
@@ -42,18 +42,6 @@ if(GCC)
endif()
endif()
endif()
- if(ARM AND CROSS_COMPILE_ARM)
- set(ARM_ARGS -march=armv6 -mfloat-abi=soft -mfpu=vfp -marm -fPIC)
- elseif(ARM)
- find_package(Neon)
- if(CPU_HAS_NEON)
- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=neon -marm -fPIC)
- add_definitions(-DHAVE_NEON)
- else()
- set(ARM_ARGS -mcpu=native -mfloat-abi=hard -mfpu=vfp -marm)
- endif()
- endif()
- add_definitions(${ARM_ARGS})
if(FPROFILE_GENERATE)
if(INTEL_CXX)
add_definitions(-prof-gen -prof-dir="${CMAKE_CURRENT_BINARY_DIR}")
@@ -150,4 +138,4 @@ set(BIN_INSTALL_DIR bin CACHE STRING "In
option(ENABLE_SHARED "Build shared library" OFF)

install(FILES hdr10plus.h DESTINATION include)
-endif()
\ No newline at end of file
+endif()
@@ -0,0 +1,59 @@
diff -up x265_2.2/source/CMakeLists.txt.orig x265_2.2/source/CMakeLists.txt
--- x265_2.2/source/CMakeLists.txt.orig 2016-12-23 06:57:39.000000000 +0100
+++ x265_2.2/source/CMakeLists.txt 2017-01-03 11:18:34.773738470 +0100
@@ -41,9 +41,11 @@ SET(CMAKE_MODULE_PATH "${PROJECT_SOURCE_
# System architecture detection
string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" SYSPROC)
set(X86_ALIASES x86 i386 i686 x86_64 amd64)
-set(ARM_ALIASES armv6l armv7l)
+set(ARMv6_ALIASES armv6l)
+set(ARMv7_ALIASES armv7l)
list(FIND X86_ALIASES "${SYSPROC}" X86MATCH)
-list(FIND ARM_ALIASES "${SYSPROC}" ARMMATCH)
+list(FIND ARMv6_ALIASES "${SYSPROC}" ARMv6MATCH)
+list(FIND ARMv7_ALIASES "${SYSPROC}" ARMv7MATCH)
set(POWER_ALIASES ppc64 ppc64le)
list(FIND POWER_ALIASES "${SYSPROC}" POWERMATCH)
if("${SYSPROC}" STREQUAL "" OR X86MATCH GREATER "-1")
@@ -65,15 +67,24 @@ elseif(POWERMATCH GREATER "-1")
add_definitions(-DPPC64=1)
message(STATUS "Detected POWER PPC64 target processor")
endif()
-elseif(ARMMATCH GREATER "-1")
+elseif(ARMv6MATCH GREATER "-1")
if(CROSS_COMPILE_ARM)
message(STATUS "Cross compiling for ARM arch")
else()
set(CROSS_COMPILE_ARM 0)
endif()
- message(STATUS "Detected ARM target processor")
+ message(STATUS "Detected ARMv6 target processor")
set(ARM 1)
add_definitions(-DX265_ARCH_ARM=1 -DHAVE_ARMV6=1)
+elseif(ARMv7MATCH GREATER "-1")
+ if(CROSS_COMPILE_ARM)
+ message(STATUS "Cross compiling for ARM arch")
+ else()
+ set(CROSS_COMPILE_ARM 0)
+ endif()
+ message(STATUS "Detected ARMv7 target processor")
+ set(ARM 1)
+ add_definitions(-DX265_ARCH_ARM=1 -DHAVE_ARMV7=1)
else()
message(STATUS "CMAKE_SYSTEM_PROCESSOR value `${CMAKE_SYSTEM_PROCESSOR}` is unknown")
message(STATUS "Please add this value near ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE}")
--- x265_v2.6/source/test/testharness.h.orig 2017-12-30 22:27:49.827620181 +0000
+++ x265_v2.6/source/test/testharness.h 2017-12-30 22:30:53.239500941 +0000
@@ -70,9 +70,10 @@ protected:
#include <intrin.h>
#elif (!defined(__APPLE__) && (defined (__GNUC__) && (defined(__x86_64__) || defined(__i386__))))
#include <x86intrin.h>
-#elif ( !defined(__APPLE__) && defined (__GNUC__) && defined(__ARM_NEON__))
-#include <arm_neon.h>
#elif defined(__GNUC__) && (!defined(__clang__) || __clang_major__ < 4)
+#if ( !defined(__APPLE__) && defined(__ARM_NEON__))
+#include <arm_neon.h>
+#endif
/* fallback for older GCC/MinGW */
static inline uint32_t __rdtsc(void)
{
@@ -0,0 +1,31 @@
--- a/source/CMakeLists.txt
+++ b/source/CMakeLists.txt
@@ -611,7 +611,15 @@
if(MSVC)
set_target_properties(x265-shared PROPERTIES OUTPUT_NAME libx265)
else()
- set_target_properties(x265-shared PROPERTIES OUTPUT_NAME x265)
+ if(HIGH_BIT_DEPTH)
+ if(MAIN12)
+ set_target_properties(x265-shared PROPERTIES OUTPUT_NAME x265_main12)
+ else()
+ set_target_properties(x265-shared PROPERTIES OUTPUT_NAME x265_main10)
+ endif()
+ else()
+ set_target_properties(x265-shared PROPERTIES OUTPUT_NAME x265)
+ endif(HIGH_BIT_DEPTH)
endif()
if(UNIX)
set_target_properties(x265-shared PROPERTIES VERSION ${X265_BUILD})
--- a/source/encoder/api.cpp
+++ b/source/encoder/api.cpp
@@ -704,7 +704,7 @@
#define ext ".dylib"
#else
#include <dlfcn.h>
-#define ext ".so"
+#define ext ".so." xstr(X265_BUILD)
#endif
#if defined(__GNUC__) && __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wcast-function-type"
@@ -0,0 +1,11 @@
--- a/source/CMakeLists.txt
+++ b/source/CMakeLists.txt
@@ -212,7 +212,7 @@
add_definitions(-std=gnu++98)
endif()
if(ENABLE_PIC)
- add_definitions(-fPIC)
+ add_definitions(-fPIC -DPIC)
endif(ENABLE_PIC)
if(NATIVE_BUILD)
if(INTEL_CXX)
@@ -0,0 +1,11 @@
--- a/source/x265.pc.in
+++ b/source/x265.pc.in
@@ -1,6 +1,6 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
-libdir=${exec_prefix}/@LIB_INSTALL_DIR@
+libdir=@LIB_INSTALL_DIR@
includedir=${prefix}/include

Name: @CMAKE_PROJECT_NAME@
@@ -0,0 +1,269 @@
%global _so_version 176

Summary: H.265/HEVC encoder
Name: x265
Version: 3.1.2
Release: 1%{?dist}
URL: http://x265.org/
# source/Lib/TLibCommon - BSD
# source/Lib/TLibEncoder - BSD
# everything else - GPLv2+
License: GPLv2+ and BSD

# Official upstream is http://hg.videolan.org/ - using github mirror
Source0: https://github.com/videolan/x265/archive/%{version}/%{name}-%{version}.tar.gz

# fix building as PIC
Patch0: x265-pic.patch
Patch1: x265-high-bit-depth-soname.patch
Patch2: x265-detect_cpu_armhfp.patch
Patch3: x265-arm-cflags.patch
Patch4: x265-pkgconfig_path_fix.patch
Patch5: x265-2.8-asm-primitives.patch

BuildRequires: gcc-c++
BuildRequires: cmake3
%{?el7:BuildRequires: epel-rpm-macros}
BuildRequires: nasm
BuildRequires: ninja-build

%ifnarch armv7hl armv7hnl s390 s390x
BuildRequires: numactl-devel
%endif

%description
The primary objective of x265 is to become the best H.265/HEVC encoder
available anywhere, offering the highest compression efficiency and the highest
performance on a wide variety of hardware platforms.

This package contains the command line encoder.

%package libs
Summary: H.265/HEVC encoder library

%description libs
The primary objective of x265 is to become the best H.265/HEVC encoder
available anywhere, offering the highest compression efficiency and the
highest performance on a wide variety of hardware platforms.

This package contains the shared library.

%package devel
Summary: H.265/HEVC encoder library development files
Requires: %{name}-libs%{?_isa} = %{version}-%{release}

%description devel
The primary objective of x265 is to become the best H.265/HEVC encoder
available anywhere, offering the highest compression efficiency and the highest
performance on a wide variety of hardware platforms.

This package contains the shared library development files.

%prep
%autosetup -p1

%build
# High depth libraries (from source/h265.h):
# If the requested bitDepth is not supported by the linked libx265,
# it will attempt to dynamically bind x265_api_get() from a shared
# library with an appropriate name:
# 8bit: libx265_main.so
# 10bit: libx265_main10.so
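# (Illustrative note for packagers, not used by the build itself: an application
#  linked against the 8-bit libx265 can request a deeper encoder at run time
#  roughly like
#      const x265_api *api = x265_api_get(10);   /* declared in <x265.h> */
#  and libx265 then dlopen()s the matching libx265_main10 shared object, which
#  is why the 10/12 bit libraries are built and shipped alongside the 8 bit one.)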

build() {
%cmake3 -Wno-dev -G "Ninja" \
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON \
-DCMAKE_SKIP_RPATH:BOOL=YES \
-DENABLE_PIC:BOOL=ON \
-DENABLE_TESTS:BOOL=ON \
$* \
../source
%ninja_build
}

# High depth 10/12 bit libraries are supported only on 64 bit. They require
# disabled AltiVec instructions for building on ppc64/ppc64le.
%ifarch x86_64 aarch64 ppc64 ppc64le
mkdir 10bit; pushd 10bit
build -DENABLE_CLI=OFF -DENABLE_ALTIVEC=OFF -DHIGH_BIT_DEPTH=ON
popd

mkdir 12bit; pushd 12bit
build -DENABLE_CLI=OFF -DENABLE_ALTIVEC=OFF -DHIGH_BIT_DEPTH=ON -DMAIN12=ON
popd
%endif

# 8 bit base library + encoder
mkdir 8bit; pushd 8bit
build
popd

%install
for i in 8 10 12; do
if [ -d ${i}bit ]; then
pushd ${i}bit
%ninja_install
# Remove unversioned library, should not be linked to
rm -f %{buildroot}%{_libdir}/libx265_main${i}.so
popd
fi
done

find %{buildroot} -name "*.a" -delete

%check
for i in 8 10 12; do
if [ -d ${i}bit ]; then
pushd ${i}bit
test/TestBench || :
popd
fi
done

%ldconfig_scriptlets libs

%files
%{_bindir}/x265

%files libs
%license COPYING
%{_libdir}/libx265.so.%{_so_version}
%ifarch x86_64 aarch64 ppc64 ppc64le
%{_libdir}/libx265_main10.so.%{_so_version}
%{_libdir}/libx265_main12.so.%{_so_version}
%endif

%files devel
%doc doc/*
%{_includedir}/x265.h
%{_includedir}/x265_config.h
%{_libdir}/libx265.so
%{_libdir}/pkgconfig/x265.pc

%changelog
* Mon Feb 26 2024 Sergey Cherevko <s.cherevko@msvsphere-os.ru> - 3.1.2-1
- Rebuilt for MSVSphere 8.9

* Sun Aug 04 2019 Leigh Scott <leigh123linux@googlemail.com> - 3.1.2-1
- Update to 3.1.2

* Fri Jun 28 2019 Nicolas Chauvet <kwizart@gmail.com> - 3.1-1
- Update to 3.1
- Switch to github mirror

* Tue Mar 05 2019 RPM Fusion Release Engineering <leigh123linux@gmail.com> - 3.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild

* Thu Feb 28 2019 Leigh Scott <leigh123linux@googlemail.com> - 3.0-1
- Update to 3.0

* Sun Dec 30 2018 Leigh Scott <leigh123linux@googlemail.com> - 2.9-3
- Rebuild against newer nasm on el7 (rfbz #5128)

* Wed Nov 21 2018 Antonio Trande <sagitter@fedoraproject.org> - 2.9-2
- Rebuild for ffmpeg-3.* on el7

* Sun Nov 18 2018 Leigh Scott <leigh123linux@googlemail.com> - 2.9-1
- Update to 2.9

* Thu Oct 04 2018 Sérgio Basto <sergio@serjux.com> - 2.8-1
- Update to 2.8 more 2 patches to fix builds on non-x86 and arm
https://bitbucket.org/multicoreware/x265/issues/404/28-fails-to-build-on-ppc64le-gnu-linux
https://bitbucket.org/multicoreware/x265/issues/406/arm-assembly-fail-to-compile-on-18

* Sun Aug 19 2018 Leigh Scott <leigh123linux@googlemail.com> - 2.7-5
- Rebuilt for Fedora 29 Mass Rebuild binutils issue

* Fri Jul 27 2018 RPM Fusion Release Engineering <leigh123linux@gmail.com> - 2.7-4
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild

* Sun Apr 08 2018 Leigh Scott <leigh123linux@googlemail.com> - 2.7-3
- Fix pkgconfig file (rfbz #4853)

* Tue Feb 27 2018 Nicolas Chauvet <kwizart@gmail.com> - 2.7-2
- Fix CFLAGS on ARM

* Tue Feb 27 2018 Leigh Scott <leigh123linux@googlemail.com> - 2.7-1
- update to 2.7
- Drop shared test patch as it causes nasm build to fail
- Fix scriptlets
- Use ninja to build

* Sat Dec 30 2017 Sérgio Basto <sergio@serjux.com> - 2.6-1
- Update x265 to 2.6

* Mon Oct 16 2017 Leigh Scott <leigh123linux@googlemail.com> - 2.5-1
- update to 2.5

* Thu Aug 31 2017 RPM Fusion Release Engineering <kwizart@rpmfusion.org> - 2.4-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild

* Sat Apr 29 2017 Leigh Scott <leigh123linux@googlemail.com> - 2.4-1
- update to 2.4

* Mon Apr 10 2017 Simone Caronni <negativo17@gmail.com> - 2.2-3
- Use source from multicoreware website.
- Clean up SPEC file a bit (formatting, 80 char wide descriptions).
- Enable shared 10/12 bit libraries on 64 bit architectures.

* Mon Mar 20 2017 RPM Fusion Release Engineering <kwizart@rpmfusion.org> - 2.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild

* Tue Jan 03 2017 Dominik Mierzejewski <rpm@greysector.net> - 2.2-1
- update to 2.2
- spell out SO version in file list
- fix typo in patch

* Mon Nov 07 2016 Sérgio Basto <sergio@serjux.com> - 2.1-1
- Update to 2.1

* Thu Aug 18 2016 Sérgio Basto <sergio@serjux.com> - 1.9-3
- Clean spec, Vascom patches series, rfbz #4199, add license tag

* Tue Jul 19 2016 Dominik Mierzejewski <rpm@greysector.net> - 1.9-2
- use https for source URL
- enable NUMA support
- make sure Fedora compiler flags are used on ARM

* Fri Apr 08 2016 Adrian Reber <adrian@lisas.de> - 1.9-1
- Update to 1.9

* Sun Oct 25 2015 Dominik Mierzejewski <rpm@greysector.net> 1.8-2
- fix building as PIC
- update SO version in file list

* Sat Oct 24 2015 Nicolas Chauvet <kwizart@gmail.com> - 1.8-1
- Update to 1.8
- Avoid tests for now

* Wed Apr 15 2015 Dominik Mierzejewski <rpm@greysector.net> 1.6-1
- update to 1.6 (ABI bump, rfbz#3593)
- release tarballs are now hosted on videolan.org
- drop obsolete patches

* Thu Dec 18 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-6
- fix build on armv7l arch (partially fix rfbz#3361, patch by Nicolas Chauvet)
- don't run tests on ARM for now (rfbz#3361)

* Sun Aug 17 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-5
- don't include contributor agreement in doc
- make sure /usr/share/doc/x265 is owned
- add a comment noting which files are BSD-licenced

* Fri Aug 08 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-4
- don't create bogus soname (patch by Xavier)

* Thu Jul 17 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-3
- fix tr call to remove DOS EOL
- build the library with -fPIC on arm and i686, too

* Sun Jul 13 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-2
- use version in source URL
- update License tag
- fix EOL in drag-uncrustify.bat
- don't link test binaries with shared binary on x86 (segfault)

* Thu Jul 10 2014 Dominik Mierzejewski <rpm@greysector.net> 1.2-1
- initial build
- fix pkgconfig file install location
- link test binaries with shared library