From 7cc1a55c2ea5f6b8682a25545fdde6a28a61194c Mon Sep 17 00:00:00 2001
From: Stephan Raue
Date: Thu, 5 Jul 2012 18:25:01 +0200
Subject: [PATCH] ffmpeg: update XBMC upstream patches

Signed-off-by: Stephan Raue
---
 ...ependent-code-to-access-data-symbols.patch | 207 ++++++++++++++++++
 1 file changed, 207 insertions(+)
 create mode 100644 packages/multimedia/ffmpeg/patches/ffmpeg-0.10.4-0030-ARM-generate-position-independent-code-to-access-data-symbols.patch

diff --git a/packages/multimedia/ffmpeg/patches/ffmpeg-0.10.4-0030-ARM-generate-position-independent-code-to-access-data-symbols.patch b/packages/multimedia/ffmpeg/patches/ffmpeg-0.10.4-0030-ARM-generate-position-independent-code-to-access-data-symbols.patch
new file mode 100644
index 0000000000..b31f59b481
--- /dev/null
+++ b/packages/multimedia/ffmpeg/patches/ffmpeg-0.10.4-0030-ARM-generate-position-independent-code-to-access-data-symbols.patch
@@ -0,0 +1,207 @@
+From 2ac4f22e8cea4489431cf879edba31124f19c3ba Mon Sep 17 00:00:00 2001
+From: Memphiz
+Date: Fri, 29 Jun 2012 19:41:48 +0200
+Subject: [PATCH] [ffmpeg] - ARM: generate position independent code to access
+ data symbols - this finally fixes our ATV2 builds. I send
+ more than a really big THX to M.Rullgard from libav for
+ making this happen.
+
+---
+ lib/ffmpeg/libavcodec/arm/ac3dsp_armv6.S | 4 +-
+ lib/ffmpeg/libavcodec/arm/asm.S | 73 +++++++++++++++++++++++++++-
+ lib/ffmpeg/libavcodec/arm/fft_fixed_neon.S | 2 +-
+ lib/ffmpeg/libavcodec/arm/fft_neon.S | 4 +-
+ lib/ffmpeg/libavcodec/arm/vp3dsp_neon.S | 3 +-
+ lib/ffmpeg/libavcodec/arm/vp8_armv6.S | 4 +-
+ 6 files changed, 80 insertions(+), 10 deletions(-)
+
+diff --git a/libavcodec/arm/ac3dsp_armv6.S b/libavcodec/arm/ac3dsp_armv6.S
+index b6aee86..97099d6 100644
+--- a/libavcodec/arm/ac3dsp_armv6.S
++++ b/libavcodec/arm/ac3dsp_armv6.S
+@@ -26,8 +26,8 @@ function ff_ac3_bit_alloc_calc_bap_armv6, export=1
+ beq 4f
+ push {r4-r11,lr}
+ add r5, sp, #40
+- movrel r4, X(ff_ac3_bin_to_band_tab)
+- movrel lr, X(ff_ac3_band_start_tab)
++ movrelx r4, X(ff_ac3_bin_to_band_tab), r11
++ movrelx lr, X(ff_ac3_band_start_tab)
+ ldm r5, {r5-r7}
+ ldrb r4, [r4, r2]
+ add r1, r1, r2, lsl #1 @ psd + start
+diff --git a/libavcodec/arm/asm.S b/libavcodec/arm/asm.S
+index e540eac..d34a16d 100644
+--- a/libavcodec/arm/asm.S
++++ b/libavcodec/arm/asm.S
+@@ -62,7 +62,14 @@ ELF .eabi_attribute 25, \val
+ .endm
+
+ .macro function name, export=0
++ .set .Lpic_idx, 0
++ .set .Lpic_gp, 0
+ .macro endfunc
++ .if .Lpic_idx
++ .altmacro
++ put_pic %(.Lpic_idx - 1)
++ .noaltmacro
++ .endif
+ ELF .size \name, . - \name
+ .endfunc
+ .purgem endfunc
+@@ -99,8 +106,44 @@ ELF .size \name, . - \name
+ #endif
+ .endm
+
++.macro put_pic num
++ put_pic_\num
++.endm
++
++.macro do_def_pic num, val, label
++ .macro put_pic_\num
++ .if \num
++ .altmacro
++ put_pic %(\num - 1)
++ .noaltmacro
++ .endif
++\label: .word \val
++ .purgem put_pic_\num
++ .endm
++.endm
++
++.macro def_pic val, label
++ .altmacro
++ do_def_pic %.Lpic_idx, \val, \label
++ .noaltmacro
++ .set .Lpic_idx, .Lpic_idx + 1
++.endm
++
++.macro ldpic rd, val, indir=0
++ ldr \rd, .Lpicoff\@
++.Lpic\@:
++ .if \indir
++ ldr \rd, [pc, \rd]
++ .else
++ add \rd, pc, \rd
++ .endif
++ def_pic \val - (.Lpic\@ + (8 >> CONFIG_THUMB)), .Lpicoff\@
++.endm
++
+ .macro movrel rd, val
+-#if HAVE_ARMV6T2 && !CONFIG_PIC && !defined(__APPLE__)
++#if CONFIG_PIC
++ ldpic \rd, \val
++#elif HAVE_ARMV6T2 && !defined(__APPLE__)
+ movw \rd, #:lower16:\val
+ movt \rd, #:upper16:\val
+ #else
+@@ -108,6 +151,34 @@ ELF .size \name, . - \name
+ #endif
+ .endm
+
++.macro movrelx rd, val, gp
++#if CONFIG_PIC && defined(__ELF__)
++ .ifnb \gp
++ .if .Lpic_gp
++ .unreq gp
++ .endif
++ gp .req \gp
++ ldpic gp, _GLOBAL_OFFSET_TABLE_
++ .elseif !.Lpic_gp
++ gp .req r12
++ ldpic gp, _GLOBAL_OFFSET_TABLE_
++ .endif
++ .set .Lpic_gp, 1
++ ldr \rd, .Lpicoff\@
++ ldr \rd, [gp, \rd]
++ def_pic \val(GOT), .Lpicoff\@
++#elif CONFIG_PIC && defined(__APPLE__)
++ ldpic \rd, .Lpic\@, indir=1
++ .non_lazy_symbol_pointer
++.Lpic\@:
++ .indirect_symbol \val
++ .word 0
++ .text
++#else
++ movrel \rd, \val
++#endif
++.endm
++
+ .macro ldr_pre rt, rn, rm:vararg
+A ldr \rt, [\rn, \rm]!
+T add \rn, \rn, \rm
+diff --git a/libavcodec/arm/fft_fixed_neon.S b/libavcodec/arm/fft_fixed_neon.S
+index 0316b80..d6b7dac 100644
+--- a/libavcodec/arm/fft_fixed_neon.S
++++ b/libavcodec/arm/fft_fixed_neon.S
+@@ -214,7 +214,7 @@ function fft\n\()_neon
+ bl fft\n4\()_neon
+ mov r0, r4
+ pop {r4, lr}
+- movrel r1, X(ff_cos_\n\()_fixed)
++ movrelx r1, X(ff_cos_\n\()_fixed)
+ mov r2, #\n4/2
+ b fft_pass_neon
+ endfunc
+diff --git a/libavcodec/arm/fft_neon.S b/libavcodec/arm/fft_neon.S
+index ef8e4d4..d36927b 100644
+--- a/libavcodec/arm/fft_neon.S
++++ b/libavcodec/arm/fft_neon.S
+@@ -143,7 +143,7 @@ function fft16_neon
+ vswp d29, d30 @ q14{r12,i12,i14,r15} q15{r13,i13,i15,r14}
+ vadd.f32 q0, q12, q13 @ {t1,t2,t5,t6}
+ vadd.f32 q1, q14, q15 @ {t1a,t2a,t5a,t6a}
+- movrel r2, X(ff_cos_16)
++ movrelx r2, X(ff_cos_16)
+ vsub.f32 q13, q12, q13 @ {t3,t4,t7,t8}
+ vrev64.32 d1, d1
+ vsub.f32 q15, q14, q15 @ {t3a,t4a,t7a,t8a}
+@@ -290,7 +290,7 @@ function fft\n\()_neon
+ bl fft\n4\()_neon
+ mov r0, r4
+ pop {r4, lr}
+- movrel r1, X(ff_cos_\n)
++ movrelx r1, X(ff_cos_\n)
+ mov r2, #\n4/2
+ b fft_pass_neon
+ endfunc
+diff --git a/libavcodec/arm/vp3dsp_neon.S b/libavcodec/arm/vp3dsp_neon.S
+index ae3e402..90d9f80 100644
+--- a/libavcodec/arm/vp3dsp_neon.S
++++ b/libavcodec/arm/vp3dsp_neon.S
+@@ -116,9 +116,8 @@ function vp3_idct_start_neon
+ vadd.s16 q1, q8, q12
+ vsub.s16 q8, q8, q12
+ vld1.64 {d28-d31}, [r2,:128]!
+-endfunc
+
+-function vp3_idct_core_neon
++vp3_idct_core_neon:
+ vmull.s16 q2, d18, xC1S7 // (ip[1] * C1) << 16
+ vmull.s16 q3, d19, xC1S7
+ vmull.s16 q4, d2, xC4S4 // ((ip[0] + ip[4]) * C4) << 16
+diff --git a/libavcodec/arm/vp8_armv6.S b/libavcodec/arm/vp8_armv6.S
+index 8a3beb9..adb49f7 100644
+--- a/libavcodec/arm/vp8_armv6.S
++++ b/libavcodec/arm/vp8_armv6.S
+@@ -65,7 +65,7 @@ T orrcs \cw, \cw, \t1
+
+ function ff_decode_block_coeffs_armv6, export=1
+ push {r0,r1,r4-r11,lr}
+- movrel lr, X(ff_vp56_norm_shift)
++ movrelx lr, X(ff_vp56_norm_shift)
+ ldrd r4, r5, [sp, #44] @ token_prob, qmul
+ cmp r3, #0
+ ldr r11, [r5]
+@@ -206,7 +206,7 @@ A orrcs r8, r8, r10, lsl r6
+ mov r9, #8
+ it ge
+ addge r12, r12, #1
+- movrel r4, X(ff_vp8_dct_cat_prob)
++ movrelx r4, X(ff_vp8_dct_cat_prob), r1
+ lsl r9, r9, r12
+ ldr r4, [r4, r12, lsl #2]
+ add r12, r9, #3
+-- 
+1.7.9.4