| author | Nitin A Kamble <nitin.a.kamble@intel.com> | 2011-12-02 12:20:06 -0800 |
|---|---|---|
| committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2011-12-05 22:47:07 +0000 |
| commit | f3c0a02b920efa5747474b4c65aba12a928e56de (patch) | |
| tree | 4152fb62bb1aa96c80c7d5a9671fc02f8e61fa8d | |
| parent | b60d604f233565edc47b9d132ce7141a4eeb17f9 (diff) | |
| download | poky-f3c0a02b920efa5747474b4c65aba12a928e56de.tar.gz | |
liboil: patch source code for x32
Make the assembly syntax compatible with x32 gcc. Otherwise x32 gcc throws errors.
This fixes bug [YOCTO #1412]
(From OE-Core rev: f43d633540b41c94eacfbc3c5c450ddd192b2164)
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
| -rw-r--r-- | meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch | 222 |
| -rw-r--r-- | meta/recipes-support/liboil/liboil_0.3.17.bb | 3 |

2 files changed, 224 insertions(+), 1 deletion(-)
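The change is mechanical: every pointer operand used as a base register in an addressing expression gains GCC's `q` operand modifier. Under the x32 ABI pointers are 32-bit, so a plain `%1` for a pointer operand expands to a 32-bit register name (e.g. `%edi`), and combining it with the 64-bit index `%rcx` yields addresses like `(%edi,%rcx,4)` that the assembler rejects; `%q1` prints the 64-bit name (`%rdi`) of the same register and assembles on both LP64 x86-64 and x32. A minimal sketch of the before/after pattern (the `load_i16` helper is illustrative, not taken from liboil):

```c
/*
 * Illustrative sketch (not from liboil) of the operand-modifier fix applied
 * in the patch below.  On x32, "%1" for a pointer operand expands to a
 * 32-bit register (e.g. %edi); "%q1" asks gcc for the 64-bit name (%rdi)
 * of the same register, so the base/index addressing mode assembles on
 * both x86-64 and x32.  x86-64/x32 only.
 */
#include <stdint.h>

static inline int16_t load_i16 (const int16_t *p, uint64_t i)
{
  int16_t v;
  asm ("movw (%q1,%2,2), %0"        /* was "(%1,%2,2)" before the fix */
       : "=r" (v)
       : "r" (p), "r" (i)
       : "memory");
  return v;
}
```

The hunks below apply exactly this substitution to every pointer operand dereferenced in liboil/amd64/wavelet.c.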
diff --git a/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
new file mode 100644
index 0000000000..473380e9fc
--- /dev/null
+++ b/meta/recipes-support/liboil/liboil-0.3.17/liboil_fix_for_x32.patch
@@ -0,0 +1,222 @@
Upstream-Status: Pending

Make the assembly syntax compatible with x32 gcc. Otherwise x32 gcc throws errors.

Signed-Off-By: Nitin A Kamble <nitin.a.kamble@intel.com>
2011/12/01


Index: liboil-0.3.17/liboil/amd64/wavelet.c
===================================================================
--- liboil-0.3.17.orig/liboil/amd64/wavelet.c
+++ liboil-0.3.17/liboil/amd64/wavelet.c
@@ -21,14 +21,14 @@ deinterleave2_asm (int16_t *d1, int16_t
 asm volatile ("\n"
 " sub $2, %%rcx\n"
 "1:\n"
- " movw (%1,%%rcx,4), %%ax\n"
- " movw %%ax, (%0,%%rcx,2)\n"
- " movw 2(%1,%%rcx,4), %%ax\n"
- " movw %%ax, (%2,%%rcx,2)\n"
- " movw 4(%1,%%rcx,4), %%ax\n"
- " movw %%ax, 2(%0,%%rcx,2)\n"
- " movw 6(%1,%%rcx,4), %%ax\n"
- " movw %%ax, 2(%2,%%rcx,2)\n"
+ " movw (%q1,%%rcx,4), %%ax\n"
+ " movw %%ax, (%q0,%%rcx,2)\n"
+ " movw 2(%q1,%%rcx,4), %%ax\n"
+ " movw %%ax, (%q2,%%rcx,2)\n"
+ " movw 4(%q1,%%rcx,4), %%ax\n"
+ " movw %%ax, 2(%q0,%%rcx,2)\n"
+ " movw 6(%q1,%%rcx,4), %%ax\n"
+ " movw %%ax, 2(%q2,%%rcx,2)\n"
 " sub $2, %%rcx\n"
 " jge 1b\n"
 : "+r" (d1), "+r" (s_2xn), "+r" (d2), "+c" (n)
@@ -53,20 +53,20 @@ deinterleave2_mmx (int16_t *d1, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%1,%%rcx,4), %%mm0\n"
- " movq 8(%1,%%rcx,4), %%mm1\n"
+ " movq (%q1,%%rcx,4), %%mm0\n"
+ " movq 8(%q1,%%rcx,4), %%mm1\n"
 " pslld $16, %%mm0\n"
 " pslld $16, %%mm1\n"
 " psrad $16, %%mm0\n"
 " psrad $16, %%mm1\n"
 " packssdw %%mm1, %%mm0\n"
- " movq %%mm0, (%0,%%rcx,2)\n"
- " movq (%1,%%rcx,4), %%mm0\n"
- " movq 8(%1,%%rcx,4), %%mm1\n"
+ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " movq (%q1,%%rcx,4), %%mm0\n"
+ " movq 8(%q1,%%rcx,4), %%mm1\n"
 " psrad $16, %%mm0\n"
 " psrad $16, %%mm1\n"
 " packssdw %%mm1, %%mm0\n"
- " movq %%mm0, (%2,%%rcx,2)\n"
+ " movq %%mm0, (%q2,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %3, %%ecx\n"
 " jl 1b\n"
@@ -93,10 +93,10 @@ deinterleave2_mmx_2 (int16_t *d1, int16_
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " pshufw $0xd8, (%1,%%rcx,4), %%mm0\n"
- " movd %%mm0, (%0,%%rcx,2)\n"
- " pshufw $0x8d, (%1,%%rcx,4), %%mm0\n"
- " movd %%mm0, (%2,%%rcx,2)\n"
+ " pshufw $0xd8, (%q1,%%rcx,4), %%mm0\n"
+ " movd %%mm0, (%q0,%%rcx,2)\n"
+ " pshufw $0x8d, (%q1,%%rcx,4), %%mm0\n"
+ " movd %%mm0, (%q2,%%rcx,2)\n"
 " add $2, %%rcx\n"
 " cmp %3, %%ecx\n"
 " jl 1b\n"
@@ -123,16 +123,16 @@ deinterleave2_mmx_3 (int16_t *d1, int16_
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%1,%%rcx,4), %%mm1\n"
- " movq (%1,%%rcx,4), %%mm2\n"
- " movq 8(%1,%%rcx,4), %%mm0\n"
+ " movq (%q1,%%rcx,4), %%mm1\n"
+ " movq (%q1,%%rcx,4), %%mm2\n"
+ " movq 8(%q1,%%rcx,4), %%mm0\n"
 " punpcklwd %%mm0, %%mm1\n"
 " punpckhwd %%mm0, %%mm2\n"
 " movq %%mm1, %%mm0\n"
 " punpcklwd %%mm2, %%mm0\n"
 " punpckhwd %%mm2, %%mm1\n"
- " movq %%mm0, (%0,%%rcx,2)\n"
- " movq %%mm1, (%2,%%rcx,2)\n"
+ " movq %%mm0, (%q0,%%rcx,2)\n"
+ " movq %%mm1, (%q2,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %3, %%ecx\n"
 " jl 1b\n"
@@ -159,26 +159,26 @@ deinterleave2_mmx_4 (int16_t *d1, int16_
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%1,%%rcx,4), %%mm1\n"
+ " movq (%q1,%%rcx,4), %%mm1\n"
 " movq %%mm1, %%mm2\n"
- " movq 8(%1,%%rcx,4), %%mm0\n"
- " movq 16(%1,%%rcx,4), %%mm5\n"
+ " movq 8(%q1,%%rcx,4), %%mm0\n"
+ " movq 16(%q1,%%rcx,4), %%mm5\n"
 " punpcklwd %%mm0, %%mm1\n"
 " movq %%mm5, %%mm6\n"
 " punpckhwd %%mm0, %%mm2\n"
- " movq 24(%1,%%rcx,4), %%mm4\n"
+ " movq 24(%q1,%%rcx,4), %%mm4\n"
 " movq %%mm1, %%mm0\n"
 " punpcklwd %%mm4, %%mm5\n"
 " punpcklwd %%mm2, %%mm0\n"
 " punpckhwd %%mm4, %%mm6\n"
 " punpckhwd %%mm2, %%mm1\n"
 " movq %%mm5, %%mm4\n"
- " movq %%mm0, (%0,%%rcx,2)\n"
+ " movq %%mm0, (%q0,%%rcx,2)\n"
 " punpcklwd %%mm6, %%mm4\n"
- " movq %%mm1, (%2,%%rcx,2)\n"
+ " movq %%mm1, (%q2,%%rcx,2)\n"
 " punpckhwd %%mm6, %%mm5\n"
- " movq %%mm4, 8(%0,%%rcx,2)\n"
- " movq %%mm5, 8(%2,%%rcx,2)\n"
+ " movq %%mm4, 8(%q0,%%rcx,2)\n"
+ " movq %%mm5, 8(%q2,%%rcx,2)\n"
 " add $8, %%rcx\n"
 " cmp %3, %%ecx\n"
 " jl 1b\n"
@@ -252,13 +252,13 @@ interleave2_mmx (int16_t *d_2xn, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%1,%%rcx,2), %%mm0\n"
- " movq (%2,%%rcx,2), %%mm1\n"
+ " movq (%q1,%%rcx,2), %%mm0\n"
+ " movq (%q2,%%rcx,2), %%mm1\n"
 " movq %%mm0, %%mm2\n"
 " punpckhwd %%mm1, %%mm0\n"
 " punpcklwd %%mm1, %%mm2\n"
- " movq %%mm2, (%0,%%rcx,4)\n"
- " movq %%mm0, 8(%0,%%rcx,4)\n"
+ " movq %%mm2, (%q0,%%rcx,4)\n"
+ " movq %%mm0, 8(%q0,%%rcx,4)\n"
 " add $4, %%rcx\n"
 " cmp %3, %%ecx\n"
 " jl 1b\n"
@@ -285,12 +285,12 @@ lift_add_shift1_mmx (int16_t *d, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%2,%%rcx,2), %%mm1\n"
- " movq (%3,%%rcx,2), %%mm2\n"
+ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq (%q3,%%rcx,2), %%mm2\n"
 " paddw %%mm2, %%mm1\n"
 " psraw $1, %%mm1\n"
- " paddw (%1,%%rcx,2), %%mm1\n"
- " movq %%mm1, (%0,%%rcx,2)\n"
+ " paddw (%q1,%%rcx,2), %%mm1\n"
+ " movq %%mm1, (%q0,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %4, %%ecx\n"
 " jl 1b\n"
@@ -317,13 +317,13 @@ lift_sub_shift1_mmx (int16_t *d, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%2,%%rcx,2), %%mm1\n"
- " movq (%3,%%rcx,2), %%mm2\n"
- " movq (%1,%%rcx,2), %%mm0\n"
+ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq (%q3,%%rcx,2), %%mm2\n"
+ " movq (%q1,%%rcx,2), %%mm0\n"
 " paddw %%mm2, %%mm1\n"
 " psraw $1, %%mm1\n"
 " psubw %%mm1, %%mm0\n"
- " movq %%mm0, (%0,%%rcx,2)\n"
+ " movq %%mm0, (%q0,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %4, %%ecx\n"
 " jl 1b\n"
@@ -350,12 +350,12 @@ lift_add_shift2_mmx (int16_t *d, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%2,%%rcx,2), %%mm1\n"
- " movq (%3,%%rcx,2), %%mm2\n"
+ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq (%q3,%%rcx,2), %%mm2\n"
 " paddw %%mm2, %%mm1\n"
 " psraw $2, %%mm1\n"
- " paddw (%1,%%rcx,2), %%mm1\n"
- " movq %%mm1, (%0,%%rcx,2)\n"
+ " paddw (%q1,%%rcx,2), %%mm1\n"
+ " movq %%mm1, (%q0,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %4, %%ecx\n"
 " jl 1b\n"
@@ -382,13 +382,13 @@ lift_sub_shift2_mmx (int16_t *d, int16_t
 asm volatile ("\n"
 " xor %%rcx, %%rcx\n"
 "1:\n"
- " movq (%2,%%rcx,2), %%mm1\n"
- " movq (%3,%%rcx,2), %%mm2\n"
- " movq (%1,%%rcx,2), %%mm0\n"
+ " movq (%q2,%%rcx,2), %%mm1\n"
+ " movq (%q3,%%rcx,2), %%mm2\n"
+ " movq (%q1,%%rcx,2), %%mm0\n"
 " paddw %%mm2, %%mm1\n"
 " psraw $2, %%mm1\n"
 " psubw %%mm1, %%mm0\n"
- " movq %%mm0, (%0,%%rcx,2)\n"
+ " movq %%mm0, (%q0,%%rcx,2)\n"
 " add $4, %%rcx\n"
 " cmp %4, %%ecx\n"
 " jl 1b\n"
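The hunks above show only the asm template strings; the surrounding constraint and clobber lists are mostly outside the context window. As a reference for how such a patched loop sits inside a complete GCC extended-asm statement, here is a self-contained, simplified sketch (not the exact liboil routine; it processes one element per iteration and uses a generic index register rather than pinning `%rcx`):

```c
/*
 * Simplified sketch of a patched deinterleave loop (illustrative, not the
 * liboil code).  Pointer operands are referenced as %qN so the generated
 * base registers are 64-bit on both LP64 x86-64 and x32.  n is assumed > 0.
 */
#include <stdint.h>

static void deinterleave2_sketch (int16_t *d1, int16_t *d2,
                                  const int16_t *s_2xn, int64_t n)
{
  asm volatile ("\n"
      "  sub $1, %0\n"
      "1:\n"
      "  movw (%q3,%0,4), %%ax\n"        /* s_2xn[2*i]   -> d1[i] */
      "  movw %%ax, (%q1,%0,2)\n"
      "  movw 2(%q3,%0,4), %%ax\n"       /* s_2xn[2*i+1] -> d2[i] */
      "  movw %%ax, (%q2,%0,2)\n"
      "  sub $1, %0\n"
      "  jge 1b\n"
      : "+r" (n)                         /* %0: loop index, counts down */
      : "r" (d1), "r" (d2), "r" (s_2xn)  /* %1..%3: pointer operands   */
      : "ax", "memory", "cc");
}
```

Because `%q` only changes how an operand is printed, the hand-written register choices stay intact, which is why the patch touches the operand references but not the constraint lists.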
diff --git a/meta/recipes-support/liboil/liboil_0.3.17.bb b/meta/recipes-support/liboil/liboil_0.3.17.bb
index 10a845e42f..b1e21ab353
--- a/meta/recipes-support/liboil/liboil_0.3.17.bb
+++ b/meta/recipes-support/liboil/liboil_0.3.17.bb
@@ -10,12 +10,13 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=ad80780d9c5205d63481a0184e199a15 \
                     file://testsuite/trans.c;endline=29;md5=380ecd43121fe3dcc0d8d7e5984f283d"
 
 DEPENDS = "glib-2.0"
-PR = "r4"
+PR = "r5"
 
 SRC_URI = "http://liboil.freedesktop.org/download/${BPN}-${PV}.tar.gz \
            file://no-tests.patch \
            file://fix-unaligned-whitelist.patch \
            file://0001-Fix-enable-vfp-flag.patch \
+           file://liboil_fix_for_x32.patch \
           "
 
 SRC_URI[md5sum] = "47dc734f82faeb2964d97771cfd2e701"
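The recipe applies the patch unconditionally (with PR bumped to r5); on LP64 x86-64 the `%q` modifier prints the same 64-bit register names as before, so only x32 builds see different assembler input. A quick, illustrative check (not part of this commit) for whether a given compiler actually targets x32:

```c
/* Illustrative ABI check, not part of this commit: x32 compilers define
 * both __x86_64__ and __ILP32__, and pointers are 4 bytes while the
 * instruction set (and registers such as %rcx) stay 64-bit. */
#include <stdio.h>

int main (void)
{
#if defined(__x86_64__) && defined(__ILP32__)
  puts ("x32: 64-bit ISA, 32-bit pointers");
#elif defined(__x86_64__)
  puts ("x86-64 LP64");
#else
  puts ("not an x86-64 target");
#endif
  printf ("sizeof(void *) = %zu\n", sizeof (void *));
  return 0;
}
```

Built with `gcc -mx32` on an x32-capable toolchain this reports a 4-byte pointer; the default `-m64` reports 8.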
