path: root/meta-oe/recipes-support/opencv/opencv
author    Khem Raj <raj.khem@gmail.com>  2017-09-19 21:14:28 -0700
committer Martin Jansa <Martin.Jansa@gmail.com>  2017-09-22 22:50:47 +0000
commit    98f96aa15e3035ec4fda9225f4fc673633288856 (patch)
tree      2985f43d27ec910b3ddcd10267f803eb2f62a16f /meta-oe/recipes-support/opencv/opencv
parent    281b007d48b86fd15adb10fb52e6ea10d40009c4 (diff)
download  meta-openembedded-98f96aa15e3035ec4fda9225f4fc673633288856.tar.gz
opencv: Fix build on aarch64
Enable intrinsics on arm/neon with clang while here.

Signed-off-by: Khem Raj <raj.khem@gmail.com>
Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
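The first patch below rewrites every guard of the form `#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7` (and the `defined(__arm__)` variants) so that carotene's 32-bit ARM inline-assembly paths can never be selected when building for aarch64, where the "vld2.8"-style mnemonics do not assemble. A minimal, self-contained sketch of the resulting dispatch follows; the condition is taken verbatim from the patch, while the macro name and the program around it are purely illustrative:

    #include <stdio.h>

    /* Same condition the patch installs in carotene: the hand-written asm
     * paths are compiled only for 32-bit ARM with an old GCC, never aarch64. */
    #if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
    #define CAROTENE_USE_ARM32_ASM 1   /* hand-written "vld2.8 ..." blocks */
    #else
    #define CAROTENE_USE_ARM32_ASM 0   /* NEON intrinsics fallback */
    #endif

    int main(void)
    {
        /* On aarch64, or with any modern GCC, this prints 0, i.e. the
         * intrinsics branch is taken instead of 32-bit ARM assembly. */
        printf("arm32 asm path: %d\n", CAROTENE_USE_ARM32_ASM);
        return 0;
    }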
Diffstat (limited to 'meta-oe/recipes-support/opencv/opencv')
-rw-r--r-- meta-oe/recipes-support/opencv/opencv/0001-carotene-don-t-use-__asm__-with-aarch64.patch | 1250
-rw-r--r-- meta-oe/recipes-support/opencv/opencv/0002-Do-not-enable-asm-with-clang.patch            |  993
2 files changed, 2243 insertions, 0 deletions
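The second patch (0002-Do-not-enable-asm-with-clang.patch, shown further below) goes one step further: clang reports itself as GCC 4.2 through __GNUC__/__GNUC_MINOR__, so the version test above is true under clang and would wrongly route it onto the inline-asm branches; the patch therefore appends `!defined(__clang__)` to each guard. A hedged sketch of the combined check after both patches, wrapped in a helper function that is not part of the patch:

    /* Illustrative helper: selects the path exactly the way the patched
     * guards do — never on aarch64, and never under clang, which
     * masquerades as GCC 4.2. */
    static inline int carotene_uses_arm32_asm(void)
    {
    #if !defined(__aarch64__) && defined(__GNUC__) && \
        __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
        return 1;  /* hand-written "vld2.8"/"vmov.8" blocks */
    #else
        return 0;  /* NEON intrinsics (vld2_u8, vmov_n_s16, ...) */
    #endif
    }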
diff --git a/meta-oe/recipes-support/opencv/opencv/0001-carotene-don-t-use-__asm__-with-aarch64.patch b/meta-oe/recipes-support/opencv/opencv/0001-carotene-don-t-use-__asm__-with-aarch64.patch
new file mode 100644
index 000000000..a1a56e0e4
--- /dev/null
+++ b/meta-oe/recipes-support/opencv/opencv/0001-carotene-don-t-use-__asm__-with-aarch64.patch
@@ -0,0 +1,1250 @@
1From 353fc92618ce0dc6bab4a3e8bff1c13c3b613110 Mon Sep 17 00:00:00 2001
2From: Alexander Alekhin <alexander.alekhin@intel.com>
3Date: Wed, 23 Aug 2017 17:41:23 +0300
4Subject: [PATCH 1/2] carotene: don't use __asm__ with aarch64
5
6---
7Upstream-Status: Backport
8
9 3rdparty/carotene/src/channel_extract.cpp | 4 +-
10 3rdparty/carotene/src/channels_combine.cpp | 2 +-
11 3rdparty/carotene/src/colorconvert.cpp | 104 ++++++++++++++---------------
12 3rdparty/carotene/src/convert.cpp | 54 +++++++--------
13 3rdparty/carotene/src/convert_scale.cpp | 72 ++++++++++----------
14 3rdparty/carotene/src/gaussian_blur.cpp | 6 +-
15 3rdparty/carotene/src/pyramid.cpp | 20 +++---
16 3rdparty/carotene/src/scharr.cpp | 4 +-
17 8 files changed, 133 insertions(+), 133 deletions(-)
18
19diff --git a/3rdparty/carotene/src/channel_extract.cpp b/3rdparty/carotene/src/channel_extract.cpp
20index f663bc6005..8238a3ece8 100644
21--- a/3rdparty/carotene/src/channel_extract.cpp
22+++ b/3rdparty/carotene/src/channel_extract.cpp
23@@ -231,7 +231,7 @@ void extract4(const Size2D &size,
24 srcStride == dst2Stride && \
25 srcStride == dst3Stride &&
26
27-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
28+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
29
30 #define SPLIT_ASM2(sgn, bits) __asm__ ( \
31 "vld2." #bits " {d0, d2}, [%[in0]] \n\t" \
32@@ -351,7 +351,7 @@ void extract4(const Size2D &size,
33 } \
34 }
35
36-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
37+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
38
39 #define ALPHA_QUAD(sgn, bits) { \
40 internal::prefetch(src + sj); \
41diff --git a/3rdparty/carotene/src/channels_combine.cpp b/3rdparty/carotene/src/channels_combine.cpp
42index 157c8b8121..fc98fb9181 100644
43--- a/3rdparty/carotene/src/channels_combine.cpp
44+++ b/3rdparty/carotene/src/channels_combine.cpp
45@@ -77,7 +77,7 @@ namespace CAROTENE_NS {
46 dstStride == src2Stride && \
47 dstStride == src3Stride &&
48
49-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
50+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
51
52 #define MERGE_ASM2(sgn, bits) __asm__ ( \
53 "vld1." #bits " {d0-d1}, [%[in0]] \n\t" \
54diff --git a/3rdparty/carotene/src/colorconvert.cpp b/3rdparty/carotene/src/colorconvert.cpp
55index 3037fe672a..26ae54b15c 100644
56--- a/3rdparty/carotene/src/colorconvert.cpp
57+++ b/3rdparty/carotene/src/colorconvert.cpp
58@@ -97,7 +97,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
59 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
60 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
61
62-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
63+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
64 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
65 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
66 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
67@@ -116,7 +116,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
68 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
69 size_t sj = 0u, dj = 0u;
70
71-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
72+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
73 for (; dj < roiw8; sj += 24, dj += 8)
74 {
75 internal::prefetch(src + sj);
76@@ -198,7 +198,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
77 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
78 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
79
80-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
81+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
82 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
83 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
84 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
85@@ -217,7 +217,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
86 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
87 size_t sj = 0u, dj = 0u;
88
89-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
90+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
91 for (; dj < roiw8; sj += 32, dj += 8)
92 {
93 internal::prefetch(src + sj);
94@@ -300,7 +300,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
95 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
96 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
97
98-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
99+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
100 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
101 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
102 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
103@@ -319,7 +319,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
104 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
105 size_t sj = 0u, dj = 0u;
106
107-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
108+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
109 for (; dj < roiw8; sj += 24, dj += 8)
110 {
111 internal::prefetch(src + sj);
112@@ -402,7 +402,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
113 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
114 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
115
116-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
117+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
118 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
119 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
120 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
121@@ -421,7 +421,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
122 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
123 size_t sj = 0u, dj = 0u;
124
125-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
126+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
127 for (; dj < roiw8; sj += 32, dj += 8)
128 {
129 internal::prefetch(src + sj);
130@@ -512,7 +512,7 @@ void gray2rgb(const Size2D &size,
131 for (; sj < roiw16; sj += 16, dj += 48)
132 {
133 internal::prefetch(src + sj);
134-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
135+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
136 __asm__ (
137 "vld1.8 {d0-d1}, [%[in0]] \n\t"
138 "vmov.8 q1, q0 \n\t"
139@@ -538,7 +538,7 @@ void gray2rgb(const Size2D &size,
140
141 if (sj < roiw8)
142 {
143-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
144+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
145 __asm__ (
146 "vld1.8 {d0}, [%[in]] \n\t"
147 "vmov.8 d1, d0 \n\t"
148@@ -584,7 +584,7 @@ void gray2rgbx(const Size2D &size,
149 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
150 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
151
152-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
153+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
154 register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
155 #else
156 uint8x16x4_t vRgba;
157@@ -602,7 +602,7 @@ void gray2rgbx(const Size2D &size,
158 for (; sj < roiw16; sj += 16, dj += 64)
159 {
160 internal::prefetch(src + sj);
161-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
162+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
163 __asm__ (
164 "vld1.8 {d0-d1}, [%[in0]] \n\t"
165 "vmov.8 q1, q0 \n\t"
166@@ -628,7 +628,7 @@ void gray2rgbx(const Size2D &size,
167
168 if (sj < roiw8)
169 {
170-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
171+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
172 __asm__ (
173 "vld1.8 {d5}, [%[in]] \n\t"
174 "vmov.8 d6, d5 \n\t"
175@@ -672,7 +672,7 @@ void rgb2rgbx(const Size2D &size,
176 internal::assertSupportedConfiguration();
177 #ifdef CAROTENE_NEON
178 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
179-#if defined(__GNUC__) && defined(__arm__)
180+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
181 register uint8x8_t vc255_0 asm ("d3") = vmov_n_u8(255);
182 #else
183 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
184@@ -688,7 +688,7 @@ void rgb2rgbx(const Size2D &size,
185 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
186 size_t sj = 0u, dj = 0u, j = 0u;
187
188-#if defined(__GNUC__) && defined(__arm__)
189+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
190 for (; j < roiw8; sj += 24, dj += 32, j += 8)
191 {
192 internal::prefetch(src + sj);
193@@ -742,7 +742,7 @@ void rgbx2rgb(const Size2D &size,
194 internal::assertSupportedConfiguration();
195 #ifdef CAROTENE_NEON
196 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
197-#if !defined(__GNUC__) || !defined(__arm__)
198+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
199 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
200 union { uint8x16x4_t v4; uint8x16x3_t v3; } v_dst0;
201 union { uint8x8x4_t v4; uint8x8x3_t v3; } v_dst;
202@@ -754,7 +754,7 @@ void rgbx2rgb(const Size2D &size,
203 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
204 size_t sj = 0u, dj = 0u, j = 0u;
205
206-#if defined(__GNUC__) && defined(__arm__)
207+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
208 for (; j < roiw8; sj += 32, dj += 24, j += 8)
209 {
210 internal::prefetch(src + sj);
211@@ -805,7 +805,7 @@ void rgb2bgr(const Size2D &size,
212 {
213 internal::assertSupportedConfiguration();
214 #ifdef CAROTENE_NEON
215-#if !defined(__GNUC__) || !defined(__arm__)
216+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
217 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
218 #endif
219 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
220@@ -817,7 +817,7 @@ void rgb2bgr(const Size2D &size,
221 size_t sj = 0u, dj = 0u, j = 0u;
222
223
224-#if defined(__GNUC__) && defined(__arm__)
225+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
226 for (; j < roiw8; sj += 24, dj += 24, j += 8)
227 {
228 internal::prefetch(src + sj);
229@@ -874,7 +874,7 @@ void rgbx2bgrx(const Size2D &size,
230 {
231 internal::assertSupportedConfiguration();
232 #ifdef CAROTENE_NEON
233-#if !defined(__GNUC__) || !defined(__arm__)
234+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
235 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
236 #endif
237 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
238@@ -885,7 +885,7 @@ void rgbx2bgrx(const Size2D &size,
239 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
240 size_t sj = 0u, dj = 0u, j = 0u;
241
242-#if defined(__GNUC__) && defined(__arm__)
243+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
244 for (; j < roiw8; sj += 32, dj += 32, j += 8)
245 {
246 internal::prefetch(src + sj);
247@@ -943,7 +943,7 @@ void rgbx2bgr(const Size2D &size,
248 {
249 internal::assertSupportedConfiguration();
250 #ifdef CAROTENE_NEON
251-#if !defined(__GNUC__) || !defined(__arm__)
252+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
253 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
254 #endif
255 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
256@@ -954,7 +954,7 @@ void rgbx2bgr(const Size2D &size,
257 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
258 size_t sj = 0u, dj = 0u, j = 0u;
259
260-#if defined(__GNUC__) && defined(__arm__)
261+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
262 for (; j < roiw8; sj += 32, dj += 24, j += 8)
263 {
264 internal::prefetch(src + sj);
265@@ -1010,7 +1010,7 @@ void rgb2bgrx(const Size2D &size,
266 {
267 internal::assertSupportedConfiguration();
268 #ifdef CAROTENE_NEON
269-#if defined(__GNUC__) && defined(__arm__)
270+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
271 register uint8x8_t vc255 asm ("d3") = vmov_n_u8(255);
272 #else
273 union { uint8x16x4_t v4; uint8x16x3_t v3; } vals0;
274@@ -1019,7 +1019,7 @@ void rgb2bgrx(const Size2D &size,
275 vals8.v4.val[3] = vmov_n_u8(255);
276 #endif
277
278-#if !defined(__GNUC__) || !defined(__arm__)
279+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
280 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
281 #endif
282 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
283@@ -1030,7 +1030,7 @@ void rgb2bgrx(const Size2D &size,
284 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
285 size_t sj = 0u, dj = 0u, j = 0u;
286
287-#if defined(__GNUC__) && defined(__arm__)
288+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
289 for (; j < roiw8; sj += 24, dj += 32, j += 8)
290 {
291 internal::prefetch(src + sj);
292@@ -1409,7 +1409,7 @@ inline void convertToHSV(const s32 r, const s32 g, const s32 b,
293 "d24","d25","d26","d27","d28","d29","d30","d31" \
294 );
295
296-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
297+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
298
299 #define YCRCB_CONSTS \
300 register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \
301@@ -1555,7 +1555,7 @@ inline uint8x8x3_t convertToYCrCb( const int16x8_t& vR, const int16x8_t& vG, con
302 #define COEFF_G ( 8663)
303 #define COEFF_B (-17705)
304
305-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
306+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
307 #define YUV420ALPHA3_CONST
308 #define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
309 #define YUV420ALPHA3_CONVERT
310@@ -1852,7 +1852,7 @@ void rgb2hsv(const Size2D &size,
311 #ifdef CAROTENE_NEON
312 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
313 const s32 hsv_shift = 12;
314-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
315+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
316 register const f32 vsdiv_table = f32(255 << hsv_shift);
317 register f32 vhdiv_table = f32(hrange << hsv_shift);
318 register const s32 vhrange = hrange;
319@@ -1871,7 +1871,7 @@ void rgb2hsv(const Size2D &size,
320 for (; j < roiw8; sj += 24, dj += 24, j += 8)
321 {
322 internal::prefetch(src + sj);
323-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
324+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
325 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
326 #else
327 uint8x8x3_t vRgb = vld3_u8(src + sj);
328@@ -1904,7 +1904,7 @@ void rgbx2hsv(const Size2D &size,
329 #ifdef CAROTENE_NEON
330 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
331 const s32 hsv_shift = 12;
332-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
333+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
334 register const f32 vsdiv_table = f32(255 << hsv_shift);
335 register f32 vhdiv_table = f32(hrange << hsv_shift);
336 register const s32 vhrange = hrange;
337@@ -1923,7 +1923,7 @@ void rgbx2hsv(const Size2D &size,
338 for (; j < roiw8; sj += 32, dj += 24, j += 8)
339 {
340 internal::prefetch(src + sj);
341-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
342+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
343 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
344 #else
345 uint8x8x4_t vRgb = vld4_u8(src + sj);
346@@ -1956,7 +1956,7 @@ void bgr2hsv(const Size2D &size,
347 #ifdef CAROTENE_NEON
348 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
349 const s32 hsv_shift = 12;
350-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
351+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
352 register const f32 vsdiv_table = f32(255 << hsv_shift);
353 register f32 vhdiv_table = f32(hrange << hsv_shift);
354 register const s32 vhrange = hrange;
355@@ -1975,7 +1975,7 @@ void bgr2hsv(const Size2D &size,
356 for (; j < roiw8; sj += 24, dj += 24, j += 8)
357 {
358 internal::prefetch(src + sj);
359-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
360+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
361 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
362 #else
363 uint8x8x3_t vRgb = vld3_u8(src + sj);
364@@ -2008,7 +2008,7 @@ void bgrx2hsv(const Size2D &size,
365 #ifdef CAROTENE_NEON
366 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
367 const s32 hsv_shift = 12;
368-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
369+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
370 register const f32 vsdiv_table = f32(255 << hsv_shift);
371 register f32 vhdiv_table = f32(hrange << hsv_shift);
372 register const s32 vhrange = hrange;
373@@ -2027,7 +2027,7 @@ void bgrx2hsv(const Size2D &size,
374 for (; j < roiw8; sj += 32, dj += 24, j += 8)
375 {
376 internal::prefetch(src + sj);
377-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
378+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
379 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
380 #else
381 uint8x8x4_t vRgb = vld4_u8(src + sj);
382@@ -2068,7 +2068,7 @@ void rgbx2bgr565(const Size2D &size,
383 for (; j < roiw16; sj += 64, dj += 32, j += 16)
384 {
385 internal::prefetch(src + sj);
386-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
387+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
388 __asm__ (
389 "vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
390 "vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
391@@ -2122,7 +2122,7 @@ void rgb2bgr565(const Size2D &size,
392 for (; j < roiw16; sj += 48, dj += 32, j += 16)
393 {
394 internal::prefetch(src + sj);
395-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
396+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
397 __asm__ (
398 "vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
399 "vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
400@@ -2176,7 +2176,7 @@ void rgbx2rgb565(const Size2D &size,
401 for (; j < roiw16; sj += 64, dj += 32, j += 16)
402 {
403 internal::prefetch(src + sj);
404-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
405+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
406 __asm__ (
407 "vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
408 "vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"
409@@ -2230,7 +2230,7 @@ void rgb2rgb565(const Size2D &size,
410 for (; j < roiw16; sj += 48, dj += 32, j += 16)
411 {
412 internal::prefetch(src + sj);
413-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
414+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
415 __asm__ (
416 "vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
417 "vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
418@@ -2285,7 +2285,7 @@ void rgb2ycrcb(const Size2D &size,
419 for (; j < roiw8; sj += 24, dj += 24, j += 8)
420 {
421 internal::prefetch(src + sj);
422-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
423+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
424 CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
425 #else
426 uint8x8x3_t vRgb = vld3_u8(src + sj);
427@@ -2329,7 +2329,7 @@ void rgbx2ycrcb(const Size2D &size,
428 for (; j < roiw8; sj += 32, dj += 24, j += 8)
429 {
430 internal::prefetch(src + sj);
431-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
432+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
433 CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
434 #else
435 uint8x8x4_t vRgba = vld4_u8(src + sj);
436@@ -2373,7 +2373,7 @@ void bgr2ycrcb(const Size2D &size,
437 for (; j < roiw8; sj += 24, dj += 24, j += 8)
438 {
439 internal::prefetch(src + sj);
440-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
441+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
442 CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
443 #else
444 uint8x8x3_t vBgr = vld3_u8(src + sj);
445@@ -2417,7 +2417,7 @@ void bgrx2ycrcb(const Size2D &size,
446 for (; j < roiw8; sj += 32, dj += 24, j += 8)
447 {
448 internal::prefetch(src + sj);
449-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
450+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
451 CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
452 #else
453 uint8x8x4_t vBgra = vld4_u8(src + sj);
454@@ -2499,7 +2499,7 @@ void yuv420sp2rgb(const Size2D &size,
455 internal::prefetch(uv + j);
456 internal::prefetch(y1 + j);
457 internal::prefetch(y2 + j);
458-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
459+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
460 CONVERTYUV420TORGB(3, d1, d0, q5, q6)
461 #else
462 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
463@@ -2545,7 +2545,7 @@ void yuv420sp2rgbx(const Size2D &size,
464 internal::prefetch(uv + j);
465 internal::prefetch(y1 + j);
466 internal::prefetch(y2 + j);
467-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
468+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
469 CONVERTYUV420TORGB(4, d1, d0, q5, q6)
470 #else
471 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
472@@ -2591,7 +2591,7 @@ void yuv420i2rgb(const Size2D &size,
473 internal::prefetch(uv + j);
474 internal::prefetch(y1 + j);
475 internal::prefetch(y2 + j);
476-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
477+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
478 CONVERTYUV420TORGB(3, d0, d1, q5, q6)
479 #else
480 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
481@@ -2637,7 +2637,7 @@ void yuv420i2rgbx(const Size2D &size,
482 internal::prefetch(uv + j);
483 internal::prefetch(y1 + j);
484 internal::prefetch(y2 + j);
485-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
486+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
487 CONVERTYUV420TORGB(4, d0, d1, q5, q6)
488 #else
489 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
490@@ -2683,7 +2683,7 @@ void yuv420sp2bgr(const Size2D &size,
491 internal::prefetch(uv + j);
492 internal::prefetch(y1 + j);
493 internal::prefetch(y2 + j);
494-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
495+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
496 CONVERTYUV420TORGB(3, d1, d0, q6, q5)
497 #else
498 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
499@@ -2729,7 +2729,7 @@ void yuv420sp2bgrx(const Size2D &size,
500 internal::prefetch(uv + j);
501 internal::prefetch(y1 + j);
502 internal::prefetch(y2 + j);
503-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
504+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
505 CONVERTYUV420TORGB(4, d1, d0, q6, q5)
506 #else
507 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
508@@ -2775,7 +2775,7 @@ void yuv420i2bgr(const Size2D &size,
509 internal::prefetch(uv + j);
510 internal::prefetch(y1 + j);
511 internal::prefetch(y2 + j);
512-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
513+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
514 CONVERTYUV420TORGB(3, d0, d1, q6, q5)
515 #else
516 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
517@@ -2821,7 +2821,7 @@ void yuv420i2bgrx(const Size2D &size,
518 internal::prefetch(uv + j);
519 internal::prefetch(y1 + j);
520 internal::prefetch(y2 + j);
521-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
522+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
523 CONVERTYUV420TORGB(4, d0, d1, q6, q5)
524 #else
525 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
526diff --git a/3rdparty/carotene/src/convert.cpp b/3rdparty/carotene/src/convert.cpp
527index 403f16d86a..64b6db78ab 100644
528--- a/3rdparty/carotene/src/convert.cpp
529+++ b/3rdparty/carotene/src/convert.cpp
530@@ -101,7 +101,7 @@ CVT_FUNC(u8, s8, 16,
531 }
532 })
533
534-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
535+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
536 CVT_FUNC(u8, u16, 16,
537 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
538 {
539@@ -135,7 +135,7 @@ CVT_FUNC(u8, u16, 16,
540 })
541 #endif
542
543-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
544+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
545 CVT_FUNC(u8, s32, 16,
546 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
547 register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);
548@@ -173,7 +173,7 @@ CVT_FUNC(u8, s32, 16,
549 })
550 #endif
551
552-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
553+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
554 CVT_FUNC(u8, f32, 16,
555 ,
556 {
557@@ -248,7 +248,7 @@ CVT_FUNC(s8, u8, 16,
558 }
559 })
560
561-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
562+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
563 CVT_FUNC(s8, u16, 16,
564 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
565 {
566@@ -284,7 +284,7 @@ CVT_FUNC(s8, u16, 16,
567 })
568 #endif
569
570-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
571+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
572 CVT_FUNC(s8, s16, 16,
573 ,
574 {
575@@ -323,7 +323,7 @@ CVT_FUNC(s8, s16, 16,
576 })
577 #endif
578
579-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
580+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
581 CVT_FUNC(s8, s32, 16,
582 ,
583 {
584@@ -377,7 +377,7 @@ CVT_FUNC(s8, s32, 16,
585 })
586 #endif
587
588-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
589+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
590 CVT_FUNC(s8, f32, 16,
591 ,
592 {
593@@ -440,7 +440,7 @@ CVT_FUNC(s8, f32, 16,
594 })
595 #endif
596
597-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
598+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
599 CVT_FUNC(u16, u8, 16,
600 ,
601 {
602@@ -479,7 +479,7 @@ CVT_FUNC(u16, u8, 16,
603 })
604 #endif
605
606-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
607+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
608 CVT_FUNC(u16, s8, 16,
609 register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
610 {
611@@ -522,7 +522,7 @@ CVT_FUNC(u16, s8, 16,
612 })
613 #endif
614
615-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
616+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
617 CVT_FUNC(u16, s16, 8,
618 register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
619 {
620@@ -555,7 +555,7 @@ CVT_FUNC(u16, s16, 8,
621 })
622 #endif
623
624-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
625+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
626 CVT_FUNC(u16, s32, 8,
627 register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
628 {
629@@ -589,7 +589,7 @@ CVT_FUNC(u16, s32, 8,
630 })
631 #endif
632
633-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
634+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
635 CVT_FUNC(u16, f32, 8,
636 ,
637 {
638@@ -633,7 +633,7 @@ CVT_FUNC(u16, f32, 8,
639 })
640 #endif
641
642-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
643+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
644 CVT_FUNC(s16, u8, 16,
645 ,
646 {
647@@ -672,7 +672,7 @@ CVT_FUNC(s16, u8, 16,
648 })
649 #endif
650
651-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
652+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
653 CVT_FUNC(s16, s8, 16,
654 ,
655 {
656@@ -711,7 +711,7 @@ CVT_FUNC(s16, s8, 16,
657 })
658 #endif
659
660-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
661+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
662 CVT_FUNC(s16, u16, 8,
663 register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
664 {
665@@ -747,7 +747,7 @@ CVT_FUNC(s16, u16, 8,
666 })
667 #endif
668
669-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
670+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
671 CVT_FUNC(s16, s32, 8,
672 ,
673 {
674@@ -786,7 +786,7 @@ CVT_FUNC(s16, s32, 8,
675 })
676 #endif
677
678-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
679+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
680 CVT_FUNC(s16, f32, 8,
681 ,
682 {
683@@ -829,7 +829,7 @@ CVT_FUNC(s16, f32, 8,
684 })
685 #endif
686
687-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
688+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
689 CVT_FUNC(s32, u8, 8,
690 ,
691 {
692@@ -870,7 +870,7 @@ CVT_FUNC(s32, u8, 8,
693 })
694 #endif
695
696-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
697+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
698 CVT_FUNC(s32, s8, 8,
699 ,
700 {
701@@ -911,7 +911,7 @@ CVT_FUNC(s32, s8, 8,
702 })
703 #endif
704
705-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
706+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
707 CVT_FUNC(s32, u16, 8,
708 ,
709 {
710@@ -950,7 +950,7 @@ CVT_FUNC(s32, u16, 8,
711 })
712 #endif
713
714-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
715+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
716 CVT_FUNC(s32, s16, 8,
717 ,
718 {
719@@ -989,7 +989,7 @@ CVT_FUNC(s32, s16, 8,
720 })
721 #endif
722
723-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
724+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
725 CVT_FUNC(s32, f32, 8,
726 ,
727 {
728@@ -1034,7 +1034,7 @@ CVT_FUNC(s32, f32, 8,
729 })
730 #endif
731
732-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
733+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
734 CVT_FUNC(f32, u8, 8,
735 register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
736 register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,
737@@ -1101,7 +1101,7 @@ CVT_FUNC(f32, u8, 8,
738 })
739 #endif
740
741-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
742+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
743 CVT_FUNC(f32, s8, 8,
744 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
745 {
746@@ -1153,7 +1153,7 @@ CVT_FUNC(f32, s8, 8,
747 })
748 #endif
749
750-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
751+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
752 CVT_FUNC(f32, u16, 8,
753 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
754 {
755@@ -1212,7 +1212,7 @@ CVT_FUNC(f32, u16, 8,
756 })
757 #endif
758
759-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
760+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
761 CVT_FUNC(f32, s16, 8,
762 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
763 {
764@@ -1271,7 +1271,7 @@ CVT_FUNC(f32, s16, 8,
765 })
766 #endif
767
768-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
769+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
770 CVT_FUNC(f32, s32, 8,
771 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
772 {
773diff --git a/3rdparty/carotene/src/convert_scale.cpp b/3rdparty/carotene/src/convert_scale.cpp
774index 0a14a8035c..ae41a985c8 100644
775--- a/3rdparty/carotene/src/convert_scale.cpp
776+++ b/3rdparty/carotene/src/convert_scale.cpp
777@@ -135,7 +135,7 @@ namespace CAROTENE_NS {
778
779 #endif
780
781-#if defined(__GNUC__) && defined(__arm__)
782+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
783 CVTS_FUNC1(u8, 16,
784 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
785 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
786@@ -220,7 +220,7 @@ CVTS_FUNC1(u8, 16,
787 })
788 #endif
789
790-#if defined(__GNUC__) && defined(__arm__)
791+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
792 CVTS_FUNC(u8, s8, 16,
793 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
794 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
795@@ -305,7 +305,7 @@ CVTS_FUNC(u8, s8, 16,
796 })
797 #endif
798
799-#if defined(__GNUC__) && defined(__arm__)
800+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
801 CVTS_FUNC(u8, u16, 16,
802 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
803 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
804@@ -389,7 +389,7 @@ CVTS_FUNC(u8, u16, 16,
805 })
806 #endif
807
808-#if defined(__GNUC__) && defined(__arm__)
809+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
810 CVTS_FUNC(u8, s16, 16,
811 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
812 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
813@@ -473,7 +473,7 @@ CVTS_FUNC(u8, s16, 16,
814 })
815 #endif
816
817-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
818+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
819 CVTS_FUNC(u8, s32, 16,
820 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
821 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
822@@ -562,7 +562,7 @@ CVTS_FUNC(u8, s32, 16,
823 })
824 #endif
825
826-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
827+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
828 CVTS_FUNC(u8, f32, 16,
829 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
830 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
831@@ -643,7 +643,7 @@ CVTS_FUNC(u8, f32, 16,
832 })
833 #endif
834
835-#if defined(__GNUC__) && defined(__arm__)
836+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
837 CVTS_FUNC(s8, u8, 16,
838 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
839 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
840@@ -728,7 +728,7 @@ CVTS_FUNC(s8, u8, 16,
841 })
842 #endif
843
844-#if defined(__GNUC__) && defined(__arm__)
845+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
846 CVTS_FUNC1(s8, 16,
847 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
848 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
849@@ -813,7 +813,7 @@ CVTS_FUNC1(s8, 16,
850 })
851 #endif
852
853-#if defined(__GNUC__) && defined(__arm__)
854+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
855 CVTS_FUNC(s8, u16, 16,
856 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
857 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
858@@ -899,7 +899,7 @@ CVTS_FUNC(s8, u16, 16,
859 })
860 #endif
861
862-#if defined(__GNUC__) && defined(__arm__)
863+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
864 CVTS_FUNC(s8, s16, 16,
865 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
866 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
867@@ -985,7 +985,7 @@ CVTS_FUNC(s8, s16, 16,
868 })
869 #endif
870
871-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
872+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
873 CVTS_FUNC(s8, s32, 16,
874 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
875 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
876@@ -1074,7 +1074,7 @@ CVTS_FUNC(s8, s32, 16,
877 })
878 #endif
879
880-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
881+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
882 CVTS_FUNC(s8, f32, 16,
883 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
884 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
885@@ -1155,7 +1155,7 @@ CVTS_FUNC(s8, f32, 16,
886 })
887 #endif
888
889-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
890+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
891 CVTS_FUNC(u16, u8, 16,
892 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
893 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
894@@ -1214,7 +1214,7 @@ CVTS_FUNC(u16, u8, 16,
895 })
896 #endif
897
898-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
899+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
900 CVTS_FUNC(u16, s8, 16,
901 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
902 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
903@@ -1273,7 +1273,7 @@ CVTS_FUNC(u16, s8, 16,
904 })
905 #endif
906
907-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
908+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
909 CVTS_FUNC1(u16, 16,
910 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
911 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
912@@ -1330,7 +1330,7 @@ CVTS_FUNC1(u16, 16,
913 })
914 #endif
915
916-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
917+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
918 CVTS_FUNC(u16, s16, 8,
919 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
920 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
921@@ -1387,7 +1387,7 @@ CVTS_FUNC(u16, s16, 8,
922 })
923 #endif
924
925-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
926+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
927 CVTS_FUNC(u16, s32, 8,
928 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
929 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
930@@ -1443,7 +1443,7 @@ CVTS_FUNC(u16, s32, 8,
931 })
932 #endif
933
934-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
935+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
936 CVTS_FUNC(u16, f32, 8,
937 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
938 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
939@@ -1495,7 +1495,7 @@ CVTS_FUNC(u16, f32, 8,
940 })
941 #endif
942
943-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
944+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
945 CVTS_FUNC(s16, u8, 16,
946 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
947 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
948@@ -1554,7 +1554,7 @@ CVTS_FUNC(s16, u8, 16,
949 })
950 #endif
951
952-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
953+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
954 CVTS_FUNC(s16, s8, 16,
955 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
956 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
957@@ -1613,7 +1613,7 @@ CVTS_FUNC(s16, s8, 16,
958 })
959 #endif
960
961-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
962+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
963 CVTS_FUNC(s16, u16, 8,
964 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
965 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
966@@ -1670,7 +1670,7 @@ CVTS_FUNC(s16, u16, 8,
967 })
968 #endif
969
970-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
971+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
972 CVTS_FUNC1(s16, 16,
973 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
974 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
975@@ -1727,7 +1727,7 @@ CVTS_FUNC1(s16, 16,
976 })
977 #endif
978
979-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
980+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
981 CVTS_FUNC(s16, s32, 8,
982 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
983 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
984@@ -1783,7 +1783,7 @@ CVTS_FUNC(s16, s32, 8,
985 })
986 #endif
987
988-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
989+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
990 CVTS_FUNC(s16, f32, 8,
991 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
992 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
993@@ -1835,7 +1835,7 @@ CVTS_FUNC(s16, f32, 8,
994 })
995 #endif
996
997-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
998+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
999 CVTS_FUNC(s32, u8, 8,
1000 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1001 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1002@@ -1893,7 +1893,7 @@ CVTS_FUNC(s32, u8, 8,
1003 })
1004 #endif
1005
1006-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1007+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1008 CVTS_FUNC(s32, s8, 8,
1009 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1010 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1011@@ -1951,7 +1951,7 @@ CVTS_FUNC(s32, s8, 8,
1012 })
1013 #endif
1014
1015-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1016+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1017 CVTS_FUNC(s32, u16, 8,
1018 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1019 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1020@@ -2007,7 +2007,7 @@ CVTS_FUNC(s32, u16, 8,
1021 })
1022 #endif
1023
1024-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1025+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1026 CVTS_FUNC(s32, s16, 8,
1027 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1028 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1029@@ -2063,7 +2063,7 @@ CVTS_FUNC(s32, s16, 8,
1030 })
1031 #endif
1032
1033-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1034+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1035 CVTS_FUNC1(s32, 8,
1036 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1037 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1038@@ -2118,7 +2118,7 @@ CVTS_FUNC1(s32, 8,
1039 })
1040 #endif
1041
1042-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1043+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1044 CVTS_FUNC(s32, f32, 8,
1045 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1046 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
1047@@ -2169,7 +2169,7 @@ CVTS_FUNC(s32, f32, 8,
1048 })
1049 #endif
1050
1051-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1052+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1053 CVTS_FUNC(f32, u8, 8,
1054 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
1055 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));
1056@@ -2239,7 +2239,7 @@ CVTS_FUNC(f32, u8, 8,
1057 })
1058 #endif
1059
1060-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1061+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1062 CVTS_FUNC(f32, s8, 8,
1063 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1064 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1065@@ -2293,7 +2293,7 @@ CVTS_FUNC(f32, s8, 8,
1066 })
1067 #endif
1068
1069-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1070+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1071 CVTS_FUNC(f32, u16, 8,
1072 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1073 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1074@@ -2345,7 +2345,7 @@ CVTS_FUNC(f32, u16, 8,
1075 })
1076 #endif
1077
1078-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1079+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1080 CVTS_FUNC(f32, s16, 8,
1081 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1082 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1083@@ -2397,7 +2397,7 @@ CVTS_FUNC(f32, s16, 8,
1084 })
1085 #endif
1086
1087-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1088+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1089 CVTS_FUNC(f32, s32, 8,
1090 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1091 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1092@@ -2448,7 +2448,7 @@ CVTS_FUNC(f32, s32, 8,
1093 })
1094 #endif
1095
1096-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1097+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1098 CVTS_FUNC1(f32, 8,
1099 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1100 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
1101diff --git a/3rdparty/carotene/src/gaussian_blur.cpp b/3rdparty/carotene/src/gaussian_blur.cpp
1102index 1b5399436f..f7b5f18d79 100644
1103--- a/3rdparty/carotene/src/gaussian_blur.cpp
1104+++ b/3rdparty/carotene/src/gaussian_blur.cpp
1105@@ -327,7 +327,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1106 u16* lidx1 = lane + x - 1*2;
1107 u16* lidx3 = lane + x + 1*2;
1108 u16* lidx4 = lane + x + 2*2;
1109-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1110+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1111 __asm__ __volatile__ (
1112 "vld2.16 {d0, d2}, [%[in0]]! \n\t"
1113 "vld2.16 {d1, d3}, [%[in0]] \n\t"
1114@@ -398,7 +398,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1115 u16* lidx1 = lane + x - 1*3;
1116 u16* lidx3 = lane + x + 1*3;
1117 u16* lidx4 = lane + x + 2*3;
1118-#if defined(__GNUC__) && defined(__arm__)
1119+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1120 __asm__ __volatile__ (
1121 "vld3.16 {d0, d2, d4}, [%[in0]]! \n\t"
1122 "vld3.16 {d1, d3, d5}, [%[in0]] \n\t"
1123@@ -482,7 +482,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1124 u16* lidx1 = lane + x - 1*4;
1125 u16* lidx3 = lane + x + 1*4;
1126 u16* lidx4 = lane + x + 2*4;
1127-#if defined(__GNUC__) && defined(__arm__)
1128+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1129 __asm__ __volatile__ (
1130 "vld4.16 {d0, d2, d4, d6}, [%[in0]]! \n\t"
1131 "vld4.16 {d1, d3, d5, d7}, [%[in0]] \n\t"
1132diff --git a/3rdparty/carotene/src/pyramid.cpp b/3rdparty/carotene/src/pyramid.cpp
1133index 8ef1268933..232ccf3efd 100644
1134--- a/3rdparty/carotene/src/pyramid.cpp
1135+++ b/3rdparty/carotene/src/pyramid.cpp
1136@@ -331,7 +331,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1137 for (; x < roiw8; x += 8)
1138 {
1139 internal::prefetch(lane + 2 * x);
1140-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1141+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1142 __asm__ (
1143 "vld2.16 {d0-d3}, [%[in0]] \n\t"
1144 "vld2.16 {d4-d7}, [%[in4]] \n\t"
1145@@ -538,7 +538,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1146 for (; x < roiw4; x += 4)
1147 {
1148 internal::prefetch(lane + 2 * x);
1149-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1150+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1151 __asm__ (
1152 "vld2.32 {d0-d3}, [%[in0]] \n\t"
1153 "vld2.32 {d4-d7}, [%[in4]] \n\t"
1154@@ -672,7 +672,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1155 std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
1156 f32* lane = internal::alignPtr(&_buf[2*cn], 32);
1157
1158-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1159+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1160 register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
1161 register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
1162
1163@@ -739,7 +739,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1164 for (; x < roiw4; x += 4)
1165 {
1166 internal::prefetch(lane + 2 * x);
1167-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1168+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1169 __asm__ __volatile__ (
1170 "vld2.32 {d0-d3}, [%[in0]] \n\t"
1171 "vld2.32 {d8-d11}, [%[in4]] \n\t"
1172@@ -932,7 +932,7 @@ pyrUp8uHorizontalConvolution:
1173 for (; x < lim; x += 8)
1174 {
1175 internal::prefetch(lane + x);
1176-#if defined(__GNUC__) && defined(__arm__)
1177+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1178 __asm__ (
1179 "vld1.16 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1180 "vld1.16 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1181@@ -973,7 +973,7 @@ pyrUp8uHorizontalConvolution:
1182 for (; x < lim; x += 24)
1183 {
1184 internal::prefetch(lane + x);
1185-#if defined(__GNUC__) && defined(__arm__)
1186+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1187 __asm__ (
1188 "vmov.u16 q9, #6 \n\t"
1189 "vld3.16 {d0, d2, d4}, [%[in0]] /*v0*/ \n\t"
1190@@ -1064,7 +1064,7 @@ pyrUp8uHorizontalConvolution:
1191 for (; x < lim; x += 8)
1192 {
1193 internal::prefetch(lane + x);
1194-#if defined(__GNUC__) && defined(__arm__)
1195+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1196 __asm__ (
1197 "vld1.16 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1198 "vld1.16 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1199@@ -1210,7 +1210,7 @@ pyrUp16sHorizontalConvolution:
1200 for (; x < lim; x += 4)
1201 {
1202 internal::prefetch(lane + x);
1203-#if defined(__GNUC__) && defined(__arm__)
1204+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1205 __asm__ (
1206 "vld1.32 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1207 "vld1.32 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1208@@ -1251,7 +1251,7 @@ pyrUp16sHorizontalConvolution:
1209 for (; x < lim; x += 12)
1210 {
1211 internal::prefetch(lane + x + 3);
1212-#if defined(__GNUC__) && defined(__arm__)
1213+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1214 __asm__ (
1215 "vmov.s32 q9, #6 \n\t"
1216 "vld3.32 {d0, d2, d4}, [%[in0]] /*v0*/ \n\t"
1217@@ -1343,7 +1343,7 @@ pyrUp16sHorizontalConvolution:
1218 for (; x < lim; x += 4)
1219 {
1220 internal::prefetch(lane + x);
1221-#if defined(__GNUC__) && defined(__arm__)
1222+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1223 __asm__ (
1224 "vld1.32 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1225 "vld1.32 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1226diff --git a/3rdparty/carotene/src/scharr.cpp b/3rdparty/carotene/src/scharr.cpp
1227index 5695804fe4..8d3b6328b1 100644
1228--- a/3rdparty/carotene/src/scharr.cpp
1229+++ b/3rdparty/carotene/src/scharr.cpp
1230@@ -109,7 +109,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
1231 internal::prefetch(srow0 + x);
1232 internal::prefetch(srow1 + x);
1233 internal::prefetch(srow2 + x);
1234-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1235+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1236 __asm__ (
1237 "vld1.8 {d0}, [%[src0]] \n\t"
1238 "vld1.8 {d2}, [%[src2]] \n\t"
1239@@ -161,7 +161,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
1240 x = 0;
1241 for( ; x < roiw8; x += 8 )
1242 {
1243-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
1244+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
1245 __asm__ (
1246 "vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
1247 "vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
1248--
12492.14.1
1250
diff --git a/meta-oe/recipes-support/opencv/opencv/0002-Do-not-enable-asm-with-clang.patch b/meta-oe/recipes-support/opencv/opencv/0002-Do-not-enable-asm-with-clang.patch
new file mode 100644
index 000000000..22e868a03
--- /dev/null
+++ b/meta-oe/recipes-support/opencv/opencv/0002-Do-not-enable-asm-with-clang.patch
@@ -0,0 +1,993 @@
1From 333f60165b6737588eb975a5e4393d847011a1cd Mon Sep 17 00:00:00 2001
2From: Khem Raj <raj.khem@gmail.com>
3Date: Tue, 19 Sep 2017 18:07:35 -0700
4Subject: [PATCH 2/2] Do not enable asm with clang
5
6clang pretends to be gcc 4.2.0 which means we will
7use inline asm for no reason, instead of builtins
8on clang when possible.
9
10Signed-off-by: Khem Raj <raj.khem@gmail.com>
11---
12Upstream-Status: Submitted
13 3rdparty/carotene/src/channel_extract.cpp | 4 +-
14 3rdparty/carotene/src/channels_combine.cpp | 2 +-
15 3rdparty/carotene/src/colorconvert.cpp | 78 +++++++++++++++---------------
16 3rdparty/carotene/src/convert.cpp | 54 ++++++++++-----------
17 3rdparty/carotene/src/convert_scale.cpp | 56 ++++++++++-----------
18 3rdparty/carotene/src/gaussian_blur.cpp | 2 +-
19 3rdparty/carotene/src/pyramid.cpp | 8 +--
20 3rdparty/carotene/src/scharr.cpp | 4 +-
21 8 files changed, 104 insertions(+), 104 deletions(-)
22
23diff --git a/3rdparty/carotene/src/channel_extract.cpp b/3rdparty/carotene/src/channel_extract.cpp
24index 8238a3ece8..ff4fb3770c 100644
25--- a/3rdparty/carotene/src/channel_extract.cpp
26+++ b/3rdparty/carotene/src/channel_extract.cpp
27@@ -231,7 +231,7 @@ void extract4(const Size2D &size,
28 srcStride == dst2Stride && \
29 srcStride == dst3Stride &&
30
31-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
32+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
33
34 #define SPLIT_ASM2(sgn, bits) __asm__ ( \
35 "vld2." #bits " {d0, d2}, [%[in0]] \n\t" \
36@@ -351,7 +351,7 @@ void extract4(const Size2D &size,
37 } \
38 }
39
40-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
41+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
42
43 #define ALPHA_QUAD(sgn, bits) { \
44 internal::prefetch(src + sj); \
45diff --git a/3rdparty/carotene/src/channels_combine.cpp b/3rdparty/carotene/src/channels_combine.cpp
46index fc98fb9181..5d9251d51c 100644
47--- a/3rdparty/carotene/src/channels_combine.cpp
48+++ b/3rdparty/carotene/src/channels_combine.cpp
49@@ -77,7 +77,7 @@ namespace CAROTENE_NS {
50 dstStride == src2Stride && \
51 dstStride == src3Stride &&
52
53-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
54+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
55
56 #define MERGE_ASM2(sgn, bits) __asm__ ( \
57 "vld1." #bits " {d0-d1}, [%[in0]] \n\t" \
58diff --git a/3rdparty/carotene/src/colorconvert.cpp b/3rdparty/carotene/src/colorconvert.cpp
59index 26ae54b15c..d3a40fe64e 100644
60--- a/3rdparty/carotene/src/colorconvert.cpp
61+++ b/3rdparty/carotene/src/colorconvert.cpp
62@@ -97,7 +97,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
63 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
64 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
65
66-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
67+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
68 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
69 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
70 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
71@@ -116,7 +116,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
72 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
73 size_t sj = 0u, dj = 0u;
74
75-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
76+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
77 for (; dj < roiw8; sj += 24, dj += 8)
78 {
79 internal::prefetch(src + sj);
80@@ -198,7 +198,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
81 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
82 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
83
84-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
85+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
86 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
87 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
88 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
89@@ -217,7 +217,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
90 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
91 size_t sj = 0u, dj = 0u;
92
93-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
94+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
95 for (; dj < roiw8; sj += 32, dj += 8)
96 {
97 internal::prefetch(src + sj);
98@@ -300,7 +300,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
99 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
100 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
101
102-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
103+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
104 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
105 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
106 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
107@@ -319,7 +319,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
108 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
109 size_t sj = 0u, dj = 0u;
110
111-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
112+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
113 for (; dj < roiw8; sj += 24, dj += 8)
114 {
115 internal::prefetch(src + sj);
116@@ -402,7 +402,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
117 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
118 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
119
120-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
121+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
122 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
123 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
124 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
125@@ -421,7 +421,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
126 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
127 size_t sj = 0u, dj = 0u;
128
129-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
130+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
131 for (; dj < roiw8; sj += 32, dj += 8)
132 {
133 internal::prefetch(src + sj);
134@@ -512,7 +512,7 @@ void gray2rgb(const Size2D &size,
135 for (; sj < roiw16; sj += 16, dj += 48)
136 {
137 internal::prefetch(src + sj);
138-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
139+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
140 __asm__ (
141 "vld1.8 {d0-d1}, [%[in0]] \n\t"
142 "vmov.8 q1, q0 \n\t"
143@@ -538,7 +538,7 @@ void gray2rgb(const Size2D &size,
144
145 if (sj < roiw8)
146 {
147-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
148+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
149 __asm__ (
150 "vld1.8 {d0}, [%[in]] \n\t"
151 "vmov.8 d1, d0 \n\t"
152@@ -584,7 +584,7 @@ void gray2rgbx(const Size2D &size,
153 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
154 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
155
156-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
157+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
158 register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
159 #else
160 uint8x16x4_t vRgba;
161@@ -602,7 +602,7 @@ void gray2rgbx(const Size2D &size,
162 for (; sj < roiw16; sj += 16, dj += 64)
163 {
164 internal::prefetch(src + sj);
165-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
166+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
167 __asm__ (
168 "vld1.8 {d0-d1}, [%[in0]] \n\t"
169 "vmov.8 q1, q0 \n\t"
170@@ -628,7 +628,7 @@ void gray2rgbx(const Size2D &size,
171
172 if (sj < roiw8)
173 {
174-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
175+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
176 __asm__ (
177 "vld1.8 {d5}, [%[in]] \n\t"
178 "vmov.8 d6, d5 \n\t"
179@@ -1409,7 +1409,7 @@ inline void convertToHSV(const s32 r, const s32 g, const s32 b,
180 "d24","d25","d26","d27","d28","d29","d30","d31" \
181 );
182
183-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
184+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
185
186 #define YCRCB_CONSTS \
187 register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \
188@@ -1555,7 +1555,7 @@ inline uint8x8x3_t convertToYCrCb( const int16x8_t& vR, const int16x8_t& vG, con
189 #define COEFF_G ( 8663)
190 #define COEFF_B (-17705)
191
192-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
193+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
194 #define YUV420ALPHA3_CONST
195 #define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
196 #define YUV420ALPHA3_CONVERT
197@@ -1852,7 +1852,7 @@ void rgb2hsv(const Size2D &size,
198 #ifdef CAROTENE_NEON
199 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
200 const s32 hsv_shift = 12;
201-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
202+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
203 register const f32 vsdiv_table = f32(255 << hsv_shift);
204 register f32 vhdiv_table = f32(hrange << hsv_shift);
205 register const s32 vhrange = hrange;
206@@ -1871,7 +1871,7 @@ void rgb2hsv(const Size2D &size,
207 for (; j < roiw8; sj += 24, dj += 24, j += 8)
208 {
209 internal::prefetch(src + sj);
210-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
211+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
212 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
213 #else
214 uint8x8x3_t vRgb = vld3_u8(src + sj);
215@@ -1904,7 +1904,7 @@ void rgbx2hsv(const Size2D &size,
216 #ifdef CAROTENE_NEON
217 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
218 const s32 hsv_shift = 12;
219-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
220+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
221 register const f32 vsdiv_table = f32(255 << hsv_shift);
222 register f32 vhdiv_table = f32(hrange << hsv_shift);
223 register const s32 vhrange = hrange;
224@@ -1923,7 +1923,7 @@ void rgbx2hsv(const Size2D &size,
225 for (; j < roiw8; sj += 32, dj += 24, j += 8)
226 {
227 internal::prefetch(src + sj);
228-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
229+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
230 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
231 #else
232 uint8x8x4_t vRgb = vld4_u8(src + sj);
233@@ -1956,7 +1956,7 @@ void bgr2hsv(const Size2D &size,
234 #ifdef CAROTENE_NEON
235 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
236 const s32 hsv_shift = 12;
237-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
238+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
239 register const f32 vsdiv_table = f32(255 << hsv_shift);
240 register f32 vhdiv_table = f32(hrange << hsv_shift);
241 register const s32 vhrange = hrange;
242@@ -1975,7 +1975,7 @@ void bgr2hsv(const Size2D &size,
243 for (; j < roiw8; sj += 24, dj += 24, j += 8)
244 {
245 internal::prefetch(src + sj);
246-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
247+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
248 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
249 #else
250 uint8x8x3_t vRgb = vld3_u8(src + sj);
251@@ -2008,7 +2008,7 @@ void bgrx2hsv(const Size2D &size,
252 #ifdef CAROTENE_NEON
253 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
254 const s32 hsv_shift = 12;
255-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
256+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
257 register const f32 vsdiv_table = f32(255 << hsv_shift);
258 register f32 vhdiv_table = f32(hrange << hsv_shift);
259 register const s32 vhrange = hrange;
260@@ -2027,7 +2027,7 @@ void bgrx2hsv(const Size2D &size,
261 for (; j < roiw8; sj += 32, dj += 24, j += 8)
262 {
263 internal::prefetch(src + sj);
264-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
265+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
266 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
267 #else
268 uint8x8x4_t vRgb = vld4_u8(src + sj);
269@@ -2068,7 +2068,7 @@ void rgbx2bgr565(const Size2D &size,
270 for (; j < roiw16; sj += 64, dj += 32, j += 16)
271 {
272 internal::prefetch(src + sj);
273-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
274+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
275 __asm__ (
276 "vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
277 "vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
278@@ -2122,7 +2122,7 @@ void rgb2bgr565(const Size2D &size,
279 for (; j < roiw16; sj += 48, dj += 32, j += 16)
280 {
281 internal::prefetch(src + sj);
282-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
283+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
284 __asm__ (
285 "vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
286 "vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
287@@ -2176,7 +2176,7 @@ void rgbx2rgb565(const Size2D &size,
288 for (; j < roiw16; sj += 64, dj += 32, j += 16)
289 {
290 internal::prefetch(src + sj);
291-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
292+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
293 __asm__ (
294 "vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
295 "vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"
296@@ -2230,7 +2230,7 @@ void rgb2rgb565(const Size2D &size,
297 for (; j < roiw16; sj += 48, dj += 32, j += 16)
298 {
299 internal::prefetch(src + sj);
300-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
301+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
302 __asm__ (
303 "vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
304 "vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
305@@ -2285,7 +2285,7 @@ void rgb2ycrcb(const Size2D &size,
306 for (; j < roiw8; sj += 24, dj += 24, j += 8)
307 {
308 internal::prefetch(src + sj);
309-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
310+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
311 CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
312 #else
313 uint8x8x3_t vRgb = vld3_u8(src + sj);
314@@ -2329,7 +2329,7 @@ void rgbx2ycrcb(const Size2D &size,
315 for (; j < roiw8; sj += 32, dj += 24, j += 8)
316 {
317 internal::prefetch(src + sj);
318-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
319+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
320 CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
321 #else
322 uint8x8x4_t vRgba = vld4_u8(src + sj);
323@@ -2373,7 +2373,7 @@ void bgr2ycrcb(const Size2D &size,
324 for (; j < roiw8; sj += 24, dj += 24, j += 8)
325 {
326 internal::prefetch(src + sj);
327-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
328+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
329 CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
330 #else
331 uint8x8x3_t vBgr = vld3_u8(src + sj);
332@@ -2417,7 +2417,7 @@ void bgrx2ycrcb(const Size2D &size,
333 for (; j < roiw8; sj += 32, dj += 24, j += 8)
334 {
335 internal::prefetch(src + sj);
336-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
337+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
338 CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
339 #else
340 uint8x8x4_t vBgra = vld4_u8(src + sj);
341@@ -2499,7 +2499,7 @@ void yuv420sp2rgb(const Size2D &size,
342 internal::prefetch(uv + j);
343 internal::prefetch(y1 + j);
344 internal::prefetch(y2 + j);
345-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
346+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
347 CONVERTYUV420TORGB(3, d1, d0, q5, q6)
348 #else
349 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
350@@ -2545,7 +2545,7 @@ void yuv420sp2rgbx(const Size2D &size,
351 internal::prefetch(uv + j);
352 internal::prefetch(y1 + j);
353 internal::prefetch(y2 + j);
354-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
355+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
356 CONVERTYUV420TORGB(4, d1, d0, q5, q6)
357 #else
358 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
359@@ -2591,7 +2591,7 @@ void yuv420i2rgb(const Size2D &size,
360 internal::prefetch(uv + j);
361 internal::prefetch(y1 + j);
362 internal::prefetch(y2 + j);
363-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
364+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
365 CONVERTYUV420TORGB(3, d0, d1, q5, q6)
366 #else
367 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
368@@ -2637,7 +2637,7 @@ void yuv420i2rgbx(const Size2D &size,
369 internal::prefetch(uv + j);
370 internal::prefetch(y1 + j);
371 internal::prefetch(y2 + j);
372-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
373+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
374 CONVERTYUV420TORGB(4, d0, d1, q5, q6)
375 #else
376 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
377@@ -2683,7 +2683,7 @@ void yuv420sp2bgr(const Size2D &size,
378 internal::prefetch(uv + j);
379 internal::prefetch(y1 + j);
380 internal::prefetch(y2 + j);
381-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
382+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
383 CONVERTYUV420TORGB(3, d1, d0, q6, q5)
384 #else
385 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
386@@ -2729,7 +2729,7 @@ void yuv420sp2bgrx(const Size2D &size,
387 internal::prefetch(uv + j);
388 internal::prefetch(y1 + j);
389 internal::prefetch(y2 + j);
390-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
391+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
392 CONVERTYUV420TORGB(4, d1, d0, q6, q5)
393 #else
394 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
395@@ -2775,7 +2775,7 @@ void yuv420i2bgr(const Size2D &size,
396 internal::prefetch(uv + j);
397 internal::prefetch(y1 + j);
398 internal::prefetch(y2 + j);
399-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
400+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
401 CONVERTYUV420TORGB(3, d0, d1, q6, q5)
402 #else
403 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
404@@ -2821,7 +2821,7 @@ void yuv420i2bgrx(const Size2D &size,
405 internal::prefetch(uv + j);
406 internal::prefetch(y1 + j);
407 internal::prefetch(y2 + j);
408-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
409+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
410 CONVERTYUV420TORGB(4, d0, d1, q6, q5)
411 #else
412 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
413diff --git a/3rdparty/carotene/src/convert.cpp b/3rdparty/carotene/src/convert.cpp
414index 64b6db78ab..f0c2d153f2 100644
415--- a/3rdparty/carotene/src/convert.cpp
416+++ b/3rdparty/carotene/src/convert.cpp
417@@ -101,7 +101,7 @@ CVT_FUNC(u8, s8, 16,
418 }
419 })
420
421-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
422+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
423 CVT_FUNC(u8, u16, 16,
424 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
425 {
426@@ -135,7 +135,7 @@ CVT_FUNC(u8, u16, 16,
427 })
428 #endif
429
430-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
431+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
432 CVT_FUNC(u8, s32, 16,
433 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
434 register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);
435@@ -173,7 +173,7 @@ CVT_FUNC(u8, s32, 16,
436 })
437 #endif
438
439-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
440+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
441 CVT_FUNC(u8, f32, 16,
442 ,
443 {
444@@ -248,7 +248,7 @@ CVT_FUNC(s8, u8, 16,
445 }
446 })
447
448-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
449+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
450 CVT_FUNC(s8, u16, 16,
451 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
452 {
453@@ -284,7 +284,7 @@ CVT_FUNC(s8, u16, 16,
454 })
455 #endif
456
457-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
458+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
459 CVT_FUNC(s8, s16, 16,
460 ,
461 {
462@@ -323,7 +323,7 @@ CVT_FUNC(s8, s16, 16,
463 })
464 #endif
465
466-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
467+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
468 CVT_FUNC(s8, s32, 16,
469 ,
470 {
471@@ -377,7 +377,7 @@ CVT_FUNC(s8, s32, 16,
472 })
473 #endif
474
475-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
476+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
477 CVT_FUNC(s8, f32, 16,
478 ,
479 {
480@@ -440,7 +440,7 @@ CVT_FUNC(s8, f32, 16,
481 })
482 #endif
483
484-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
485+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
486 CVT_FUNC(u16, u8, 16,
487 ,
488 {
489@@ -479,7 +479,7 @@ CVT_FUNC(u16, u8, 16,
490 })
491 #endif
492
493-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
494+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
495 CVT_FUNC(u16, s8, 16,
496 register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
497 {
498@@ -522,7 +522,7 @@ CVT_FUNC(u16, s8, 16,
499 })
500 #endif
501
502-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
503+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
504 CVT_FUNC(u16, s16, 8,
505 register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
506 {
507@@ -555,7 +555,7 @@ CVT_FUNC(u16, s16, 8,
508 })
509 #endif
510
511-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
512+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
513 CVT_FUNC(u16, s32, 8,
514 register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
515 {
516@@ -589,7 +589,7 @@ CVT_FUNC(u16, s32, 8,
517 })
518 #endif
519
520-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
521+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
522 CVT_FUNC(u16, f32, 8,
523 ,
524 {
525@@ -633,7 +633,7 @@ CVT_FUNC(u16, f32, 8,
526 })
527 #endif
528
529-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
530+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
531 CVT_FUNC(s16, u8, 16,
532 ,
533 {
534@@ -672,7 +672,7 @@ CVT_FUNC(s16, u8, 16,
535 })
536 #endif
537
538-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
539+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
540 CVT_FUNC(s16, s8, 16,
541 ,
542 {
543@@ -711,7 +711,7 @@ CVT_FUNC(s16, s8, 16,
544 })
545 #endif
546
547-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
548+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
549 CVT_FUNC(s16, u16, 8,
550 register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
551 {
552@@ -747,7 +747,7 @@ CVT_FUNC(s16, u16, 8,
553 })
554 #endif
555
556-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
557+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
558 CVT_FUNC(s16, s32, 8,
559 ,
560 {
561@@ -786,7 +786,7 @@ CVT_FUNC(s16, s32, 8,
562 })
563 #endif
564
565-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
566+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
567 CVT_FUNC(s16, f32, 8,
568 ,
569 {
570@@ -829,7 +829,7 @@ CVT_FUNC(s16, f32, 8,
571 })
572 #endif
573
574-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
575+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
576 CVT_FUNC(s32, u8, 8,
577 ,
578 {
579@@ -870,7 +870,7 @@ CVT_FUNC(s32, u8, 8,
580 })
581 #endif
582
583-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
584+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
585 CVT_FUNC(s32, s8, 8,
586 ,
587 {
588@@ -911,7 +911,7 @@ CVT_FUNC(s32, s8, 8,
589 })
590 #endif
591
592-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
593+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
594 CVT_FUNC(s32, u16, 8,
595 ,
596 {
597@@ -950,7 +950,7 @@ CVT_FUNC(s32, u16, 8,
598 })
599 #endif
600
601-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
602+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
603 CVT_FUNC(s32, s16, 8,
604 ,
605 {
606@@ -989,7 +989,7 @@ CVT_FUNC(s32, s16, 8,
607 })
608 #endif
609
610-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
611+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
612 CVT_FUNC(s32, f32, 8,
613 ,
614 {
615@@ -1034,7 +1034,7 @@ CVT_FUNC(s32, f32, 8,
616 })
617 #endif
618
619-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
620+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
621 CVT_FUNC(f32, u8, 8,
622 register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
623 register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,
624@@ -1101,7 +1101,7 @@ CVT_FUNC(f32, u8, 8,
625 })
626 #endif
627
628-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
629+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
630 CVT_FUNC(f32, s8, 8,
631 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
632 {
633@@ -1153,7 +1153,7 @@ CVT_FUNC(f32, s8, 8,
634 })
635 #endif
636
637-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
638+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
639 CVT_FUNC(f32, u16, 8,
640 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
641 {
642@@ -1212,7 +1212,7 @@ CVT_FUNC(f32, u16, 8,
643 })
644 #endif
645
646-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
647+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
648 CVT_FUNC(f32, s16, 8,
649 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
650 {
651@@ -1271,7 +1271,7 @@ CVT_FUNC(f32, s16, 8,
652 })
653 #endif
654
655-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
656+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
657 CVT_FUNC(f32, s32, 8,
658 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
659 {
660diff --git a/3rdparty/carotene/src/convert_scale.cpp b/3rdparty/carotene/src/convert_scale.cpp
661index ae41a985c8..d599d24c1e 100644
662--- a/3rdparty/carotene/src/convert_scale.cpp
663+++ b/3rdparty/carotene/src/convert_scale.cpp
664@@ -473,7 +473,7 @@ CVTS_FUNC(u8, s16, 16,
665 })
666 #endif
667
668-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
669+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
670 CVTS_FUNC(u8, s32, 16,
671 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
672 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
673@@ -562,7 +562,7 @@ CVTS_FUNC(u8, s32, 16,
674 })
675 #endif
676
677-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
678+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
679 CVTS_FUNC(u8, f32, 16,
680 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
681 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
682@@ -985,7 +985,7 @@ CVTS_FUNC(s8, s16, 16,
683 })
684 #endif
685
686-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
687+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
688 CVTS_FUNC(s8, s32, 16,
689 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
690 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
691@@ -1074,7 +1074,7 @@ CVTS_FUNC(s8, s32, 16,
692 })
693 #endif
694
695-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
696+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
697 CVTS_FUNC(s8, f32, 16,
698 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
699 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
700@@ -1155,7 +1155,7 @@ CVTS_FUNC(s8, f32, 16,
701 })
702 #endif
703
704-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
705+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
706 CVTS_FUNC(u16, u8, 16,
707 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
708 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
709@@ -1214,7 +1214,7 @@ CVTS_FUNC(u16, u8, 16,
710 })
711 #endif
712
713-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
714+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
715 CVTS_FUNC(u16, s8, 16,
716 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
717 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
718@@ -1273,7 +1273,7 @@ CVTS_FUNC(u16, s8, 16,
719 })
720 #endif
721
722-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
723+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
724 CVTS_FUNC1(u16, 16,
725 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
726 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
727@@ -1330,7 +1330,7 @@ CVTS_FUNC1(u16, 16,
728 })
729 #endif
730
731-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
732+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
733 CVTS_FUNC(u16, s16, 8,
734 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
735 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
736@@ -1387,7 +1387,7 @@ CVTS_FUNC(u16, s16, 8,
737 })
738 #endif
739
740-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
741+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
742 CVTS_FUNC(u16, s32, 8,
743 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
744 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
745@@ -1443,7 +1443,7 @@ CVTS_FUNC(u16, s32, 8,
746 })
747 #endif
748
749-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
750+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
751 CVTS_FUNC(u16, f32, 8,
752 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
753 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
754@@ -1495,7 +1495,7 @@ CVTS_FUNC(u16, f32, 8,
755 })
756 #endif
757
758-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
759+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
760 CVTS_FUNC(s16, u8, 16,
761 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
762 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
763@@ -1554,7 +1554,7 @@ CVTS_FUNC(s16, u8, 16,
764 })
765 #endif
766
767-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
768+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
769 CVTS_FUNC(s16, s8, 16,
770 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
771 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
772@@ -1613,7 +1613,7 @@ CVTS_FUNC(s16, s8, 16,
773 })
774 #endif
775
776-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
777+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
778 CVTS_FUNC(s16, u16, 8,
779 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
780 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
781@@ -1670,7 +1670,7 @@ CVTS_FUNC(s16, u16, 8,
782 })
783 #endif
784
785-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
786+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
787 CVTS_FUNC1(s16, 16,
788 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
789 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
790@@ -1727,7 +1727,7 @@ CVTS_FUNC1(s16, 16,
791 })
792 #endif
793
794-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
795+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
796 CVTS_FUNC(s16, s32, 8,
797 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
798 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
799@@ -1783,7 +1783,7 @@ CVTS_FUNC(s16, s32, 8,
800 })
801 #endif
802
803-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
804+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
805 CVTS_FUNC(s16, f32, 8,
806 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
807 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
808@@ -1835,7 +1835,7 @@ CVTS_FUNC(s16, f32, 8,
809 })
810 #endif
811
812-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
813+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
814 CVTS_FUNC(s32, u8, 8,
815 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
816 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
817@@ -1893,7 +1893,7 @@ CVTS_FUNC(s32, u8, 8,
818 })
819 #endif
820
821-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
822+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
823 CVTS_FUNC(s32, s8, 8,
824 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
825 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
826@@ -1951,7 +1951,7 @@ CVTS_FUNC(s32, s8, 8,
827 })
828 #endif
829
830-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
831+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
832 CVTS_FUNC(s32, u16, 8,
833 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
834 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
835@@ -2007,7 +2007,7 @@ CVTS_FUNC(s32, u16, 8,
836 })
837 #endif
838
839-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
840+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
841 CVTS_FUNC(s32, s16, 8,
842 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
843 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
844@@ -2063,7 +2063,7 @@ CVTS_FUNC(s32, s16, 8,
845 })
846 #endif
847
848-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
849+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
850 CVTS_FUNC1(s32, 8,
851 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
852 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
853@@ -2118,7 +2118,7 @@ CVTS_FUNC1(s32, 8,
854 })
855 #endif
856
857-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
858+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
859 CVTS_FUNC(s32, f32, 8,
860 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
861 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
862@@ -2169,7 +2169,7 @@ CVTS_FUNC(s32, f32, 8,
863 })
864 #endif
865
866-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
867+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
868 CVTS_FUNC(f32, u8, 8,
869 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
870 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));
871@@ -2239,7 +2239,7 @@ CVTS_FUNC(f32, u8, 8,
872 })
873 #endif
874
875-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
876+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
877 CVTS_FUNC(f32, s8, 8,
878 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
879 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
880@@ -2293,7 +2293,7 @@ CVTS_FUNC(f32, s8, 8,
881 })
882 #endif
883
884-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
885+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
886 CVTS_FUNC(f32, u16, 8,
887 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
888 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
889@@ -2345,7 +2345,7 @@ CVTS_FUNC(f32, u16, 8,
890 })
891 #endif
892
893-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
894+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
895 CVTS_FUNC(f32, s16, 8,
896 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
897 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
898@@ -2397,7 +2397,7 @@ CVTS_FUNC(f32, s16, 8,
899 })
900 #endif
901
902-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
903+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
904 CVTS_FUNC(f32, s32, 8,
905 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
906 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
907@@ -2448,7 +2448,7 @@ CVTS_FUNC(f32, s32, 8,
908 })
909 #endif
910
911-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
912+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
913 CVTS_FUNC1(f32, 8,
914 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
915 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
916diff --git a/3rdparty/carotene/src/gaussian_blur.cpp b/3rdparty/carotene/src/gaussian_blur.cpp
917index f7b5f18d79..e5aa8fc75b 100644
918--- a/3rdparty/carotene/src/gaussian_blur.cpp
919+++ b/3rdparty/carotene/src/gaussian_blur.cpp
920@@ -327,7 +327,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
921 u16* lidx1 = lane + x - 1*2;
922 u16* lidx3 = lane + x + 1*2;
923 u16* lidx4 = lane + x + 2*2;
924-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
925+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
926 __asm__ __volatile__ (
927 "vld2.16 {d0, d2}, [%[in0]]! \n\t"
928 "vld2.16 {d1, d3}, [%[in0]] \n\t"
929diff --git a/3rdparty/carotene/src/pyramid.cpp b/3rdparty/carotene/src/pyramid.cpp
930index 232ccf3efd..d4e32ea50f 100644
931--- a/3rdparty/carotene/src/pyramid.cpp
932+++ b/3rdparty/carotene/src/pyramid.cpp
933@@ -331,7 +331,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
934 for (; x < roiw8; x += 8)
935 {
936 internal::prefetch(lane + 2 * x);
937-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
938+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
939 __asm__ (
940 "vld2.16 {d0-d3}, [%[in0]] \n\t"
941 "vld2.16 {d4-d7}, [%[in4]] \n\t"
942@@ -538,7 +538,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
943 for (; x < roiw4; x += 4)
944 {
945 internal::prefetch(lane + 2 * x);
946-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
947+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
948 __asm__ (
949 "vld2.32 {d0-d3}, [%[in0]] \n\t"
950 "vld2.32 {d4-d7}, [%[in4]] \n\t"
951@@ -672,7 +672,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
952 std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
953 f32* lane = internal::alignPtr(&_buf[2*cn], 32);
954
955-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
956+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
957 register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
958 register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
959
960@@ -739,7 +739,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
961 for (; x < roiw4; x += 4)
962 {
963 internal::prefetch(lane + 2 * x);
964-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
965+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
966 __asm__ __volatile__ (
967 "vld2.32 {d0-d3}, [%[in0]] \n\t"
968 "vld2.32 {d8-d11}, [%[in4]] \n\t"
969diff --git a/3rdparty/carotene/src/scharr.cpp b/3rdparty/carotene/src/scharr.cpp
970index 8d3b6328b1..36f6b2276e 100644
971--- a/3rdparty/carotene/src/scharr.cpp
972+++ b/3rdparty/carotene/src/scharr.cpp
973@@ -109,7 +109,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
974 internal::prefetch(srow0 + x);
975 internal::prefetch(srow1 + x);
976 internal::prefetch(srow2 + x);
977-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
978+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
979 __asm__ (
980 "vld1.8 {d0}, [%[src0]] \n\t"
981 "vld1.8 {d2}, [%[src2]] \n\t"
982@@ -161,7 +161,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
983 x = 0;
984 for( ; x < roiw8; x += 8 )
985 {
986-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
987+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
988 __asm__ (
989 "vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
990 "vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
991--
9922.14.1
993
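
The reasoning in the commit message above can be checked with a minimal stand-alone program (illustrative only, not part of either patch): clang reports itself through the GCC compatibility macros as version 4.2, so a bare "__GNUC__ == 4 && __GNUC_MINOR__ < 7" test also matches clang and would select the 32-bit inline-asm path there; the added !defined(__clang__) keeps clang on the intrinsics path.

    // Illustrative sketch of the guard used throughout carotene after this patch.
    // Build with g++ or clang++ and observe which branch each compiler takes.
    #include <cstdio>

    int main()
    {
    #if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
        std::puts("old 32-bit GCC: inline __asm__ NEON path");
    #else
        std::puts("clang, AArch64, or newer GCC: NEON intrinsics path");
    #endif
        return 0;
    }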