summaryrefslogtreecommitdiffstats
path: root/meta/recipes-core/glibc/glibc/CVE-2020-6096.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta/recipes-core/glibc/glibc/CVE-2020-6096.patch')
-rw-r--r-- meta/recipes-core/glibc/glibc/CVE-2020-6096.patch | 112
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/meta/recipes-core/glibc/glibc/CVE-2020-6096.patch b/meta/recipes-core/glibc/glibc/CVE-2020-6096.patch
new file mode 100644
index 0000000000..9c26f76432
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2020-6096.patch
@@ -0,0 +1,112 @@
1From beea361050728138b82c57dda0c4810402d342b9 Mon Sep 17 00:00:00 2001
2From: Alexander Anisimov <a.anisimov@omprussia.ru>
3Date: Wed, 8 Jul 2020 14:18:31 +0200
4Subject: [PATCH] arm: CVE-2020-6096: Fix multiarch memcpy for negative length
5 [BZ #25620]
6
7Unsigned branch instructions could be used for r2 to fix the wrong
8behavior when a negative length is passed to memcpy.
9This commit fixes the armv7 version.
10
11Upstream-Status: Backport
12CVE: CVE-2020-6096 patch #1
13Signed-off-by: Armin Kuster <akuster@mvista.com>
14
15---
16 sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
17 1 file changed, 11 insertions(+), 11 deletions(-)
18
19diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
20index bf4ac7077f..379bb56fc9 100644
21--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
22+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
23@@ -268,7 +268,7 @@ ENTRY(memcpy)
24
25 mov dst, dstin /* Preserve dstin, we need to return it. */
26 cmp count, #64
27- bge .Lcpy_not_short
28+ bhs .Lcpy_not_short
29 /* Deal with small copies quickly by dropping straight into the
30 exit block. */
31
32@@ -351,10 +351,10 @@ ENTRY(memcpy)
33
34 1:
35 subs tmp2, count, #64 /* Use tmp2 for count. */
36- blt .Ltail63aligned
37+ blo .Ltail63aligned
38
39 cmp tmp2, #512
40- bge .Lcpy_body_long
41+ bhs .Lcpy_body_long
42
43 .Lcpy_body_medium: /* Count in tmp2. */
44 #ifdef USE_VFP
45@@ -378,7 +378,7 @@ ENTRY(memcpy)
46 add src, src, #64
47 vstr d1, [dst, #56]
48 add dst, dst, #64
49- bge 1b
50+ bhs 1b
51 tst tmp2, #0x3f
52 beq .Ldone
53
54@@ -412,7 +412,7 @@ ENTRY(memcpy)
55 ldrd A_l, A_h, [src, #64]!
56 strd A_l, A_h, [dst, #64]!
57 subs tmp2, tmp2, #64
58- bge 1b
59+ bhs 1b
60 tst tmp2, #0x3f
61 bne 1f
62 ldr tmp2,[sp], #FRAME_SIZE
63@@ -482,7 +482,7 @@ ENTRY(memcpy)
64 add src, src, #32
65
66 subs tmp2, tmp2, #prefetch_lines * 64 * 2
67- blt 2f
68+ blo 2f
69 1:
70 cpy_line_vfp d3, 0
71 cpy_line_vfp d4, 64
72@@ -494,7 +494,7 @@ ENTRY(memcpy)
73 add dst, dst, #2 * 64
74 add src, src, #2 * 64
75 subs tmp2, tmp2, #prefetch_lines * 64
76- bge 1b
77+ bhs 1b
78
79 2:
80 cpy_tail_vfp d3, 0
81@@ -615,8 +615,8 @@ ENTRY(memcpy)
82 1:
83 pld [src, #(3 * 64)]
84 subs count, count, #64
85- ldrmi tmp2, [sp], #FRAME_SIZE
86- bmi .Ltail63unaligned
87+ ldrlo tmp2, [sp], #FRAME_SIZE
88+ blo .Ltail63unaligned
89 pld [src, #(4 * 64)]
90
91 #ifdef USE_NEON
92@@ -633,7 +633,7 @@ ENTRY(memcpy)
93 neon_load_multi d0-d3, src
94 neon_load_multi d4-d7, src
95 subs count, count, #64
96- bmi 2f
97+ blo 2f
98 1:
99 pld [src, #(4 * 64)]
100 neon_store_multi d0-d3, dst
101@@ -641,7 +641,7 @@ ENTRY(memcpy)
102 neon_store_multi d4-d7, dst
103 neon_load_multi d4-d7, src
104 subs count, count, #64
105- bpl 1b
106+ bhs 1b
107 2:
108 neon_store_multi d0-d3, dst
109 neon_store_multi d4-d7, dst
110--
1112.17.1
112