path: root/meta/recipes-connectivity/openssl/openssl/CVE-2023-6129.patch
From 5b139f95c9a47a55a0c54100f3837b1eee942b04 Mon Sep 17 00:00:00 2001
From: Rohan McLure <rmclure@linux.ibm.com>
Date: Thu, 4 Jan 2024 10:25:50 +0100
Subject: [PATCH] poly1305-ppc.pl: Fix vector register clobbering

Fixes CVE-2023-6129

The POLY1305 MAC (message authentication code) implementation in OpenSSL for
PowerPC CPUs saves the contents of vector registers in a different order
than they are restored. Thus the contents of some of these vector registers
are corrupted when returning to the caller. The vulnerable code is used only
on newer PowerPC processors supporting the PowerISA 2.07 instructions.
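
Illustrative sketch only (not part of the upstream change, and assuming the
usual setup for this interleaved pattern, where r10 and r11 start 16 bytes
apart and each advances by 32 after every 16-byte stvx/lvx): strict r10/r11
alternation places consecutive vector registers in consecutive stack slots,
whereas reusing the same pointer twice in a row skips a slot, so a restore
sequence with a different alternation reloads some registers from slots that
hold other registers' contents.

	# strict alternation: v20..v23 land in four consecutive 16-byte slots
	stvx	v20,r10,$sp		# slot 0
	addi	r10,r10,32
	stvx	v21,r11,$sp		# slot 1
	addi	r11,r11,32
	stvx	v22,r10,$sp		# slot 2
	addi	r10,r10,32
	stvx	v23,r11,$sp		# slot 3

	# broken alternation: r10 reused back to back, so slot 3 is never
	# written and a strictly alternating restore reloads v23 from a slot
	# that holds a different register's value
	stvx	v22,r10,$sp		# slot 2
	addi	r10,r10,32
	stvx	v23,r10,$sp		# slot 4 (slot 3 skipped)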

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/23200)

(cherry picked from commit 8d847a3ffd4f0b17ee33962cf69c36224925b34f)

CVE: CVE-2023-6129
Upstream-Status: Backport
Signed-off-by: Ross Burton <ross.burton@arm.com>
---
 crypto/poly1305/asm/poly1305-ppc.pl | 42 ++++++++++++++---------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/crypto/poly1305/asm/poly1305-ppc.pl b/crypto/poly1305/asm/poly1305-ppc.pl
index 9f86134d923fb..2e601bb9c24be 100755
--- a/crypto/poly1305/asm/poly1305-ppc.pl
+++ b/crypto/poly1305/asm/poly1305-ppc.pl
@@ -744,7 +744,7 @@
 my $LOCALS= 6*$SIZE_T;
 my $VSXFRAME = $LOCALS + 6*$SIZE_T;
    $VSXFRAME += 128;	# local variables
-   $VSXFRAME += 13*16;	# v20-v31 offload
+   $VSXFRAME += 12*16;	# v20-v31 offload
 
 my $BIG_ENDIAN = ($flavour !~ /le/) ? 4 : 0;
 
@@ -919,12 +919,12 @@
 	addi	r11,r11,32
 	stvx	v22,r10,$sp
 	addi	r10,r10,32
-	stvx	v23,r10,$sp
-	addi	r10,r10,32
-	stvx	v24,r11,$sp
+	stvx	v23,r11,$sp
 	addi	r11,r11,32
-	stvx	v25,r10,$sp
+	stvx	v24,r10,$sp
 	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
 	stvx	v26,r10,$sp
 	addi	r10,r10,32
 	stvx	v27,r11,$sp
@@ -1153,12 +1153,12 @@
 	addi	r11,r11,32
 	stvx	v22,r10,$sp
 	addi	r10,r10,32
-	stvx	v23,r10,$sp
-	addi	r10,r10,32
-	stvx	v24,r11,$sp
+	stvx	v23,r11,$sp
 	addi	r11,r11,32
-	stvx	v25,r10,$sp
+	stvx	v24,r10,$sp
 	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
 	stvx	v26,r10,$sp
 	addi	r10,r10,32
 	stvx	v27,r11,$sp
@@ -1899,26 +1899,26 @@
 	mtspr	256,r12				# restore vrsave
 	lvx	v20,r10,$sp
 	addi	r10,r10,32
-	lvx	v21,r10,$sp
-	addi	r10,r10,32
-	lvx	v22,r11,$sp
+	lvx	v21,r11,$sp
 	addi	r11,r11,32
-	lvx	v23,r10,$sp
+	lvx	v22,r10,$sp
 	addi	r10,r10,32
-	lvx	v24,r11,$sp
+	lvx	v23,r11,$sp
 	addi	r11,r11,32
-	lvx	v25,r10,$sp
+	lvx	v24,r10,$sp
 	addi	r10,r10,32
-	lvx	v26,r11,$sp
+	lvx	v25,r11,$sp
 	addi	r11,r11,32
-	lvx	v27,r10,$sp
+	lvx	v26,r10,$sp
 	addi	r10,r10,32
-	lvx	v28,r11,$sp
+	lvx	v27,r11,$sp
 	addi	r11,r11,32
-	lvx	v29,r10,$sp
+	lvx	v28,r10,$sp
 	addi	r10,r10,32
-	lvx	v30,r11,$sp
-	lvx	v31,r10,$sp
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
 	$POP	r27,`$VSXFRAME-$SIZE_T*5`($sp)
 	$POP	r28,`$VSXFRAME-$SIZE_T*4`($sp)
 	$POP	r29,`$VSXFRAME-$SIZE_T*3`($sp)