author     Deepthi Hemraj <Deepthi.Hemraj@windriver.com>    2024-11-03 21:47:13 -0800
committer  Steve Sakoman <steve@sakoman.com>                2024-11-09 05:53:57 -0800
commit     43955da4e1d56180ddb064a4de54c6dec35b809b
tree       2479568938fa21bd452f5e018dc68e91c261faa1
parent     35e6556f78078da32510974decf3d24070f6836a
download   poky-43955da4e1d56180ddb064a4de54c6dec35b809b.tar.gz
rust-llvm: Fix CVE-2024-0151
(From OE-Core rev: 175e22f2df542b0e1eb638c43c11eeefe794b0b7)
Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
Signed-off-by: Steve Sakoman <steve@sakoman.com>
-rw-r--r--  meta/recipes-devtools/rust/rust-llvm/0004-llvm-Fix-CVE-2024-0151.patch | 1086
-rw-r--r--  meta/recipes-devtools/rust/rust-llvm_1.75.0.bb                         |    3
2 files changed, 1088 insertions(+), 1 deletion(-)
diff --git a/meta/recipes-devtools/rust/rust-llvm/0004-llvm-Fix-CVE-2024-0151.patch b/meta/recipes-devtools/rust/rust-llvm/0004-llvm-Fix-CVE-2024-0151.patch
new file mode 100644
index 0000000000..c05685e64d
--- /dev/null
+++ b/meta/recipes-devtools/rust/rust-llvm/0004-llvm-Fix-CVE-2024-0151.patch
@@ -0,0 +1,1086 @@
commit 78ff617d3f573fb3a9b2fef180fa0fd43d5584ea
Author: Lucas Duarte Prates <lucas.prates@arm.com>
Date:   Thu Jun 20 10:22:01 2024 +0100

[ARM] CMSE security mitigation on function arguments and returned values (#89944)

The ABI mandates two things related to function calls:
- Function arguments must be sign- or zero-extended to the register
  size by the caller.
- Return values must be sign- or zero-extended to the register size by
  the callee.

As a consequence, callees can assume that function arguments have been
extended, and callers can assume the same of return values.

Here lies the problem: Nonsecure code might deliberately ignore this
mandate with the intent of attempting an exploit. It might try to pass
values that lie outside the expected type's value range in order to
trigger undefined behaviour, e.g. an out-of-bounds access.

With the mitigation implemented, Secure code always performs extension
of values passed by Nonsecure code.

This addresses the vulnerability described in CVE-2024-0151.

Patches by Victor Campos.

---------

Co-authored-by: Victor Campos <victor.campos@arm.com>

Upstream-Status: Backport [https://github.com/llvm/llvm-project/commit/78ff617d3f573fb3a9b2fef180fa0fd43d5584ea]
CVE: CVE-2024-0151
Signed-off-by: Deepthi Hemraj <Deepthi.Hemraj@windriver.com>
---
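At the C level the hardening is easy to picture. The sketch below is illustrative only (it is not part of this patch, and the function and array names are made up): a CMSE secure entry function receives a narrow integer from Nonsecure state and uses it as an array index. The AAPCS obliges the Nonsecure caller to zero-extend the 8-bit value to 32 bits, but a hostile caller can leave arbitrary high bits set in r0. With this fix the Secure side re-extends the value itself (the uxtb/sxth/sbfx instructions the new tests below check for), so stray bits can no longer steer an out-of-bounds access.

/* Illustrative sketch only, not part of the patch; built with a
 * CMSE-capable toolchain, e.g. clang --target=thumbv8m.main-none-eabi -mcmse.
 */
#include <stdint.h>

static int32_t arr[256];

/* Secure entry point callable from Nonsecure code. The ABI says the caller
 * zero-extends 'idx', but Nonsecure code cannot be trusted to do so; after
 * this fix the compiler emits an explicit 'uxtb r0, r0' before the array
 * access instead of relying on the caller's extension. */
__attribute__((cmse_nonsecure_entry))
int32_t access_u8(uint8_t idx) {
  return arr[idx];
}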
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index bfe137b95602..5490c3c9df6c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -156,6 +156,17 @@ static const MCPhysReg GPRArgRegs[] = {
   ARM::R0, ARM::R1, ARM::R2, ARM::R3
 };
 
+static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
+                               SelectionDAG &DAG, const SDLoc &DL) {
+  assert(Arg.ArgVT.isScalarInteger());
+  assert(Arg.ArgVT.bitsLT(MVT::i32));
+  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, Arg.ArgVT, Value);
+  SDValue Ext =
+      DAG.getNode(Arg.Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL,
+                  MVT::i32, Trunc);
+  return Ext;
+}
+
 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
   if (VT != PromotedLdStVT) {
     setOperationAction(ISD::LOAD, VT, Promote);
@@ -2196,7 +2207,7 @@ SDValue ARMTargetLowering::LowerCallResult(
     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
-    SDValue ThisVal) const {
+    SDValue ThisVal, bool isCmseNSCall) const {
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
@@ -2274,6 +2285,15 @@ SDValue ARMTargetLowering::LowerCallResult(
         (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
       Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
 
+    // On CMSE Non-secure Calls, call results (returned values) whose bitwidth
+    // is less than 32 bits must be sign- or zero-extended after the call for
+    // security reasons. Although the ABI mandates an extension done by the
+    // callee, the latter cannot be trusted to follow the rules of the ABI.
+    const ISD::InputArg &Arg = Ins[VA.getValNo()];
+    if (isCmseNSCall && Arg.ArgVT.isScalarInteger() &&
+        VA.getLocVT().isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
+      Val = handleCMSEValue(Val, Arg, DAG, dl);
+
     InVals.push_back(Val);
   }
 
@@ -2888,7 +2908,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // return.
   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                          InVals, isThisReturn,
-                         isThisReturn ? OutVals[0] : SDValue());
+                         isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);
 }
 
 /// HandleByVal - Every parameter *after* a byval parameter is passed
@@ -4485,8 +4505,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
                  *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
 
-  SmallVector<SDValue, 16> ArgValues;
-  SDValue ArgValue;
   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
   unsigned CurArgIdx = 0;
 
@@ -4541,6 +4559,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
+      SDValue ArgValue;
 
       if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
         // f64 and vector types are split up into multiple registers or
@@ -4604,16 +4623,6 @@ SDValue ARMTargetLowering::LowerFormalArguments(
       case CCValAssign::BCvt:
         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
         break;
-      case CCValAssign::SExt:
-        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
-                               DAG.getValueType(VA.getValVT()));
-        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-        break;
-      case CCValAssign::ZExt:
-        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
-                               DAG.getValueType(VA.getValVT()));
-        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-        break;
       }
 
       // f16 arguments have their size extended to 4 bytes and passed as if they
@@ -4623,6 +4632,15 @@ SDValue ARMTargetLowering::LowerFormalArguments(
           (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
         ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
 
+      // On CMSE Entry Functions, formal integer arguments whose bitwidth is
+      // less than 32 bits must be sign- or zero-extended in the callee for
+      // security reasons. Although the ABI mandates an extension done by the
+      // caller, the latter cannot be trusted to follow the rules of the ABI.
+      const ISD::InputArg &Arg = Ins[VA.getValNo()];
+      if (AFI->isCmseNSEntryFunction() && Arg.ArgVT.isScalarInteger() &&
+          RegVT.isScalarInteger() && Arg.ArgVT.bitsLT(MVT::i32))
+        ArgValue = handleCMSEValue(ArgValue, Arg, DAG, dl);
+
       InVals.push_back(ArgValue);
     } else { // VA.isRegLoc()
       // Only arguments passed on the stack should make it here.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 62a52bdb03f7..a255e9b6fc36 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -891,7 +891,7 @@ class VectorType;
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
-                           SDValue ThisVal) const;
+                           SDValue ThisVal, bool isCmseNSCall) const;
 
     bool supportSplitCSR(MachineFunction *MF) const override {
       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
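The core of the fix is the handleCMSEValue() helper added above: rather than wrapping the incoming register in AssertSext/AssertZext nodes, which merely record an assumption the optimizer may exploit, the value is explicitly truncated to the declared argument type and then sign- or zero-extended back to i32, and LowerCallResult()/LowerFormalArguments() apply it to every sub-32-bit scalar integer that crosses the security boundary. On a concrete 32-bit register value, the emitted TRUNCATE plus SIGN_EXTEND/ZERO_EXTEND pair boils down to the following host-side illustration (assuming an i8 argument; this is not code from the patch):

#include <cstdint>

// What handleCMSEValue's truncate-then-extend amounts to for an i8 value
// held in a 32-bit register (illustration only).
uint32_t harden_signed_i8(uint32_t raw) {
  // Keep the low 8 bits, then sign-extend them to 32 bits (like sxtb).
  return static_cast<uint32_t>(static_cast<int32_t>(static_cast<int8_t>(raw)));
}

uint32_t harden_unsigned_i8(uint32_t raw) {
  // Keep the low 8 bits, forcing the upper 24 bits to zero (like uxtb).
  return static_cast<uint8_t>(raw);
}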
155 | diff --git a/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll | ||
156 | new file mode 100644 | ||
157 | index 0000000000..58eef443c25e | ||
158 | --- /dev/null | ||
159 | +++ b/llvm/test/CodeGen/ARM/cmse-harden-call-returned-values.ll | ||
160 | @@ -0,0 +1,552 @@ | ||
161 | +; RUN: llc %s -mtriple=thumbv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE | ||
162 | +; RUN: llc %s -mtriple=thumbebv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE | ||
163 | +; RUN: llc %s -mtriple=thumbv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE | ||
164 | +; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE | ||
165 | + | ||
166 | +@get_idx = hidden local_unnamed_addr global ptr null, align 4 | ||
167 | +@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4 | ||
168 | + | ||
169 | +define i32 @access_i16() { | ||
170 | +; V8M-COMMON-LABEL: access_i16: | ||
171 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
172 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
173 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
174 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
175 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
176 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
177 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
178 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
179 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
180 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
181 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
182 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
183 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
184 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
185 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
186 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
187 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
188 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
189 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
190 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
191 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
192 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
193 | +; V8M-COMMON-NEXT: blxns r0 | ||
194 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
195 | +; V8M-COMMON-NEXT: add sp, #136 | ||
196 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
197 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
198 | +; V8M-COMMON-NEXT: sxth r0, r0 | ||
199 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
200 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
201 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
202 | +; | ||
203 | +; V81M-COMMON-LABEL: access_i16: | ||
204 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
205 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
206 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
207 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
208 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
209 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
210 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
211 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
212 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
213 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
214 | +; V81M-COMMON-NEXT: blxns r0 | ||
215 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
216 | +; V81M-COMMON-NEXT: add sp, #136 | ||
217 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
218 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
219 | +; V81M-COMMON-NEXT: sxth r0, r0 | ||
220 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
221 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
222 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
223 | +entry: | ||
224 | + %0 = load ptr, ptr @get_idx, align 4 | ||
225 | + %call = tail call signext i16 %0() "cmse_nonsecure_call" | ||
226 | + %idxprom = sext i16 %call to i32 | ||
227 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
228 | + %1 = load i32, ptr %arrayidx, align 4 | ||
229 | + ret i32 %1 | ||
230 | +} | ||
231 | + | ||
232 | +define i32 @access_u16() { | ||
233 | +; V8M-COMMON-LABEL: access_u16: | ||
234 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
235 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
236 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
237 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
238 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
239 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
240 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
241 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
242 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
243 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
244 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
245 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
246 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
247 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
248 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
249 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
250 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
251 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
252 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
253 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
254 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
255 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
256 | +; V8M-COMMON-NEXT: blxns r0 | ||
257 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
258 | +; V8M-COMMON-NEXT: add sp, #136 | ||
259 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
260 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
261 | +; V8M-COMMON-NEXT: uxth r0, r0 | ||
262 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
263 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
264 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
265 | +; | ||
266 | +; V81M-COMMON-LABEL: access_u16: | ||
267 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
268 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
269 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
270 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
271 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
272 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
273 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
274 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
275 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
276 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
277 | +; V81M-COMMON-NEXT: blxns r0 | ||
278 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
279 | +; V81M-COMMON-NEXT: add sp, #136 | ||
280 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
281 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
282 | +; V81M-COMMON-NEXT: uxth r0, r0 | ||
283 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
284 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
285 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
286 | +entry: | ||
287 | + %0 = load ptr, ptr @get_idx, align 4 | ||
288 | + %call = tail call zeroext i16 %0() "cmse_nonsecure_call" | ||
289 | + %idxprom = zext i16 %call to i32 | ||
290 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
291 | + %1 = load i32, ptr %arrayidx, align 4 | ||
292 | + ret i32 %1 | ||
293 | +} | ||
294 | + | ||
295 | +define i32 @access_i8() { | ||
296 | +; V8M-COMMON-LABEL: access_i8: | ||
297 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
298 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
299 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
300 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
301 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
302 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
303 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
304 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
305 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
306 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
307 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
308 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
309 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
310 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
311 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
312 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
313 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
314 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
315 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
316 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
317 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
318 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
319 | +; V8M-COMMON-NEXT: blxns r0 | ||
320 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
321 | +; V8M-COMMON-NEXT: add sp, #136 | ||
322 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
323 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
324 | +; V8M-COMMON-NEXT: sxtb r0, r0 | ||
325 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
326 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
327 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
328 | +; | ||
329 | +; V81M-COMMON-LABEL: access_i8: | ||
330 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
331 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
332 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
333 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
334 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
335 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
336 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
337 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
338 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
339 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
340 | +; V81M-COMMON-NEXT: blxns r0 | ||
341 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
342 | +; V81M-COMMON-NEXT: add sp, #136 | ||
343 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
344 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
345 | +; V81M-COMMON-NEXT: sxtb r0, r0 | ||
346 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
347 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
348 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
349 | +entry: | ||
350 | + %0 = load ptr, ptr @get_idx, align 4 | ||
351 | + %call = tail call signext i8 %0() "cmse_nonsecure_call" | ||
352 | + %idxprom = sext i8 %call to i32 | ||
353 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
354 | + %1 = load i32, ptr %arrayidx, align 4 | ||
355 | + ret i32 %1 | ||
356 | +} | ||
357 | + | ||
358 | +define i32 @access_u8() { | ||
359 | +; V8M-COMMON-LABEL: access_u8: | ||
360 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
361 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
362 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
363 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
364 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
365 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
366 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
367 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
368 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
369 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
370 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
371 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
372 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
373 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
374 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
375 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
376 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
377 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
378 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
379 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
380 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
381 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
382 | +; V8M-COMMON-NEXT: blxns r0 | ||
383 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
384 | +; V8M-COMMON-NEXT: add sp, #136 | ||
385 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
386 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
387 | +; V8M-COMMON-NEXT: uxtb r0, r0 | ||
388 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
389 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
390 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
391 | +; | ||
392 | +; V81M-COMMON-LABEL: access_u8: | ||
393 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
394 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
395 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
396 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
397 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
398 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
399 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
400 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
401 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
402 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
403 | +; V81M-COMMON-NEXT: blxns r0 | ||
404 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
405 | +; V81M-COMMON-NEXT: add sp, #136 | ||
406 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
407 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
408 | +; V81M-COMMON-NEXT: uxtb r0, r0 | ||
409 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
410 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
411 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
412 | +entry: | ||
413 | + %0 = load ptr, ptr @get_idx, align 4 | ||
414 | + %call = tail call zeroext i8 %0() "cmse_nonsecure_call" | ||
415 | + %idxprom = zext i8 %call to i32 | ||
416 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
417 | + %1 = load i32, ptr %arrayidx, align 4 | ||
418 | + ret i32 %1 | ||
419 | +} | ||
420 | + | ||
421 | +define i32 @access_i1() { | ||
422 | +; V8M-COMMON-LABEL: access_i1: | ||
423 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
424 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
425 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
426 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
427 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
428 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
429 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
430 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
431 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
432 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
433 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
434 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
435 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
436 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
437 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
438 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
439 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
440 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
441 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
442 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
443 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
444 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
445 | +; V8M-COMMON-NEXT: blxns r0 | ||
446 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
447 | +; V8M-COMMON-NEXT: add sp, #136 | ||
448 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
449 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
450 | +; V8M-COMMON-NEXT: and r0, r0, #1 | ||
451 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
452 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
453 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
454 | +; | ||
455 | +; V81M-COMMON-LABEL: access_i1: | ||
456 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
457 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
458 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
459 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
460 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
461 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
462 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
463 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
464 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
465 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
466 | +; V81M-COMMON-NEXT: blxns r0 | ||
467 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
468 | +; V81M-COMMON-NEXT: add sp, #136 | ||
469 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
470 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
471 | +; V81M-COMMON-NEXT: and r0, r0, #1 | ||
472 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
473 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
474 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
475 | +entry: | ||
476 | + %0 = load ptr, ptr @get_idx, align 4 | ||
477 | + %call = tail call zeroext i1 %0() "cmse_nonsecure_call" | ||
478 | + %idxprom = zext i1 %call to i32 | ||
479 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
480 | + %1 = load i32, ptr %arrayidx, align 4 | ||
481 | + ret i32 %1 | ||
482 | +} | ||
483 | + | ||
484 | +define i32 @access_i5() { | ||
485 | +; V8M-COMMON-LABEL: access_i5: | ||
486 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
487 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
488 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
489 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
490 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
491 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
492 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
493 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
494 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
495 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
496 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
497 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
498 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
499 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
500 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
501 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
502 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
503 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
504 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
505 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
506 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
507 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
508 | +; V8M-COMMON-NEXT: blxns r0 | ||
509 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
510 | +; V8M-COMMON-NEXT: add sp, #136 | ||
511 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
512 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
513 | +; V8M-COMMON-NEXT: sbfx r0, r0, #0, #5 | ||
514 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
515 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
516 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
517 | +; | ||
518 | +; V81M-COMMON-LABEL: access_i5: | ||
519 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
520 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
521 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
522 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
523 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
524 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
525 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
526 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
527 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
528 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
529 | +; V81M-COMMON-NEXT: blxns r0 | ||
530 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
531 | +; V81M-COMMON-NEXT: add sp, #136 | ||
532 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
533 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
534 | +; V81M-COMMON-NEXT: sbfx r0, r0, #0, #5 | ||
535 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
536 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
537 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
538 | +entry: | ||
539 | + %0 = load ptr, ptr @get_idx, align 4 | ||
540 | + %call = tail call signext i5 %0() "cmse_nonsecure_call" | ||
541 | + %idxprom = sext i5 %call to i32 | ||
542 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
543 | + %1 = load i32, ptr %arrayidx, align 4 | ||
544 | + ret i32 %1 | ||
545 | +} | ||
546 | + | ||
547 | +define i32 @access_u5() { | ||
548 | +; V8M-COMMON-LABEL: access_u5: | ||
549 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
550 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
551 | +; V8M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
552 | +; V8M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
553 | +; V8M-COMMON-NEXT: ldr r0, [r0] | ||
554 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
555 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
556 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
557 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
558 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
559 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
560 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
561 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
562 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
563 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
564 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
565 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
566 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
567 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
568 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
569 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
570 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
571 | +; V8M-COMMON-NEXT: blxns r0 | ||
572 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
573 | +; V8M-COMMON-NEXT: add sp, #136 | ||
574 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
575 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
576 | +; V8M-COMMON-NEXT: and r0, r0, #31 | ||
577 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
578 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
579 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
580 | +; | ||
581 | +; V81M-COMMON-LABEL: access_u5: | ||
582 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
583 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
584 | +; V81M-COMMON-NEXT: movw r0, :lower16:get_idx | ||
585 | +; V81M-COMMON-NEXT: movt r0, :upper16:get_idx | ||
586 | +; V81M-COMMON-NEXT: ldr r0, [r0] | ||
587 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
588 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
589 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
590 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
591 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
592 | +; V81M-COMMON-NEXT: blxns r0 | ||
593 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
594 | +; V81M-COMMON-NEXT: add sp, #136 | ||
595 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
596 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
597 | +; V81M-COMMON-NEXT: and r0, r0, #31 | ||
598 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
599 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
600 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
601 | +entry: | ||
602 | + %0 = load ptr, ptr @get_idx, align 4 | ||
603 | + %call = tail call zeroext i5 %0() "cmse_nonsecure_call" | ||
604 | + %idxprom = zext i5 %call to i32 | ||
605 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
606 | + %1 = load i32, ptr %arrayidx, align 4 | ||
607 | + ret i32 %1 | ||
608 | +} | ||
609 | + | ||
610 | +define i32 @access_i33(ptr %f) { | ||
611 | +; V8M-COMMON-LABEL: access_i33: | ||
612 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
613 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
614 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
615 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
616 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
617 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
618 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
619 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
620 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
621 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
622 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
623 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
624 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
625 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
626 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
627 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
628 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
629 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
630 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
631 | +; V8M-COMMON-NEXT: blxns r0 | ||
632 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
633 | +; V8M-COMMON-NEXT: add sp, #136 | ||
634 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
635 | +; V8M-LE-NEXT: and r0, r1, #1 | ||
636 | +; V8M-BE-NEXT: and r0, r0, #1 | ||
637 | +; V8M-COMMON-NEXT: rsb.w r0, r0, #0 | ||
638 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
639 | +; | ||
640 | +; V81M-COMMON-LABEL: access_i33: | ||
641 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
642 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
643 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
644 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
645 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
646 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
647 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
648 | +; V81M-COMMON-NEXT: blxns r0 | ||
649 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
650 | +; V81M-COMMON-NEXT: add sp, #136 | ||
651 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
652 | +; V81M-LE-NEXT: and r0, r1, #1 | ||
653 | +; V81M-BE-NEXT: and r0, r0, #1 | ||
654 | +; V81M-COMMON-NEXT: rsb.w r0, r0, #0 | ||
655 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
656 | +entry: | ||
657 | + %call = tail call i33 %f() "cmse_nonsecure_call" | ||
658 | + %shr = ashr i33 %call, 32 | ||
659 | + %conv = trunc nsw i33 %shr to i32 | ||
660 | + ret i32 %conv | ||
661 | +} | ||
662 | + | ||
663 | +define i32 @access_u33(ptr %f) { | ||
664 | +; V8M-COMMON-LABEL: access_u33: | ||
665 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
666 | +; V8M-COMMON-NEXT: push {r7, lr} | ||
667 | +; V8M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
668 | +; V8M-COMMON-NEXT: bic r0, r0, #1 | ||
669 | +; V8M-COMMON-NEXT: sub sp, #136 | ||
670 | +; V8M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
671 | +; V8M-COMMON-NEXT: mov r1, r0 | ||
672 | +; V8M-COMMON-NEXT: mov r2, r0 | ||
673 | +; V8M-COMMON-NEXT: mov r3, r0 | ||
674 | +; V8M-COMMON-NEXT: mov r4, r0 | ||
675 | +; V8M-COMMON-NEXT: mov r5, r0 | ||
676 | +; V8M-COMMON-NEXT: mov r6, r0 | ||
677 | +; V8M-COMMON-NEXT: mov r7, r0 | ||
678 | +; V8M-COMMON-NEXT: mov r8, r0 | ||
679 | +; V8M-COMMON-NEXT: mov r9, r0 | ||
680 | +; V8M-COMMON-NEXT: mov r10, r0 | ||
681 | +; V8M-COMMON-NEXT: mov r11, r0 | ||
682 | +; V8M-COMMON-NEXT: mov r12, r0 | ||
683 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, r0 | ||
684 | +; V8M-COMMON-NEXT: blxns r0 | ||
685 | +; V8M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
686 | +; V8M-COMMON-NEXT: add sp, #136 | ||
687 | +; V8M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
688 | +; V8M-LE-NEXT: and r0, r1, #1 | ||
689 | +; V8M-BE-NEXT: and r0, r0, #1 | ||
690 | +; V8M-COMMON-NEXT: pop {r7, pc} | ||
691 | +; | ||
692 | +; V81M-COMMON-LABEL: access_u33: | ||
693 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
694 | +; V81M-COMMON-NEXT: push {r7, lr} | ||
695 | +; V81M-COMMON-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
696 | +; V81M-COMMON-NEXT: bic r0, r0, #1 | ||
697 | +; V81M-COMMON-NEXT: sub sp, #136 | ||
698 | +; V81M-COMMON-NEXT: vlstm sp, {d0 - d15} | ||
699 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, apsr} | ||
700 | +; V81M-COMMON-NEXT: blxns r0 | ||
701 | +; V81M-COMMON-NEXT: vlldm sp, {d0 - d15} | ||
702 | +; V81M-COMMON-NEXT: add sp, #136 | ||
703 | +; V81M-COMMON-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11} | ||
704 | +; V81M-LE-NEXT: and r0, r1, #1 | ||
705 | +; V81M-BE-NEXT: and r0, r0, #1 | ||
706 | +; V81M-COMMON-NEXT: pop {r7, pc} | ||
707 | +entry: | ||
708 | + %call = tail call i33 %f() "cmse_nonsecure_call" | ||
709 | + %shr = lshr i33 %call, 32 | ||
710 | + %conv = trunc nuw nsw i33 %shr to i32 | ||
711 | + ret i32 %conv | ||
712 | +} | ||
713 | diff --git a/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll | ||
714 | new file mode 100644 | ||
715 | index 0000000000..c66ab00566dd | ||
716 | --- /dev/null | ||
717 | +++ b/llvm/test/CodeGen/ARM/cmse-harden-entry-arguments.ll | ||
718 | @@ -0,0 +1,368 @@ | ||
719 | +; RUN: llc %s -mtriple=thumbv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-LE | ||
720 | +; RUN: llc %s -mtriple=thumbebv8m.main -o - | FileCheck %s --check-prefixes V8M-COMMON,V8M-BE | ||
721 | +; RUN: llc %s -mtriple=thumbv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-LE | ||
722 | +; RUN: llc %s -mtriple=thumbebv8.1m.main -o - | FileCheck %s --check-prefixes V81M-COMMON,V81M-BE | ||
723 | + | ||
724 | +@arr = hidden local_unnamed_addr global [256 x i32] zeroinitializer, align 4 | ||
725 | + | ||
726 | +define i32 @access_i16(i16 signext %idx) "cmse_nonsecure_entry" { | ||
727 | +; V8M-COMMON-LABEL: access_i16: | ||
728 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
729 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
730 | +; V8M-COMMON-NEXT: sxth r0, r0 | ||
731 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
732 | +; V8M-COMMON-NEXT: mov r2, lr | ||
733 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
734 | +; V8M-COMMON-NEXT: mov r1, lr | ||
735 | +; V8M-COMMON-NEXT: mov r3, lr | ||
736 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
737 | +; V8M-COMMON-NEXT: mov r12, lr | ||
738 | +; V8M-COMMON-NEXT: bxns lr | ||
739 | +; | ||
740 | +; V81M-COMMON-LABEL: access_i16: | ||
741 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
742 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
743 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
744 | +; V81M-COMMON-NEXT: sxth r0, r0 | ||
745 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
746 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
747 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
748 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
749 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
750 | +; V81M-COMMON-NEXT: bxns lr | ||
751 | +entry: | ||
752 | + %idxprom = sext i16 %idx to i32 | ||
753 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
754 | + %0 = load i32, ptr %arrayidx, align 4 | ||
755 | + ret i32 %0 | ||
756 | +} | ||
757 | + | ||
758 | +define i32 @access_u16(i16 zeroext %idx) "cmse_nonsecure_entry" { | ||
759 | +; V8M-COMMON-LABEL: access_u16: | ||
760 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
761 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
762 | +; V8M-COMMON-NEXT: uxth r0, r0 | ||
763 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
764 | +; V8M-COMMON-NEXT: mov r2, lr | ||
765 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
766 | +; V8M-COMMON-NEXT: mov r1, lr | ||
767 | +; V8M-COMMON-NEXT: mov r3, lr | ||
768 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
769 | +; V8M-COMMON-NEXT: mov r12, lr | ||
770 | +; V8M-COMMON-NEXT: bxns lr | ||
771 | +; | ||
772 | +; V81M-COMMON-LABEL: access_u16: | ||
773 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
774 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
775 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
776 | +; V81M-COMMON-NEXT: uxth r0, r0 | ||
777 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
778 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
779 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
780 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
781 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
782 | +; V81M-COMMON-NEXT: bxns lr | ||
783 | +entry: | ||
784 | + %idxprom = zext i16 %idx to i32 | ||
785 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
786 | + %0 = load i32, ptr %arrayidx, align 4 | ||
787 | + ret i32 %0 | ||
788 | +} | ||
789 | + | ||
790 | +define i32 @access_i8(i8 signext %idx) "cmse_nonsecure_entry" { | ||
791 | +; V8M-COMMON-LABEL: access_i8: | ||
792 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
793 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
794 | +; V8M-COMMON-NEXT: sxtb r0, r0 | ||
795 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
796 | +; V8M-COMMON-NEXT: mov r2, lr | ||
797 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
798 | +; V8M-COMMON-NEXT: mov r1, lr | ||
799 | +; V8M-COMMON-NEXT: mov r3, lr | ||
800 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
801 | +; V8M-COMMON-NEXT: mov r12, lr | ||
802 | +; V8M-COMMON-NEXT: bxns lr | ||
803 | +; | ||
804 | +; V81M-COMMON-LABEL: access_i8: | ||
805 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
806 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
807 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
808 | +; V81M-COMMON-NEXT: sxtb r0, r0 | ||
809 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
810 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
811 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
812 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
813 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
814 | +; V81M-COMMON-NEXT: bxns lr | ||
815 | +entry: | ||
816 | + %idxprom = sext i8 %idx to i32 | ||
817 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
818 | + %0 = load i32, ptr %arrayidx, align 4 | ||
819 | + ret i32 %0 | ||
820 | +} | ||
821 | + | ||
822 | +define i32 @access_u8(i8 zeroext %idx) "cmse_nonsecure_entry" { | ||
823 | +; V8M-COMMON-LABEL: access_u8: | ||
824 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
825 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
826 | +; V8M-COMMON-NEXT: uxtb r0, r0 | ||
827 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
828 | +; V8M-COMMON-NEXT: mov r2, lr | ||
829 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
830 | +; V8M-COMMON-NEXT: mov r1, lr | ||
831 | +; V8M-COMMON-NEXT: mov r3, lr | ||
832 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
833 | +; V8M-COMMON-NEXT: mov r12, lr | ||
834 | +; V8M-COMMON-NEXT: bxns lr | ||
835 | +; | ||
836 | +; V81M-COMMON-LABEL: access_u8: | ||
837 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
838 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
839 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
840 | +; V81M-COMMON-NEXT: uxtb r0, r0 | ||
841 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
842 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
843 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
844 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
845 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
846 | +; V81M-COMMON-NEXT: bxns lr | ||
847 | +entry: | ||
848 | + %idxprom = zext i8 %idx to i32 | ||
849 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
850 | + %0 = load i32, ptr %arrayidx, align 4 | ||
851 | + ret i32 %0 | ||
852 | +} | ||
853 | + | ||
854 | +define i32 @access_i1(i1 signext %idx) "cmse_nonsecure_entry" { | ||
855 | +; V8M-COMMON-LABEL: access_i1: | ||
856 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
857 | +; V8M-COMMON-NEXT: and r0, r0, #1 | ||
858 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
859 | +; V8M-COMMON-NEXT: rsbs r0, r0, #0 | ||
860 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
861 | +; V8M-COMMON-NEXT: and r0, r0, #1 | ||
862 | +; V8M-COMMON-NEXT: mov r2, lr | ||
863 | +; V8M-COMMON-NEXT: mov r3, lr | ||
864 | +; V8M-COMMON-NEXT: mov r12, lr | ||
865 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
866 | +; V8M-COMMON-NEXT: mov r1, lr | ||
867 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
868 | +; V8M-COMMON-NEXT: bxns lr | ||
869 | +; | ||
870 | +; V81M-COMMON-LABEL: access_i1: | ||
871 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
872 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
873 | +; V81M-COMMON-NEXT: and r0, r0, #1 | ||
874 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
875 | +; V81M-COMMON-NEXT: rsbs r0, r0, #0 | ||
876 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
877 | +; V81M-COMMON-NEXT: and r0, r0, #1 | ||
878 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
879 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
880 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
881 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
882 | +; V81M-COMMON-NEXT: bxns lr | ||
883 | +entry: | ||
884 | + %idxprom = zext i1 %idx to i32 | ||
885 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
886 | + %0 = load i32, ptr %arrayidx, align 4 | ||
887 | + ret i32 %0 | ||
888 | +} | ||
889 | + | ||
890 | +define i32 @access_i5(i5 signext %idx) "cmse_nonsecure_entry" { | ||
891 | +; V8M-COMMON-LABEL: access_i5: | ||
892 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
893 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
894 | +; V8M-COMMON-NEXT: sbfx r0, r0, #0, #5 | ||
895 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
896 | +; V8M-COMMON-NEXT: mov r2, lr | ||
897 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
898 | +; V8M-COMMON-NEXT: mov r1, lr | ||
899 | +; V8M-COMMON-NEXT: mov r3, lr | ||
900 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
901 | +; V8M-COMMON-NEXT: mov r12, lr | ||
902 | +; V8M-COMMON-NEXT: bxns lr | ||
903 | +; | ||
904 | +; V81M-COMMON-LABEL: access_i5: | ||
905 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
906 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
907 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
908 | +; V81M-COMMON-NEXT: sbfx r0, r0, #0, #5 | ||
909 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
910 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
911 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
912 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
913 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
914 | +; V81M-COMMON-NEXT: bxns lr | ||
915 | +entry: | ||
916 | + %idxprom = sext i5 %idx to i32 | ||
917 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
918 | + %0 = load i32, ptr %arrayidx, align 4 | ||
919 | + ret i32 %0 | ||
920 | +} | ||
921 | + | ||
922 | +define i32 @access_u5(i5 zeroext %idx) "cmse_nonsecure_entry" { | ||
923 | +; V8M-COMMON-LABEL: access_u5: | ||
924 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
925 | +; V8M-COMMON-NEXT: movw r1, :lower16:arr | ||
926 | +; V8M-COMMON-NEXT: and r0, r0, #31 | ||
927 | +; V8M-COMMON-NEXT: movt r1, :upper16:arr | ||
928 | +; V8M-COMMON-NEXT: mov r2, lr | ||
929 | +; V8M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
930 | +; V8M-COMMON-NEXT: mov r1, lr | ||
931 | +; V8M-COMMON-NEXT: mov r3, lr | ||
932 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
933 | +; V8M-COMMON-NEXT: mov r12, lr | ||
934 | +; V8M-COMMON-NEXT: bxns lr | ||
935 | +; | ||
936 | +; V81M-COMMON-LABEL: access_u5: | ||
937 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
938 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
939 | +; V81M-COMMON-NEXT: movw r1, :lower16:arr | ||
940 | +; V81M-COMMON-NEXT: and r0, r0, #31 | ||
941 | +; V81M-COMMON-NEXT: movt r1, :upper16:arr | ||
942 | +; V81M-COMMON-NEXT: ldr.w r0, [r1, r0, lsl #2] | ||
943 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
944 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
945 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
946 | +; V81M-COMMON-NEXT: bxns lr | ||
947 | +entry: | ||
948 | + %idxprom = zext i5 %idx to i32 | ||
949 | + %arrayidx = getelementptr inbounds [256 x i32], ptr @arr, i32 0, i32 %idxprom | ||
950 | + %0 = load i32, ptr %arrayidx, align 4 | ||
951 | + ret i32 %0 | ||
952 | +} | ||
953 | + | ||
954 | +define i32 @access_i33(i33 %arg) "cmse_nonsecure_entry" { | ||
955 | +; V8M-COMMON-LABEL: access_i33: | ||
956 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
957 | +; V8M-LE-NEXT: and r0, r1, #1 | ||
958 | +; V8M-BE-NEXT: and r0, r0, #1 | ||
959 | +; V8M-COMMON-NEXT: mov r1, lr | ||
960 | +; V8M-COMMON-NEXT: rsbs r0, r0, #0 | ||
961 | +; V8M-COMMON-NEXT: mov r2, lr | ||
962 | +; V8M-COMMON-NEXT: mov r3, lr | ||
963 | +; V8M-COMMON-NEXT: mov r12, lr | ||
964 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
965 | +; V8M-COMMON-NEXT: bxns lr | ||
966 | +; | ||
967 | +; V81M-COMMON-LABEL: access_i33: | ||
968 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
969 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
970 | +; V81M-LE-NEXT: and r0, r1, #1 | ||
971 | +; V81M-BE-NEXT: and r0, r0, #1 | ||
972 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
973 | +; V81M-COMMON-NEXT: rsbs r0, r0, #0 | ||
974 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
975 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
976 | +; V81M-COMMON-NEXT: bxns lr | ||
977 | +entry: | ||
978 | + %shr = ashr i33 %arg, 32 | ||
979 | + %conv = trunc nsw i33 %shr to i32 | ||
980 | + ret i32 %conv | ||
981 | +} | ||
982 | + | ||
983 | +define i32 @access_u33(i33 %arg) "cmse_nonsecure_entry" { | ||
984 | +; V8M-COMMON-LABEL: access_u33: | ||
985 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
986 | +; V8M-LE-NEXT: and r0, r1, #1 | ||
987 | +; V8M-BE-NEXT: and r0, r0, #1 | ||
988 | +; V8M-COMMON-NEXT: mov r1, lr | ||
989 | +; V8M-COMMON-NEXT: mov r2, lr | ||
990 | +; V8M-COMMON-NEXT: mov r3, lr | ||
991 | +; V8M-COMMON-NEXT: mov r12, lr | ||
992 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
993 | +; V8M-COMMON-NEXT: bxns lr | ||
994 | +; | ||
995 | +; V81M-COMMON-LABEL: access_u33: | ||
996 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
997 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
998 | +; V81M-LE-NEXT: and r0, r1, #1 | ||
999 | +; V81M-BE-NEXT: and r0, r0, #1 | ||
1000 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
1001 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
1002 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
1003 | +; V81M-COMMON-NEXT: bxns lr | ||
1004 | +entry: | ||
1005 | + %shr = lshr i33 %arg, 32 | ||
1006 | + %conv = trunc nuw nsw i33 %shr to i32 | ||
1007 | + ret i32 %conv | ||
1008 | +} | ||
1009 | + | ||
1010 | +define i32 @access_i65(ptr byval(i65) %0) "cmse_nonsecure_entry" { | ||
1011 | +; V8M-COMMON-LABEL: access_i65: | ||
1012 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
1013 | +; V8M-COMMON-NEXT: sub sp, #16 | ||
1014 | +; V8M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3} | ||
1015 | +; V8M-LE-NEXT: ldrb.w r0, [sp, #8] | ||
1016 | +; V8M-LE-NEXT: and r0, r0, #1 | ||
1017 | +; V8M-LE-NEXT: rsbs r0, r0, #0 | ||
1018 | +; V8M-BE-NEXT: movs r1, #0 | ||
1019 | +; V8M-BE-NEXT: sub.w r0, r1, r0, lsr #24 | ||
1020 | +; V8M-COMMON-NEXT: add sp, #16 | ||
1021 | +; V8M-COMMON-NEXT: mov r1, lr | ||
1022 | +; V8M-COMMON-NEXT: mov r2, lr | ||
1023 | +; V8M-COMMON-NEXT: mov r3, lr | ||
1024 | +; V8M-COMMON-NEXT: mov r12, lr | ||
1025 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
1026 | +; V8M-COMMON-NEXT: bxns lr | ||
1027 | +; | ||
1028 | +; V81M-COMMON-LABEL: access_i65: | ||
1029 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
1030 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
1031 | +; V81M-COMMON-NEXT: sub sp, #16 | ||
1032 | +; V81M-COMMON-NEXT: add sp, #4 | ||
1033 | +; V81M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3} | ||
1034 | +; V81M-LE-NEXT: ldrb.w r0, [sp, #8] | ||
1035 | +; V81M-LE-NEXT: and r0, r0, #1 | ||
1036 | +; V81M-LE-NEXT: rsbs r0, r0, #0 | ||
1037 | +; V81M-BE-NEXT: movs r1, #0 | ||
1038 | +; V81M-BE-NEXT: sub.w r0, r1, r0, lsr #24 | ||
1039 | +; V81M-COMMON-NEXT: sub sp, #4 | ||
1040 | +; V81M-COMMON-NEXT: add sp, #16 | ||
1041 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
1042 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
1043 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
1044 | +; V81M-COMMON-NEXT: bxns lr | ||
1045 | +entry: | ||
1046 | + %arg = load i65, ptr %0, align 8 | ||
1047 | + %shr = ashr i65 %arg, 64 | ||
1048 | + %conv = trunc nsw i65 %shr to i32 | ||
1049 | + ret i32 %conv | ||
1050 | +} | ||
1051 | + | ||
1052 | +define i32 @access_u65(ptr byval(i65) %0) "cmse_nonsecure_entry" { | ||
1053 | +; V8M-COMMON-LABEL: access_u65: | ||
1054 | +; V8M-COMMON: @ %bb.0: @ %entry | ||
1055 | +; V8M-COMMON-NEXT: sub sp, #16 | ||
1056 | +; V8M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3} | ||
1057 | +; V8M-LE-NEXT: ldrb.w r0, [sp, #8] | ||
1058 | +; V8M-BE-NEXT: lsrs r0, r0, #24 | ||
1059 | +; V8M-COMMON-NEXT: add sp, #16 | ||
1060 | +; V8M-COMMON-NEXT: mov r1, lr | ||
1061 | +; V8M-COMMON-NEXT: mov r2, lr | ||
1062 | +; V8M-COMMON-NEXT: mov r3, lr | ||
1063 | +; V8M-COMMON-NEXT: mov r12, lr | ||
1064 | +; V8M-COMMON-NEXT: msr apsr_nzcvq, lr | ||
1065 | +; V8M-COMMON-NEXT: bxns lr | ||
1066 | +; | ||
1067 | +; V81M-COMMON-LABEL: access_u65: | ||
1068 | +; V81M-COMMON: @ %bb.0: @ %entry | ||
1069 | +; V81M-COMMON-NEXT: vstr fpcxtns, [sp, #-4]! | ||
1070 | +; V81M-COMMON-NEXT: sub sp, #16 | ||
1071 | +; V81M-COMMON-NEXT: add sp, #4 | ||
1072 | +; V81M-COMMON-NEXT: stm.w sp, {r0, r1, r2, r3} | ||
1073 | +; V81M-LE-NEXT: ldrb.w r0, [sp, #8] | ||
1074 | +; V81M-BE-NEXT: lsrs r0, r0, #24 | ||
1075 | +; V81M-COMMON-NEXT: sub sp, #4 | ||
1076 | +; V81M-COMMON-NEXT: add sp, #16 | ||
1077 | +; V81M-COMMON-NEXT: vscclrm {s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, vpr} | ||
1078 | +; V81M-COMMON-NEXT: vldr fpcxtns, [sp], #4 | ||
1079 | +; V81M-COMMON-NEXT: clrm {r1, r2, r3, r12, apsr} | ||
1080 | +; V81M-COMMON-NEXT: bxns lr | ||
1081 | +entry: | ||
1082 | + %arg = load i65, ptr %0, align 8 | ||
1083 | + %shr = lshr i65 %arg, 64 | ||
1084 | + %conv = trunc nuw nsw i65 %shr to i32 | ||
1085 | + ret i32 %conv | ||
1086 | +} | ||
diff --git a/meta/recipes-devtools/rust/rust-llvm_1.75.0.bb b/meta/recipes-devtools/rust/rust-llvm_1.75.0.bb
index 13bdadb5e7..292fc15c55 100644
--- a/meta/recipes-devtools/rust/rust-llvm_1.75.0.bb
+++ b/meta/recipes-devtools/rust/rust-llvm_1.75.0.bb
@@ -10,7 +10,8 @@ require rust-source.inc
 
 SRC_URI += "file://0002-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \
             file://0001-AsmMatcherEmitter-sort-ClassInfo-lists-by-name-as-we.patch;striplevel=2 \
-            file://0003-llvm-fix-include-benchmarks.patch;striplevel=2"
+            file://0003-llvm-fix-include-benchmarks.patch;striplevel=2 \
+            file://0004-llvm-Fix-CVE-2024-0151.patch;striplevel=2"
 
 S = "${RUSTSRC}/src/llvm-project/llvm"
 