Diffstat (limited to 'meta-oe/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99349.patch')
-rw-r--r--  meta-oe/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99349.patch  |  401
1 file changed, 401 insertions(+), 0 deletions(-)
diff --git a/meta-oe/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99349.patch b/meta-oe/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99349.patch
new file mode 100644
index 000000000..a95b649e4
--- /dev/null
+++ b/meta-oe/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99349.patch
@@ -0,0 +1,401 @@
	* config/arm/arm.c (thumb2_size_rtx_costs): New.
	(arm_rtx_costs): Call above for Thumb-2.

2010-07-26  Julian Brown  <julian@codesourcery.com>

	Merge from Sourcery G++ 4.4:

	2010-02-23  Julian Brown  <julian@codesourcery.com>

	gcc/
	* calls.c (precompute_register_parameters): Avoid generating a
	register move if optimizing for size.


=== modified file 'gcc/config/arm/arm.c'
--- old/gcc/config/arm/arm.c	2010-08-13 10:43:42 +0000
+++ new/gcc/config/arm/arm.c	2010-08-13 10:55:28 +0000
@@ -141,6 +141,7 @@
 static bool arm_have_conditional_execution (void);
 static bool arm_rtx_costs_1 (rtx, enum rtx_code, int*, bool);
 static bool arm_size_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *);
+static bool thumb2_size_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *);
 static bool arm_slowmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
 static bool arm_fastmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
 static bool arm_xscale_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
@@ -7316,14 +7317,372 @@
     }
 }
 
+static bool
+thumb2_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
+                       int *total)
+{
+  /* Attempt to give a lower cost to RTXs which can optimistically be
+     represented as short insns, assuming that the right conditions will hold
+     later (e.g. low registers will be chosen if a short insn requires them).
+
+     Note that we don't make wide insns cost twice as much as narrow insns,
+     because we can't prove that a particular RTX will actually use a narrow
+     insn, because not enough information is available (e.g., we don't know
+     which hard registers pseudos will be assigned).  Consider these to be
+     "expected" sizes/weightings.
+
+     (COSTS_NARROW_INSNS has the same weight as COSTS_N_INSNS.)  */
+
+#define COSTS_NARROW_INSNS(N) ((N) * 4)
+#define COSTS_WIDE_INSNS(N) ((N) * 6)
+#define THUMB2_LIBCALL_COST COSTS_WIDE_INSNS (2)
+  enum machine_mode mode = GET_MODE (x);
+
+  switch (code)
+    {
+    case MEM:
+      if (REG_P (XEXP (x, 0)))
+        {
+          /* Hopefully this will use a narrow ldm/stm insn.  */
+          *total = COSTS_NARROW_INSNS (1);
+          return true;
+        }
+      else if ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+               || reg_mentioned_p (virtual_stack_vars_rtx, XEXP (x, 0))
+               || reg_mentioned_p (stack_pointer_rtx, XEXP (x, 0)))
+        {
+          *total = COSTS_NARROW_INSNS (ARM_NUM_REGS (mode));
+          return true;
+        }
+      else if (GET_CODE (XEXP (x, 0)) == PLUS)
+        {
+          rtx plus = XEXP (x, 0);
+
+          if (GET_CODE (XEXP (plus, 1)) == CONST_INT)
+            {
+              HOST_WIDE_INT cst = INTVAL (XEXP (plus, 1));
+
+              if (cst >= 0 && cst < 256)
+                *total = COSTS_NARROW_INSNS (ARM_NUM_REGS (mode));
+              else
+                *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+
+              *total += rtx_cost (XEXP (plus, 0), code, false);
+
+              return true;
+            }
+        }
+
+      *total = COSTS_NARROW_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case DIV:
+    case MOD:
+    case UDIV:
+    case UMOD:
+      if (arm_arch_hwdiv)
+        *total = COSTS_WIDE_INSNS (1);
+      else
+        *total = THUMB2_LIBCALL_COST;
+      return false;
+
+    case ROTATE:
+      if (mode == SImode && REG_P (XEXP (x, 1)))
+        {
+          *total = COSTS_WIDE_INSNS (1) + COSTS_NARROW_INSNS (1)
+                   + rtx_cost (XEXP (x, 0), code, false);
+          return true;
+        }
+      /* Fall through */
+
+    case ASHIFT:
+    case LSHIFTRT:
+    case ASHIFTRT:
+      if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
+        {
+          *total = COSTS_WIDE_INSNS (3) + rtx_cost (XEXP (x, 0), code, false);
+          return true;
+        }
+      else if (mode == SImode)
+        {
+          *total = COSTS_NARROW_INSNS (1);
+          return false;
+        }
+
+      /* Needs a libcall.  */
+      *total = THUMB2_LIBCALL_COST;
+      return false;
+
+    case ROTATERT:
+      if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
+        {
+          *total = COSTS_WIDE_INSNS (3) + rtx_cost (XEXP (x, 0), code, false);
+          return true;
+        }
+      else if (mode == SImode)
+        {
+          if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+            *total = COSTS_WIDE_INSNS (1) + rtx_cost (XEXP (x, 0), code, false);
+          else
+            *total = COSTS_NARROW_INSNS (1)
+                     + rtx_cost (XEXP (x, 0), code, false);
+          return true;
+        }
+
+      /* Needs a libcall.  */
+      *total = THUMB2_LIBCALL_COST;
+      return false;
+
+    case MINUS:
+      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
+          && (mode == SFmode || !TARGET_VFP_SINGLE))
+        {
+          *total = COSTS_WIDE_INSNS (1);
+          return false;
+        }
+
+      if (mode == SImode)
+        {
+          enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
+          enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
+
+          if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
+              || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
+              || subcode1 == ROTATE || subcode1 == ROTATERT
+              || subcode1 == ASHIFT || subcode1 == LSHIFTRT
+              || subcode1 == ASHIFTRT)
+            {
+              /* It's just the cost of the two operands.  */
+              *total = 0;
+              return false;
+            }
+
+          if (subcode1 == CONST_INT)
+            {
+              HOST_WIDE_INT cst = INTVAL (XEXP (x, 1));
+
+              if (cst >= 0 && cst < 256)
+                *total = COSTS_NARROW_INSNS (1);
+              else
+                *total = COSTS_WIDE_INSNS (1);
+
+              *total += rtx_cost (XEXP (x, 0), code, false);
+
+              return true;
+            }
+
+          *total = COSTS_NARROW_INSNS (1);
+          return false;
+        }
+
+      *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case PLUS:
+      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
+          && (mode == SFmode || !TARGET_VFP_SINGLE))
+        {
+          *total = COSTS_WIDE_INSNS (1);
+          return false;
+        }
+
+      /* Fall through */
+    case AND: case XOR: case IOR:
+      if (mode == SImode)
+        {
+          enum rtx_code subcode = GET_CODE (XEXP (x, 0));
+
+          if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
+              || subcode == LSHIFTRT || subcode == ASHIFTRT
+              || (code == AND && subcode == NOT))
+            {
+              /* It's just the cost of the two operands.  */
+              *total = 0;
+              return false;
+            }
+
+          if (code == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+            {
+              HOST_WIDE_INT cst = INTVAL (XEXP (x, 1));
+
+              if ((reg_mentioned_p (virtual_stack_vars_rtx, XEXP (x, 0))
+                   || reg_mentioned_p (stack_pointer_rtx, XEXP (x, 0)))
+                  && cst > -512 && cst < 1024)
+                /* Only approximately correct, depending on destination
+                   register.  */
+                *total = COSTS_NARROW_INSNS (1);
+              else if (cst > -256 && cst < 256)
+                *total = COSTS_NARROW_INSNS (1);
+              else
+                *total = COSTS_WIDE_INSNS (1);
+
+              *total += rtx_cost (XEXP (x, 0), code, false);
+
+              return true;
+            }
+
+          if (subcode == MULT
+              && power_of_two_operand (XEXP (XEXP (x, 0), 1), mode))
+            {
+              *total = COSTS_WIDE_INSNS (1)
+                       + rtx_cost (XEXP (x, 1), code, false);
+              return true;
+            }
+        }
+
+      *total = COSTS_NARROW_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case MULT:
+      if (mode == SImode && GET_CODE (XEXP (x, 1)) != CONST_INT)
+        {
+          /* Might be using muls.  */
+          *total = COSTS_NARROW_INSNS (1);
+          return false;
+        }
+      *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case NEG:
+      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
+          && (mode == SFmode || !TARGET_VFP_SINGLE))
+        {
+          *total = COSTS_WIDE_INSNS (1);
+          return false;
+        }
+
+      /* Fall through */
+    case NOT:
+      if (mode == SImode)
+        {
+          *total = COSTS_NARROW_INSNS (1);
+          return false;
+        }
+      *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case IF_THEN_ELSE:
+      *total = COSTS_NARROW_INSNS (1);
+      return false;
+
+    case COMPARE:
+      if (cc_register (XEXP (x, 0), VOIDmode))
+        *total = 0;
+      else
+        *total = COSTS_NARROW_INSNS (1);
+      return false;
+
+    case ABS:
+      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
+          && (mode == SFmode || !TARGET_VFP_SINGLE))
+        *total = COSTS_WIDE_INSNS (1);
+      else
+        *total = COSTS_NARROW_INSNS (ARM_NUM_REGS (mode)) * 2;
+      return false;
+
+    case SIGN_EXTEND:
+      if (GET_MODE_SIZE (mode) <= 4)
+        *total = GET_CODE (XEXP (x, 0)) == MEM ? 0 : COSTS_NARROW_INSNS (1);
+      else
+        *total = COSTS_NARROW_INSNS (1)
+                 + COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+      return false;
+
+    case ZERO_EXTEND:
+      if (GET_MODE_SIZE (mode) > 4)
+        *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode) - 1);
+      else if (GET_CODE (XEXP (x, 0)) == MEM)
+        *total = 0;
+      else
+        *total = COSTS_NARROW_INSNS (1);
+      return false;
+
+    case CONST_INT:
+      {
+        HOST_WIDE_INT cst = INTVAL (x);
+
+        switch (outer_code)
+          {
+          case PLUS:
+            if (cst > -256 && cst < 256)
+              *total = 0;
+            else
+              /* See note about optabs below.  */
+              *total = COSTS_N_INSNS (1);
+            return true;
+
+          case MINUS:
+          case COMPARE:
+            if (cst >= 0 && cst < 256)
+              *total = 0;
+            else
+              /* See note about optabs below.  */
+              *total = COSTS_N_INSNS (1);
+            return true;
+
+          case ASHIFT:
+          case ASHIFTRT:
+          case LSHIFTRT:
+            *total = 0;
+            return true;
+
+          default:
+            /* Constants are compared explicitly against COSTS_N_INSNS (1)
+               in optabs.c, which creates an alternative, larger code
+               sequence for more expensive constants.  So it doesn't pay
+               to make some constants cost more than this.  */
+            *total = COSTS_N_INSNS (1);
+          }
+        return true;
+      }
+
+    case CONST:
+    case LABEL_REF:
+    case SYMBOL_REF:
+      *total = COSTS_WIDE_INSNS (2);
+      return true;
+
+    case CONST_DOUBLE:
+      *total = COSTS_WIDE_INSNS (4);
+      return true;
+
+    case HIGH:
+    case LO_SUM:
+      /* We prefer constant pool entries to MOVW/MOVT pairs, so bump the
+         cost of these slightly.  */
+      *total = COSTS_WIDE_INSNS (1) + 1;
+      return true;
+
+    default:
+      if (mode != VOIDmode)
+        *total = COSTS_WIDE_INSNS (ARM_NUM_REGS (mode));
+      else
+        /* A guess (inherited from arm_size_rtx_costs).  */
+        *total = COSTS_WIDE_INSNS (4);
+      return false;
+    }
+
+  return true;
+#undef THUMB2_LIBCALL_COST
+#undef COSTS_WIDE_INSNS
+#undef COSTS_NARROW_INSNS
+}
+
 /* RTX costs when optimizing for size.  */
 static bool
 arm_rtx_costs (rtx x, int code, int outer_code, int *total,
                bool speed)
 {
   if (!speed)
-    return arm_size_rtx_costs (x, (enum rtx_code) code,
-                               (enum rtx_code) outer_code, total);
+    {
+      if (TARGET_THUMB2)
+        return thumb2_size_rtx_costs (x, (enum rtx_code) code,
+                                      (enum rtx_code) outer_code, total);
+      else
+        return arm_size_rtx_costs (x, (enum rtx_code) code,
+                                   (enum rtx_code) outer_code, total);
+    }
   else
     return all_cores[(int)arm_tune].rtx_costs (x, (enum rtx_code) code,
                                                (enum rtx_code) outer_code,
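
For reference, the weighting scheme the patch introduces can be exercised outside the compiler. The sketch below is illustrative only: the three COSTS_* macro bodies are copied from the patch hunk above, and COSTS_N_INSNS matches GCC's definition in rtl.h ((N) * 4); the main function and the printed scenarios are assumptions chosen to mirror cases handled in thumb2_size_rtx_costs, not part of the patch.

/* Minimal standalone illustration of the Thumb-2 size-cost weights.
   Compile with any C compiler; no GCC internals required.  */
#include <stdio.h>

#define COSTS_N_INSNS(N)      ((N) * 4)  /* GCC's generic per-insn weight (rtl.h).  */
#define COSTS_NARROW_INSNS(N) ((N) * 4)  /* Expected 16-bit encoding; same weight.  */
#define COSTS_WIDE_INSNS(N)   ((N) * 6)  /* Expected 32-bit encoding; 1.5x, not 2x.  */
#define THUMB2_LIBCALL_COST   COSTS_WIDE_INSNS (2)

int
main (void)
{
  /* An SImode add of a small constant (0..255) is expected to get a
     narrow encoding; a larger constant forces a wide one.  */
  printf ("narrow add:        %d\n", COSTS_NARROW_INSNS (1));  /* 4 */
  printf ("wide add:          %d\n", COSTS_WIDE_INSNS (1));    /* 6 */

  /* Division without hardware divide (arm_arch_hwdiv unset) is costed
     as a libcall, i.e. roughly two wide insns.  */
  printf ("division libcall:  %d\n", THUMB2_LIBCALL_COST);     /* 12 */

  /* A DImode shift by a constant is costed as three wide insns.  */
  printf ("DImode shift:      %d\n", COSTS_WIDE_INSNS (3));    /* 18 */
  return 0;
}

The 6-versus-4 ratio encodes the hedge stated in the patch's own comment: before register allocation the compiler cannot prove that an RTX will really get a 16-bit encoding (that often requires low registers), so wide insns are made only 1.5x as expensive as narrow ones rather than the literal 2x size difference.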