Diffstat (limited to 'toolchain-layer/recipes-devtools/gcc/gcc-4.6/linaro/gcc-4.6-linaro-r106827.patch')
-rw-r--r--  toolchain-layer/recipes-devtools/gcc/gcc-4.6/linaro/gcc-4.6-linaro-r106827.patch | 622
1 file changed, 0 insertions, 622 deletions
diff --git a/toolchain-layer/recipes-devtools/gcc/gcc-4.6/linaro/gcc-4.6-linaro-r106827.patch b/toolchain-layer/recipes-devtools/gcc/gcc-4.6/linaro/gcc-4.6-linaro-r106827.patch
deleted file mode 100644
index 28caa40076..0000000000
--- a/toolchain-layer/recipes-devtools/gcc/gcc-4.6/linaro/gcc-4.6-linaro-r106827.patch
+++ /dev/null
@@ -1,622 +0,0 @@
-2011-10-17 Michael Hope <michael.hope@linaro.org>
-
- Backport from mainline r178852:
-
- 2011-09-14 Julian Brown <julian@codesourcery.com>
-
- gcc/
- * config/arm/arm.c (arm_override_options): Add unaligned_access
- support.
- (arm_file_start): Emit attribute for unaligned access as appropriate.
- * config/arm/arm.md (UNSPEC_UNALIGNED_LOAD)
- (UNSPEC_UNALIGNED_STORE): Add constants for unspecs.
- (insv, extzv): Add unaligned-access support.
- (extv): Change to expander. Likewise.
- (extzv_t1, extv_regsi): Add helpers.
- (unaligned_loadsi, unaligned_loadhis, unaligned_loadhiu)
- (unaligned_storesi, unaligned_storehi): New.
- (*extv_reg): New (previous extv implementation).
- * config/arm/arm.opt (munaligned_access): Add option.
- * config/arm/constraints.md (Uw): New constraint.
- * expmed.c (store_bit_field_1): Adjust bitfield numbering according
- to size of access, not size of unit, when BITS_BIG_ENDIAN !=
- BYTES_BIG_ENDIAN. Don't use bitfield accesses for
- volatile accesses when -fstrict-volatile-bitfields is in effect.
- (extract_bit_field_1): Likewise.
-
- Backport from mainline r172697:
-
- 2011-04-19 Wei Guozhi <carrot@google.com>
-
- PR target/47855
- gcc/
- * config/arm/arm-protos.h (thumb1_legitimate_address_p): New prototype.
- * config/arm/arm.c (thumb1_legitimate_address_p): Remove the static
- linkage.
- * config/arm/constraints.md (Uu): New constraint.
- * config/arm/arm.md (*arm_movqi_insn): Compute attr "length".
-
-=== modified file 'gcc/config/arm/arm-protos.h'
-Index: gcc-4_6-branch/gcc/config/arm/arm-protos.h
-===================================================================
---- gcc-4_6-branch.orig/gcc/config/arm/arm-protos.h 2012-03-05 16:07:15.000000000 -0800
-+++ gcc-4_6-branch/gcc/config/arm/arm-protos.h 2012-03-05 16:07:50.392936694 -0800
-@@ -59,6 +59,7 @@
- int);
- extern rtx thumb_legitimize_reload_address (rtx *, enum machine_mode, int, int,
- int);
-+extern int thumb1_legitimate_address_p (enum machine_mode, rtx, int);
- extern int arm_const_double_rtx (rtx);
- extern int neg_const_double_rtx_ok_for_fpa (rtx);
- extern int vfp3_const_double_rtx (rtx);
-Index: gcc-4_6-branch/gcc/config/arm/arm.c
-===================================================================
---- gcc-4_6-branch.orig/gcc/config/arm/arm.c 2012-03-05 16:07:15.000000000 -0800
-+++ gcc-4_6-branch/gcc/config/arm/arm.c 2012-03-05 16:07:50.400936694 -0800
-@@ -2065,6 +2065,28 @@
- fix_cm3_ldrd = 0;
- }
-
-+ /* Enable -munaligned-access by default for
-+ - all ARMv6 architecture-based processors
-+ - ARMv7-A, ARMv7-R, and ARMv7-M architecture-based processors.
-+
-+ Disable -munaligned-access by default for
-+ - all pre-ARMv6 architecture-based processors
-+ - ARMv6-M architecture-based processors. */
-+
-+ if (unaligned_access == 2)
-+ {
-+ if (arm_arch6 && (arm_arch_notm || arm_arch7))
-+ unaligned_access = 1;
-+ else
-+ unaligned_access = 0;
-+ }
-+ else if (unaligned_access == 1
-+ && !(arm_arch6 && (arm_arch_notm || arm_arch7)))
-+ {
-+ warning (0, "target CPU does not support unaligned accesses");
-+ unaligned_access = 0;
-+ }
-+
- if (TARGET_THUMB1 && flag_schedule_insns)
- {
- /* Don't warn since it's on by default in -O2. */
-@@ -6123,7 +6145,7 @@
- addresses based on the frame pointer or arg pointer until the
- reload pass starts. This is so that eliminating such addresses
- into stack based ones won't produce impossible code. */
--static int
-+int
- thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
- {
- /* ??? Not clear if this is right. Experiment. */
-@@ -22251,6 +22273,10 @@
- val = 6;
- asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
-
-+ /* Tag_CPU_unaligned_access. */
-+ asm_fprintf (asm_out_file, "\t.eabi_attribute 34, %d\n",
-+ unaligned_access);
-+
- /* Tag_ABI_FP_16bit_format. */
- if (arm_fp16_format)
- asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
-Index: gcc-4_6-branch/gcc/config/arm/arm.md
-===================================================================
---- gcc-4_6-branch.orig/gcc/config/arm/arm.md 2012-03-05 16:07:15.000000000 -0800
-+++ gcc-4_6-branch/gcc/config/arm/arm.md 2012-03-05 16:09:26.284941314 -0800
-@@ -114,6 +114,10 @@
- ; another symbolic address.
- (UNSPEC_MEMORY_BARRIER 28) ; Represent a memory barrier.
- (UNSPEC_PIC_UNIFIED 29) ; Create a common pic addressing form.
-+ (UNSPEC_UNALIGNED_LOAD 30) ; Used to represent ldr/ldrh instructions that access
-+ ; unaligned locations, on architectures which support
-+ ; that.
-+ (UNSPEC_UNALIGNED_STORE 31) ; Same for str/strh.
- ]
- )
-
-@@ -2461,10 +2465,10 @@
- ;;; this insv pattern, so this pattern needs to be reevalutated.
-
- (define_expand "insv"
-- [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
-- (match_operand:SI 1 "general_operand" "")
-- (match_operand:SI 2 "general_operand" ""))
-- (match_operand:SI 3 "reg_or_int_operand" ""))]
-+ [(set (zero_extract (match_operand 0 "nonimmediate_operand" "")
-+ (match_operand 1 "general_operand" "")
-+ (match_operand 2 "general_operand" ""))
-+ (match_operand 3 "reg_or_int_operand" ""))]
- "TARGET_ARM || arm_arch_thumb2"
- "
- {
-@@ -2475,35 +2479,70 @@
-
- if (arm_arch_thumb2)
- {
-- bool use_bfi = TRUE;
--
-- if (GET_CODE (operands[3]) == CONST_INT)
-+ if (unaligned_access && MEM_P (operands[0])
-+ && s_register_operand (operands[3], GET_MODE (operands[3]))
-+ && (width == 16 || width == 32) && (start_bit % BITS_PER_UNIT) == 0)
- {
-- HOST_WIDE_INT val = INTVAL (operands[3]) & mask;
-+ rtx base_addr;
-+
-+ if (BYTES_BIG_ENDIAN)
-+ start_bit = GET_MODE_BITSIZE (GET_MODE (operands[3])) - width
-+ - start_bit;
-
-- if (val == 0)
-+ if (width == 32)
- {
-- emit_insn (gen_insv_zero (operands[0], operands[1],
-- operands[2]));
-- DONE;
-+ base_addr = adjust_address (operands[0], SImode,
-+ start_bit / BITS_PER_UNIT);
-+ emit_insn (gen_unaligned_storesi (base_addr, operands[3]));
- }
-+ else
-+ {
-+ rtx tmp = gen_reg_rtx (HImode);
-
-- /* See if the set can be done with a single orr instruction. */
-- if (val == mask && const_ok_for_arm (val << start_bit))
-- use_bfi = FALSE;
-+ base_addr = adjust_address (operands[0], HImode,
-+ start_bit / BITS_PER_UNIT);
-+ emit_move_insn (tmp, gen_lowpart (HImode, operands[3]));
-+ emit_insn (gen_unaligned_storehi (base_addr, tmp));
-+ }
-+ DONE;
- }
--
-- if (use_bfi)
-+ else if (s_register_operand (operands[0], GET_MODE (operands[0])))
- {
-- if (GET_CODE (operands[3]) != REG)
-- operands[3] = force_reg (SImode, operands[3]);
-+ bool use_bfi = TRUE;
-
-- emit_insn (gen_insv_t2 (operands[0], operands[1], operands[2],
-- operands[3]));
-- DONE;
-+ if (GET_CODE (operands[3]) == CONST_INT)
-+ {
-+ HOST_WIDE_INT val = INTVAL (operands[3]) & mask;
-+
-+ if (val == 0)
-+ {
-+ emit_insn (gen_insv_zero (operands[0], operands[1],
-+ operands[2]));
-+ DONE;
-+ }
-+
-+ /* See if the set can be done with a single orr instruction. */
-+ if (val == mask && const_ok_for_arm (val << start_bit))
-+ use_bfi = FALSE;
-+ }
-+
-+ if (use_bfi)
-+ {
-+ if (GET_CODE (operands[3]) != REG)
-+ operands[3] = force_reg (SImode, operands[3]);
-+
-+ emit_insn (gen_insv_t2 (operands[0], operands[1], operands[2],
-+ operands[3]));
-+ DONE;
-+ }
- }
-+ else
-+ FAIL;
- }
-
-+ if (!s_register_operand (operands[0], GET_MODE (operands[0])))
-+ FAIL;
-+
- target = copy_rtx (operands[0]);
- /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
- subreg as the final target. */
-@@ -3695,12 +3734,10 @@
- ;; to reduce register pressure later on.
-
- (define_expand "extzv"
-- [(set (match_dup 4)
-- (ashift:SI (match_operand:SI 1 "register_operand" "")
-- (match_operand:SI 2 "const_int_operand" "")))
-- (set (match_operand:SI 0 "register_operand" "")
-- (lshiftrt:SI (match_dup 4)
-- (match_operand:SI 3 "const_int_operand" "")))]
-+ [(set (match_operand 0 "s_register_operand" "")
-+ (zero_extract (match_operand 1 "nonimmediate_operand" "")
-+ (match_operand 2 "const_int_operand" "")
-+ (match_operand 3 "const_int_operand" "")))]
- "TARGET_THUMB1 || arm_arch_thumb2"
- "
- {
-@@ -3709,10 +3746,57 @@
-
- if (arm_arch_thumb2)
- {
-- emit_insn (gen_extzv_t2 (operands[0], operands[1], operands[2],
-- operands[3]));
-- DONE;
-+ HOST_WIDE_INT width = INTVAL (operands[2]);
-+ HOST_WIDE_INT bitpos = INTVAL (operands[3]);
-+
-+ if (unaligned_access && MEM_P (operands[1])
-+ && (width == 16 || width == 32) && (bitpos % BITS_PER_UNIT) == 0)
-+ {
-+ rtx base_addr;
-+
-+ if (BYTES_BIG_ENDIAN)
-+ bitpos = GET_MODE_BITSIZE (GET_MODE (operands[0])) - width
-+ - bitpos;
-+
-+ if (width == 32)
-+ {
-+ base_addr = adjust_address (operands[1], SImode,
-+ bitpos / BITS_PER_UNIT);
-+ emit_insn (gen_unaligned_loadsi (operands[0], base_addr));
-+ }
-+ else
-+ {
-+ rtx dest = operands[0];
-+ rtx tmp = gen_reg_rtx (SImode);
-+
-+ /* We may get a paradoxical subreg here. Strip it off. */
-+ if (GET_CODE (dest) == SUBREG
-+ && GET_MODE (dest) == SImode
-+ && GET_MODE (SUBREG_REG (dest)) == HImode)
-+ dest = SUBREG_REG (dest);
-+
-+ if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
-+ FAIL;
-+
-+ base_addr = adjust_address (operands[1], HImode,
-+ bitpos / BITS_PER_UNIT);
-+ emit_insn (gen_unaligned_loadhiu (tmp, base_addr));
-+ emit_move_insn (gen_lowpart (SImode, dest), tmp);
-+ }
-+ DONE;
-+ }
-+ else if (s_register_operand (operands[1], GET_MODE (operands[1])))
-+ {
-+ emit_insn (gen_extzv_t2 (operands[0], operands[1], operands[2],
-+ operands[3]));
-+ DONE;
-+ }
-+ else
-+ FAIL;
- }
-+
-+ if (!s_register_operand (operands[1], GET_MODE (operands[1])))
-+ FAIL;
-
- operands[3] = GEN_INT (rshift);
-
-@@ -3722,12 +3806,154 @@
- DONE;
- }
-
-- operands[2] = GEN_INT (lshift);
-- operands[4] = gen_reg_rtx (SImode);
-+ emit_insn (gen_extzv_t1 (operands[0], operands[1], GEN_INT (lshift),
-+ operands[3], gen_reg_rtx (SImode)));
-+ DONE;
- }"
- )
-
--(define_insn "extv"
-+;; Helper for extzv, for the Thumb-1 register-shifts case.
-+
-+(define_expand "extzv_t1"
-+ [(set (match_operand:SI 4 "s_register_operand" "")
-+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "")
-+ (match_operand:SI 2 "const_int_operand" "")))
-+ (set (match_operand:SI 0 "s_register_operand" "")
-+ (lshiftrt:SI (match_dup 4)
-+ (match_operand:SI 3 "const_int_operand" "")))]
-+ "TARGET_THUMB1"
-+ "")
-+
-+(define_expand "extv"
-+ [(set (match_operand 0 "s_register_operand" "")
-+ (sign_extract (match_operand 1 "nonimmediate_operand" "")
-+ (match_operand 2 "const_int_operand" "")
-+ (match_operand 3 "const_int_operand" "")))]
-+ "arm_arch_thumb2"
-+{
-+ HOST_WIDE_INT width = INTVAL (operands[2]);
-+ HOST_WIDE_INT bitpos = INTVAL (operands[3]);
-+
-+ if (unaligned_access && MEM_P (operands[1]) && (width == 16 || width == 32)
-+ && (bitpos % BITS_PER_UNIT) == 0)
-+ {
-+ rtx base_addr;
-+
-+ if (BYTES_BIG_ENDIAN)
-+ bitpos = GET_MODE_BITSIZE (GET_MODE (operands[0])) - width - bitpos;
-+
-+ if (width == 32)
-+ {
-+ base_addr = adjust_address (operands[1], SImode,
-+ bitpos / BITS_PER_UNIT);
-+ emit_insn (gen_unaligned_loadsi (operands[0], base_addr));
-+ }
-+ else
-+ {
-+ rtx dest = operands[0];
-+ rtx tmp = gen_reg_rtx (SImode);
-+
-+ /* We may get a paradoxical subreg here. Strip it off. */
-+ if (GET_CODE (dest) == SUBREG
-+ && GET_MODE (dest) == SImode
-+ && GET_MODE (SUBREG_REG (dest)) == HImode)
-+ dest = SUBREG_REG (dest);
-+
-+ if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
-+ FAIL;
-+
-+ base_addr = adjust_address (operands[1], HImode,
-+ bitpos / BITS_PER_UNIT);
-+ emit_insn (gen_unaligned_loadhis (tmp, base_addr));
-+ emit_move_insn (gen_lowpart (SImode, dest), tmp);
-+ }
-+
-+ DONE;
-+ }
-+ else if (!s_register_operand (operands[1], GET_MODE (operands[1])))
-+ FAIL;
-+ else if (GET_MODE (operands[0]) == SImode
-+ && GET_MODE (operands[1]) == SImode)
-+ {
-+ emit_insn (gen_extv_regsi (operands[0], operands[1], operands[2],
-+ operands[3]));
-+ DONE;
-+ }
-+
-+ FAIL;
-+})
-+
-+; Helper to expand register forms of extv with the proper modes.
-+
-+(define_expand "extv_regsi"
-+ [(set (match_operand:SI 0 "s_register_operand" "")
-+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "")
-+ (match_operand 2 "const_int_operand" "")
-+ (match_operand 3 "const_int_operand" "")))]
-+ ""
-+{
-+})
-+
-+; ARMv6+ unaligned load/store instructions (used for packed structure accesses).
-+
-+(define_insn "unaligned_loadsi"
-+ [(set (match_operand:SI 0 "s_register_operand" "=l,r")
-+ (unspec:SI [(match_operand:SI 1 "memory_operand" "Uw,m")]
-+ UNSPEC_UNALIGNED_LOAD))]
-+ "unaligned_access && TARGET_32BIT"
-+ "ldr%?\t%0, %1\t@ unaligned"
-+ [(set_attr "arch" "t2,any")
-+ (set_attr "length" "2,4")
-+ (set_attr "predicable" "yes")
-+ (set_attr "type" "load1")])
-+
-+(define_insn "unaligned_loadhis"
-+ [(set (match_operand:SI 0 "s_register_operand" "=l,r")
-+ (sign_extend:SI
-+ (unspec:HI [(match_operand:HI 1 "memory_operand" "Uw,m")]
-+ UNSPEC_UNALIGNED_LOAD)))]
-+ "unaligned_access && TARGET_32BIT"
-+ "ldr%(sh%)\t%0, %1\t@ unaligned"
-+ [(set_attr "arch" "t2,any")
-+ (set_attr "length" "2,4")
-+ (set_attr "predicable" "yes")
-+ (set_attr "type" "load_byte")])
-+
-+(define_insn "unaligned_loadhiu"
-+ [(set (match_operand:SI 0 "s_register_operand" "=l,r")
-+ (zero_extend:SI
-+ (unspec:HI [(match_operand:HI 1 "memory_operand" "Uw,m")]
-+ UNSPEC_UNALIGNED_LOAD)))]
-+ "unaligned_access && TARGET_32BIT"
-+ "ldr%(h%)\t%0, %1\t@ unaligned"
-+ [(set_attr "arch" "t2,any")
-+ (set_attr "length" "2,4")
-+ (set_attr "predicable" "yes")
-+ (set_attr "type" "load_byte")])
-+
-+(define_insn "unaligned_storesi"
-+ [(set (match_operand:SI 0 "memory_operand" "=Uw,m")
-+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "l,r")]
-+ UNSPEC_UNALIGNED_STORE))]
-+ "unaligned_access && TARGET_32BIT"
-+ "str%?\t%1, %0\t@ unaligned"
-+ [(set_attr "arch" "t2,any")
-+ (set_attr "length" "2,4")
-+ (set_attr "predicable" "yes")
-+ (set_attr "type" "store1")])
-+
-+(define_insn "unaligned_storehi"
-+ [(set (match_operand:HI 0 "memory_operand" "=Uw,m")
-+ (unspec:HI [(match_operand:HI 1 "s_register_operand" "l,r")]
-+ UNSPEC_UNALIGNED_STORE))]
-+ "unaligned_access && TARGET_32BIT"
-+ "str%(h%)\t%1, %0\t@ unaligned"
-+ [(set_attr "arch" "t2,any")
-+ (set_attr "length" "2,4")
-+ (set_attr "predicable" "yes")
-+ (set_attr "type" "store1")])
-+
-+(define_insn "*extv_reg"
- [(set (match_operand:SI 0 "s_register_operand" "=r")
- (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
- (match_operand:SI 2 "const_int_operand" "M")
-@@ -6069,8 +6295,8 @@
-
-
- (define_insn "*arm_movqi_insn"
-- [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m")
-- (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
-+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,l,Uu,r,m")
-+ (match_operand:QI 1 "general_operand" "rI,K,Uu,l,m,r"))]
- "TARGET_32BIT
- && ( register_operand (operands[0], QImode)
- || register_operand (operands[1], QImode))"
-@@ -6078,10 +6304,14 @@
- mov%?\\t%0, %1
- mvn%?\\t%0, #%B1
- ldr%(b%)\\t%0, %1
-+ str%(b%)\\t%1, %0
-+ ldr%(b%)\\t%0, %1
- str%(b%)\\t%1, %0"
-- [(set_attr "type" "*,*,load1,store1")
-- (set_attr "insn" "mov,mvn,*,*")
-- (set_attr "predicable" "yes")]
-+ [(set_attr "type" "*,*,load1,store1,load1,store1")
-+ (set_attr "insn" "mov,mvn,*,*,*,*")
-+ (set_attr "predicable" "yes")
-+ (set_attr "arch" "any,any,t2,t2,any,any")
-+ (set_attr "length" "4,4,2,2,4,4")]
- )
-
- (define_insn "*thumb1_movqi_insn"
-Index: gcc-4_6-branch/gcc/config/arm/arm.opt
-===================================================================
---- gcc-4_6-branch.orig/gcc/config/arm/arm.opt 2012-03-05 16:07:14.000000000 -0800
-+++ gcc-4_6-branch/gcc/config/arm/arm.opt 2012-03-05 16:07:50.404936697 -0800
-@@ -173,3 +173,7 @@
- Target Report Var(fix_cm3_ldrd) Init(2)
- Avoid overlapping destination and address registers on LDRD instructions
- that may trigger Cortex-M3 errata.
-+
-+munaligned-access
-+Target Report Var(unaligned_access) Init(2)
-+Enable unaligned word and halfword accesses to packed data.
-Index: gcc-4_6-branch/gcc/config/arm/constraints.md
-===================================================================
---- gcc-4_6-branch.orig/gcc/config/arm/constraints.md 2012-03-05 16:07:14.000000000 -0800
-+++ gcc-4_6-branch/gcc/config/arm/constraints.md 2012-03-05 16:07:50.404936697 -0800
-@@ -36,6 +36,7 @@
- ;; The following memory constraints have been used:
- ;; in ARM/Thumb-2 state: Q, Ut, Uv, Uy, Un, Um, Us
- ;; in ARM state: Uq
-+;; in Thumb state: Uu, Uw
-
-
- (define_register_constraint "f" "TARGET_ARM ? FPA_REGS : NO_REGS"
-@@ -344,6 +345,27 @@
- (and (match_code "mem")
- (match_test "REG_P (XEXP (op, 0))")))
-
-+(define_memory_constraint "Uu"
-+ "@internal
-+ In Thumb state an address that is valid in 16bit encoding."
-+ (and (match_code "mem")
-+ (match_test "TARGET_THUMB
-+ && thumb1_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
-+ 0)")))
-+
-+; The 16-bit post-increment LDR/STR accepted by thumb1_legitimate_address_p
-+; are actually LDM/STM instructions, so cannot be used to access unaligned
-+; data.
-+(define_memory_constraint "Uw"
-+ "@internal
-+ In Thumb state an address that is valid in 16bit encoding, and that can be
-+ used for unaligned accesses."
-+ (and (match_code "mem")
-+ (match_test "TARGET_THUMB
-+ && thumb1_legitimate_address_p (GET_MODE (op), XEXP (op, 0),
-+ 0)
-+ && GET_CODE (XEXP (op, 0)) != POST_INC")))
-+
- ;; We used to have constraint letters for S and R in ARM state, but
- ;; all uses of these now appear to have been removed.
-
-Index: gcc-4_6-branch/gcc/expmed.c
-===================================================================
---- gcc-4_6-branch.orig/gcc/expmed.c 2012-01-04 15:37:51.000000000 -0800
-+++ gcc-4_6-branch/gcc/expmed.c 2012-03-05 16:07:50.404936697 -0800
-@@ -657,6 +657,10 @@
- && GET_MODE (value) != BLKmode
- && bitsize > 0
- && GET_MODE_BITSIZE (op_mode) >= bitsize
-+ /* Do not use insv for volatile bitfields when
-+ -fstrict-volatile-bitfields is in effect. */
-+ && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
-+ && flag_strict_volatile_bitfields > 0)
- && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
- && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
- && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
-@@ -700,19 +704,21 @@
- copy_back = true;
- }
-
-- /* On big-endian machines, we count bits from the most significant.
-- If the bit field insn does not, we must invert. */
--
-- if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
-- xbitpos = unit - bitsize - xbitpos;
--
- /* We have been counting XBITPOS within UNIT.
- Count instead within the size of the register. */
-- if (BITS_BIG_ENDIAN && !MEM_P (xop0))
-+ if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
- xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
-
- unit = GET_MODE_BITSIZE (op_mode);
-
-+ /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
-+ "backwards" from the size of the unit we are inserting into.
-+ Otherwise, we count bits from the most significant on a
-+ BYTES/BITS_BIG_ENDIAN machine. */
-+
-+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
-+ xbitpos = unit - bitsize - xbitpos;
-+
- /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
- value1 = value;
- if (GET_MODE (value) != op_mode)
-@@ -1528,6 +1534,10 @@
- if (ext_mode != MAX_MACHINE_MODE
- && bitsize > 0
- && GET_MODE_BITSIZE (ext_mode) >= bitsize
-+ /* Do not use extv/extzv for volatile bitfields when
-+ -fstrict-volatile-bitfields is in effect. */
-+ && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
-+ && flag_strict_volatile_bitfields > 0)
- /* If op0 is a register, we need it in EXT_MODE to make it
- acceptable to the format of ext(z)v. */
- && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
-@@ -1552,17 +1562,20 @@
- /* Get ref to first byte containing part of the field. */
- xop0 = adjust_address (xop0, byte_mode, xoffset);
-
-- /* On big-endian machines, we count bits from the most significant.
-- If the bit field insn does not, we must invert. */
-- if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
-- xbitpos = unit - bitsize - xbitpos;
--
- /* Now convert from counting within UNIT to counting in EXT_MODE. */
-- if (BITS_BIG_ENDIAN && !MEM_P (xop0))
-+ if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
- xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
-
- unit = GET_MODE_BITSIZE (ext_mode);
-
-+ /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
-+ "backwards" from the size of the unit we are extracting from.
-+ Otherwise, we count bits from the most significant on a
-+ BYTES/BITS_BIG_ENDIAN machine. */
-+
-+ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
-+ xbitpos = unit - bitsize - xbitpos;
-+
- if (xtarget == 0)
- xtarget = xspec_target = gen_reg_rtx (tmode);
-
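
For context only (not part of the deleted patch): a minimal C sketch of the packed-structure access this backport targets. With -munaligned-access in effect (the default the arm_override_options hunk above selects for ARMv6 and ARMv7-A/R/M), the insv/extzv/extv changes let 16- and 32-bit fields at byte-aligned offsets be accessed with single unaligned ldr/ldrh/str/strh instructions instead of byte-by-byte sequences. The struct and function names below are hypothetical.

#include <stdint.h>

/* 'packed' removes padding, so 'val' sits at a misaligned offset. */
struct __attribute__((packed)) record {
  uint8_t  tag;
  uint32_t val;
};

uint32_t get_val (const struct record *r)
{
  return r->val;   /* may be emitted as: ldr r0, [r0, #1]  @ unaligned */
}

void set_val (struct record *r, uint32_t v)
{
  r->val = v;      /* may be emitted as: str r1, [r0, #1]  @ unaligned */
}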