Diffstat (limited to 'recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch')
 -rw-r--r--  recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch | 386
 1 file changed, 386 insertions, 0 deletions
diff --git a/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch b/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch
new file mode 100644
index 0000000000..2753300925
--- /dev/null
+++ b/recipes-devtools/gcc/gcc-4.5/linaro/gcc-4.5-linaro-r99404.patch
@@ -0,0 +1,386 @@
2010-09-20  Jie Zhang  <jie@codesourcery.com>

Issue #9019

Backport from mainline:

gcc/
2010-09-20  Jie Zhang  <jie@codesourcery.com>
* config/arm/arm.c (arm_address_offset_is_imm): New.
(arm_early_store_addr_dep): New.
(arm_early_load_addr_dep): New.
* config/arm/arm-protos.h (arm_early_store_addr_dep): Declare.
(arm_early_load_addr_dep): Declare.
(arm_address_offset_is_imm): Declare.
* config/arm/cortex-m4.md: New file.
* config/arm/cortex-m4-fpu.md: New file.
* config/arm/arm.md: Include cortex-m4.md and cortex-m4-fpu.md.
(attr generic_sched): Exclude cortexm4.
(attr generic_vfp): Exclude cortexm4.

=== modified file 'gcc/config/arm/arm-protos.h'
Index: gcc-4.5/gcc/config/arm/arm-protos.h
===================================================================
--- gcc-4.5.orig/gcc/config/arm/arm-protos.h
+++ gcc-4.5/gcc/config/arm/arm-protos.h
@@ -87,6 +87,8 @@ extern int arm_coproc_mem_operand (rtx,
extern int neon_vector_mem_operand (rtx, int);
extern int neon_struct_mem_operand (rtx);
extern int arm_no_early_store_addr_dep (rtx, rtx);
+extern int arm_early_store_addr_dep (rtx, rtx);
+extern int arm_early_load_addr_dep (rtx, rtx);
extern int arm_no_early_alu_shift_dep (rtx, rtx);
extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
extern int arm_no_early_mul_dep (rtx, rtx);
@@ -131,6 +133,7 @@ extern const char *output_move_quad (rtx
extern const char *output_move_vfp (rtx *operands);
extern const char *output_move_neon (rtx *operands);
extern int arm_attr_length_move_neon (rtx);
+extern int arm_address_offset_is_imm (rtx);
extern const char *output_add_immediate (rtx *);
extern const char *arithmetic_instr (rtx, int);
extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
Index: gcc-4.5/gcc/config/arm/arm.c
===================================================================
--- gcc-4.5.orig/gcc/config/arm/arm.c
+++ gcc-4.5/gcc/config/arm/arm.c
@@ -13542,6 +13542,34 @@ arm_attr_length_move_neon (rtx insn)
  return 4;
}

+/* Return nonzero if the offset in the address is an immediate. Otherwise,
+   return zero. */
+
+int
+arm_address_offset_is_imm (rtx insn)
+{
+  rtx mem, addr;
+
+  extract_insn_cached (insn);
+
+  if (REG_P (recog_data.operand[0]))
+    return 0;
+
+  mem = recog_data.operand[0];
+
+  gcc_assert (MEM_P (mem));
+
+  addr = XEXP (mem, 0);
+
+  if (GET_CODE (addr) == REG
+      || (GET_CODE (addr) == PLUS
+          && GET_CODE (XEXP (addr, 0)) == REG
+          && GET_CODE (XEXP (addr, 1)) == CONST_INT))
+    return 1;
+  else
+    return 0;
+}
+
/* Output an ADD r, s, #n where n may be too big for one instruction.
   If adding zero to one register, output nothing. */
const char *
@@ -21620,6 +21648,38 @@ arm_no_early_store_addr_dep (rtx produce
  return !reg_overlap_mentioned_p (value, addr);
}

+/* Return nonzero if the CONSUMER instruction (a store) does need
+   PRODUCER's value to calculate the address. */
+
+int
+arm_early_store_addr_dep (rtx producer, rtx consumer)
+{
+  return !arm_no_early_store_addr_dep (producer, consumer);
+}
+
+/* Return nonzero if the CONSUMER instruction (a load) does need
+   PRODUCER's value to calculate the address. */
+
+int
+arm_early_load_addr_dep (rtx producer, rtx consumer)
+{
+  rtx value = PATTERN (producer);
+  rtx addr = PATTERN (consumer);
+
+  if (GET_CODE (value) == COND_EXEC)
+    value = COND_EXEC_CODE (value);
+  if (GET_CODE (value) == PARALLEL)
+    value = XVECEXP (value, 0, 0);
+  value = XEXP (value, 0);
+  if (GET_CODE (addr) == COND_EXEC)
+    addr = COND_EXEC_CODE (addr);
+  if (GET_CODE (addr) == PARALLEL)
+    addr = XVECEXP (addr, 0, 0);
+  addr = XEXP (addr, 1);
+
+  return reg_overlap_mentioned_p (value, addr);
+}
+
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER. */
Index: gcc-4.5/gcc/config/arm/arm.md
===================================================================
--- gcc-4.5.orig/gcc/config/arm/arm.md
+++ gcc-4.5/gcc/config/arm/arm.md
@@ -434,16 +434,16 @@
;; True if the generic scheduling description should be used.

(define_attr "generic_sched" "yes,no"
-  (const (if_then_else
-          (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa8,cortexa9")
-               (eq_attr "tune_cortexr4" "yes"))
+  (const (if_then_else
+          (ior (eq_attr "tune" "arm926ejs,arm1020e,arm1026ejs,arm1136js,arm1136jfs,cortexa5,cortexa8,cortexa9,cortexm4")
+               (eq_attr "tune_cortexr4" "yes"))
          (const_string "no")
          (const_string "yes"))))

(define_attr "generic_vfp" "yes,no"
  (const (if_then_else
          (and (eq_attr "fpu" "vfp")
-               (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa8,cortexa9")
+               (eq_attr "tune" "!arm1020e,arm1022e,cortexa5,cortexa8,cortexa9,cortexm4")
               (eq_attr "tune_cortexr4" "no"))
          (const_string "yes")
          (const_string "no"))))
@@ -472,6 +472,8 @@
(include "cortex-a9.md")
(include "cortex-r4.md")
(include "cortex-r4f.md")
+(include "cortex-m4.md")
+(include "cortex-m4-fpu.md")
(include "vfp11.md")


Index: gcc-4.5/gcc/config/arm/cortex-m4-fpu.md
===================================================================
--- /dev/null
+++ gcc-4.5/gcc/config/arm/cortex-m4-fpu.md
@@ -0,0 +1,111 @@
+;; ARM Cortex-M4 FPU pipeline description
+;; Copyright (C) 2010 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Use an artificial unit to model the FPU.
+(define_cpu_unit "cortex_m4_v" "cortex_m4")
+
+(define_reservation "cortex_m4_ex_v" "cortex_m4_ex+cortex_m4_v")
+
+;; Integer instructions following VDIV or VSQRT complete out-of-order.
+(define_insn_reservation "cortex_m4_fdivs" 15
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fdivs"))
+  "cortex_m4_ex_v,cortex_m4_v*13")
+
+(define_insn_reservation "cortex_m4_vmov_1" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fcpys,fconsts"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_vmov_2" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_2_r,r_2_f"))
+  "cortex_m4_ex_v*2")
+
+(define_insn_reservation "cortex_m4_fmuls" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fmuls"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_fmacs" 4
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fmacs"))
+  "cortex_m4_ex_v*3")
+
+(define_insn_reservation "cortex_m4_ffariths" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "ffariths"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_fadds" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fadds"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_fcmps" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "fcmps"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_f_flag" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_flag"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_f_cvt" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_cvt"))
+  "cortex_m4_ex_v")
+
+(define_insn_reservation "cortex_m4_f_load" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_load"))
+  "cortex_m4_ex_v*2")
+
+(define_insn_reservation "cortex_m4_f_store" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_store"))
+  "cortex_m4_ex_v*2")
+
+(define_insn_reservation "cortex_m4_f_loadd" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_loadd"))
+  "cortex_m4_ex_v*3")
+
+(define_insn_reservation "cortex_m4_f_stored" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "f_stored"))
+  "cortex_m4_ex_v*3")
+
+;; MAC instructions consume their addend one cycle later. If the result
+;; of an arithmetic instruction is consumed as the addend of the following
+;; MAC instruction, the latency can be decreased by one.
+
+(define_bypass 1 "cortex_m4_fadds,cortex_m4_fmuls,cortex_m4_f_cvt"
+                 "cortex_m4_fmacs"
+                 "arm_no_early_mul_dep")
+
+(define_bypass 3 "cortex_m4_fmacs"
+                 "cortex_m4_fmacs"
+                 "arm_no_early_mul_dep")
+
+(define_bypass 14 "cortex_m4_fdivs"
+                  "cortex_m4_fmacs"
+                  "arm_no_early_mul_dep")
Index: gcc-4.5/gcc/config/arm/cortex-m4.md
===================================================================
--- /dev/null
+++ gcc-4.5/gcc/config/arm/cortex-m4.md
@@ -0,0 +1,111 @@
+;; ARM Cortex-M4 pipeline description
+;; Copyright (C) 2010 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "cortex_m4")
+
+;; We model the pipelining of LDR instructions by using two artificial units.
+
+(define_cpu_unit "cortex_m4_a" "cortex_m4")
+
+(define_cpu_unit "cortex_m4_b" "cortex_m4")
+
+(define_reservation "cortex_m4_ex" "cortex_m4_a+cortex_m4_b")
+
+;; ALU and multiply is one cycle.
+(define_insn_reservation "cortex_m4_alu" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "alu,alu_shift,alu_shift_reg,mult"))
+  "cortex_m4_ex")
+
+;; Byte, half-word and word load is two cycles.
+(define_insn_reservation "cortex_m4_load1" 2
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "load_byte,load1"))
+  "cortex_m4_a, cortex_m4_b")
+
+;; str rx, [ry, #imm] is always one cycle.
+(define_insn_reservation "cortex_m4_store1_1" 1
+  (and (and (eq_attr "tune" "cortexm4")
+            (eq_attr "type" "store1"))
+       (ne (symbol_ref ("arm_address_offset_is_imm (insn)")) (const_int 0)))
+  "cortex_m4_a")
+
+;; Other byte, half-word and word store is two cycles.
+(define_insn_reservation "cortex_m4_store1_2" 2
+  (and (and (eq_attr "tune" "cortexm4")
+            (eq_attr "type" "store1"))
+       (eq (symbol_ref ("arm_address_offset_is_imm (insn)")) (const_int 0)))
+  "cortex_m4_a*2")
+
+(define_insn_reservation "cortex_m4_load2" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "load2"))
+  "cortex_m4_ex*3")
+
+(define_insn_reservation "cortex_m4_store2" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "store2"))
+  "cortex_m4_ex*3")
+
+(define_insn_reservation "cortex_m4_load3" 4
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "load3"))
+  "cortex_m4_ex*4")
+
+(define_insn_reservation "cortex_m4_store3" 4
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "store3"))
+  "cortex_m4_ex*4")
+
+(define_insn_reservation "cortex_m4_load4" 5
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "load4"))
+  "cortex_m4_ex*5")
+
+(define_insn_reservation "cortex_m4_store4" 5
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "store4"))
+  "cortex_m4_ex*5")
+
+;; If the address of load or store depends on the result of the preceding
+;; instruction, the latency is increased by one.
+
+(define_bypass 2 "cortex_m4_alu"
+                 "cortex_m4_load1"
+                 "arm_early_load_addr_dep")
+
+(define_bypass 2 "cortex_m4_alu"
+                 "cortex_m4_store1_1,cortex_m4_store1_2"
+                 "arm_early_store_addr_dep")
+
+(define_insn_reservation "cortex_m4_branch" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "branch"))
+  "cortex_m4_ex*3")
+
+(define_insn_reservation "cortex_m4_call" 3
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "call"))
+  "cortex_m4_ex*3")
+
+(define_insn_reservation "cortex_m4_block" 1
+  (and (eq_attr "tune" "cortexm4")
+       (eq_attr "type" "block"))
+  "cortex_m4_ex")
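
The address-shape test in arm_address_offset_is_imm above is what steers stores into the one-cycle cortex_m4_store1_1 reservation (str rx, [ry, #imm]) rather than the two-cycle cortex_m4_store1_2 reservation (register-offset and other forms). Below is a minimal standalone C sketch of that test, not GCC code: the enum, the toy node struct and the helper name are invented here for illustration, whereas the real function walks GCC rtx objects with GET_CODE and XEXP as shown in the patch.

/* Standalone illustration of the address classification used by the
   Cortex-M4 store reservations.  The node type is a made-up stand-in
   for GCC's rtx.  */

#include <stdio.h>

enum code { REG, CONST_INT, PLUS };

struct node
{
  enum code code;
  const struct node *op0, *op1;  /* operands, used only by PLUS */
};

/* Nonzero when the address is a bare base register or a base register
   plus a constant, the forms modeled as single-cycle stores.  */
static int
address_offset_is_imm (const struct node *addr)
{
  if (addr->code == REG)
    return 1;
  if (addr->code == PLUS
      && addr->op0->code == REG
      && addr->op1->code == CONST_INT)
    return 1;
  return 0;
}

int
main (void)
{
  struct node r1 = { REG, 0, 0 };
  struct node r2 = { REG, 0, 0 };
  struct node imm8 = { CONST_INT, 0, 0 };
  struct node imm_off = { PLUS, &r1, &imm8 };  /* like [r1, #8] */
  struct node reg_off = { PLUS, &r1, &r2 };    /* like [r1, r2] */

  printf ("[r1, #8]: %d (cortex_m4_store1_1, 1 cycle)\n",
          address_offset_is_imm (&imm_off));
  printf ("[r1, r2]: %d (cortex_m4_store1_2, 2 cycles)\n",
          address_offset_is_imm (&reg_off));
  return 0;
}

Compiled as an ordinary C program, it prints 1 for the immediate-offset form and 0 for the register-offset form, mirroring the (ne ...) and (eq ...) guards on the two store reservations in cortex-m4.md.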
