1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 93, 94, 95, 96, 1997 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-flags.h"
35 #include "insn-attr.h"
41 /* The maximum number of insns skipped which will be conditionalised if possible. */
43 #define MAX_INSNS_SKIPPED 5
45 /* Some function declarations. */
46 extern FILE *asm_out_file
;
48 static HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
49 static char *output_multi_immediate
PROTO ((rtx
*, char *, char *, int,
51 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
52 HOST_WIDE_INT
, rtx
, rtx
, int, int));
53 static int arm_naked_function_p
PROTO ((tree
));
54 static void init_fpa_table
PROTO ((void));
55 static enum machine_mode select_dominance_cc_mode
PROTO ((enum rtx_code
, rtx
,
57 static HOST_WIDE_INT add_constant
PROTO ((rtx
, enum machine_mode
));
58 static void dump_table
PROTO ((rtx
));
59 static int fixit
PROTO ((rtx
, enum machine_mode
, int));
60 static rtx find_barrier
PROTO ((rtx
, int));
61 static int broken_move
PROTO ((rtx
));
62 static char *fp_const_from_val
PROTO ((REAL_VALUE_TYPE
*));
63 static int eliminate_lr2ip
PROTO ((rtx
*));
64 static char *shift_op
PROTO ((rtx
, HOST_WIDE_INT
*));
65 static int pattern_really_clobbers_lr
PROTO ((rtx
));
66 static int function_really_clobbers_lr
PROTO ((rtx
));
67 static void emit_multi_reg_push
PROTO ((int));
68 static void emit_sfm
PROTO ((int, int));
69 static enum arm_cond_code get_arm_condition_code
PROTO ((rtx
));
71 /* Define the information needed to generate branch insns. This is
72 stored from the compare operation. */
74 rtx arm_compare_op0
, arm_compare_op1
;
77 /* What type of cpu are we compiling for? */
78 enum processor_type arm_cpu
;
80 /* What type of floating point are we tuning for? */
81 enum floating_point_type arm_fpu
;
83 /* What type of floating point instructions are available? */
84 enum floating_point_type arm_fpu_arch
;
86 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
87 enum prog_mode_type arm_prgmode
;
89 /* Set by the -mfp=... option */
90 char *target_fp_name
= NULL
;
92 /* Nonzero if this is an "M" variant of the processor. */
93 int arm_fast_multiply
= 0;
95 /* Nonzero if this chip supports the ARM Architecture 4 extensions */
/* NOTE(review): the declaration this comment describes (presumably
   arm_arch4, which is assigned in arm_override_options below) is not
   visible in this extract -- confirm against the full file.  */
98 /* Set to the features we should tune the code for (multiply speed etc). */
/* NOTE(review): likewise, the declaration of tune_flags (read in
   arm_override_options below) appears to have been dropped here.  */
101 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
102 must report the mode of the memory reference from PRINT_OPERAND to
103 PRINT_OPERAND_ADDRESS. */
104 enum machine_mode output_memory_reference_mode
;
106 /* Nonzero if the prologue must setup `fp'. */
/* NOTE(review): the comment above mentions `fp', but the variable below
   records whether the current function takes anonymous arguments; the
   comment looks stale or belongs to a declaration dropped from this
   extract -- confirm.  */
107 int current_function_anonymous_args
;
109 /* The register number to be used for the PIC offset register. */
110 int arm_pic_register
= 9;
112 /* Location counter of .text segment. */
113 int arm_text_location
= 0;
115 /* Set to one if we think that lr is only saved because of subroutine calls,
116 but all of these can be `put after' return insns */
117 int lr_save_eliminated
;
119 /* Set to 1 when a return insn is output, this means that the epilogue
   is presumably not needed -- rest of the original comment is missing
   from this extract. */
122 static int return_used_this_function
;
/* Insn-count threshold used by arm_split_constant below to decide when
   synthesising a constant in-line costs too much.  */
124 static int arm_constant_limit
= 3;
126 /* For an explanation of these variables, see final_prescan_insn below. */
128 enum arm_cond_code arm_current_cc
;
130 int arm_target_label
;
132 /* The condition codes of the ARM, and the inverse function. */
133 char *arm_condition_codes
[] =
135 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
136 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
139 static enum arm_cond_code
get_arm_condition_code ();
142 /* Initialization code */
144 struct arm_cpu_select arm_select
[4] =
146 /* switch name, tune arch */
147 { (char *)0, "--with-cpu=", 1, 1 },
148 { (char *)0, "-mcpu=", 1, 1 },
149 { (char *)0, "-march=", 0, 1 },
150 { (char *)0, "-mtune=", 1, 0 },
/* Processor capability flag bits.  These are OR-ed together per entry
   in the all_procs table below and examined (via tune_flags / flags)
   in arm_override_options.  */
153 #define FL_CO_PROC 0x01 /* Has external co-processor bus */
154 #define FL_FAST_MULT 0x02 /* Fast multiply */
155 #define FL_MODE26 0x04 /* 26-bit mode support */
156 #define FL_MODE32 0x08 /* 32-bit mode support */
157 #define FL_ARCH4 0x10 /* Architecture rel 4 */
158 #define FL_THUMB 0x20 /* Thumb aware */
163 enum processor_type type
;
167 /* Not all of these give usefully different compilation alternatives,
168 but there is no simple way of generalizing them. */
169 static struct processors all_procs
[] =
171 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
172 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
173 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
174 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
175 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
176 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
177 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
178 /* arm7m doesn't exist on its own, only in conjunction with D, (and I), but
179 those don't alter the code, so it is sometimes known as the arm7m */
180 {"arm7m", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
182 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
184 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
186 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
187 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
188 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
189 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
190 /* Doesn't really have an external co-proc, but does have embedded fpu */
191 {"arm7500fe", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
192 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
193 | FL_ARCH4
| FL_THUMB
)},
194 {"arm8", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
196 {"arm810", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
198 {"strongarm", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
200 {"strongarm110", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
202 {"armv2", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
203 {"armv2a", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
204 {"armv3", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
205 {"armv3m", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
207 {"armv4", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
208 | FL_MODE26
| FL_ARCH4
)},
209 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
210 implementations that support it, so we will leave it out for now. */
211 {"armv4t", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
216 /* Fix up any incompatible options that the user has specified.
217 This has now turned into a maze. */
/* NOTE(review): this extract is missing interior source lines (the
   embedded original line numbers jump), so braces and a number of
   statements are absent below.  Comments only have been added here;
   no code has been touched.  */
219 arm_override_options ()
221 int arm_thumb_aware
= 0;
224 struct arm_cpu_select
*ptr
;
/* Table mapping the configure-time TARGET_CPU_DEFAULT value onto a
   cpu name; used below to seed arm_select[0].  */
225 static struct cpu_default
{
229 { TARGET_CPU_arm2
, "arm2" },
230 { TARGET_CPU_arm6
, "arm6" },
231 { TARGET_CPU_arm610
, "arm610" },
232 { TARGET_CPU_arm7dm
, "arm7dm" },
233 { TARGET_CPU_arm7500fe
, "arm7500fe" },
234 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
235 { TARGET_CPU_arm8
, "arm8" },
236 { TARGET_CPU_arm810
, "arm810" },
237 { TARGET_CPU_strongarm
, "strongarm" },
240 struct cpu_default
*def
;
242 /* Set the default. */
243 for (def
= &cpu_defaults
[0]; def
->name
; ++def
)
244 if (def
->cpu
== TARGET_CPU_DEFAULT
)
249 arm_select
[0].string
= def
->name
;
/* Walk the --with-cpu / -mcpu / -march / -mtune selections and match
   each against the all_procs table.  */
251 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
253 ptr
= &arm_select
[i
];
254 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
256 struct processors
*sel
;
258 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
259 if (! strcmp (ptr
->string
, sel
->name
))
261 /* -march= is the only flag that can take an architecture
262 type, so if we match when the tune bit is set, the
263 option was invalid. */
266 if (sel
->type
== PROCESSOR_NONE
)
267 continue; /* It's an architecture, not a cpu */
270 tune_flags
= sel
->flags
;
279 if (sel
->name
== NULL
)
280 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
284 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
285 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
287 if (TARGET_POKE_FUNCTION_NAME
)
288 target_flags
|= ARM_FLAG_APCS_FRAME
;
291 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
294 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
296 if (TARGET_APCS_REENT
&& flag_pic
)
297 fatal ("-fpic and -mapcs-reent are incompatible");
299 if (TARGET_APCS_REENT
)
300 warning ("APCS reentrant code not supported.");
302 /* If stack checking is disabled, we can use r10 as the PIC register,
303 which keeps r9 available. */
304 if (flag_pic
&& ! TARGET_APCS_STACK
)
305 arm_pic_register
= 10;
307 /* Well, I'm about to have a go, but pic is NOT going to be compatible
308 with APCS reentrancy, since that requires too much support in the
309 assembler and linker, and the ARMASM assembler seems to lack some
310 required directives. */
312 warning ("Position independent code not supported. Ignored");
314 if (TARGET_APCS_FLOAT
)
315 warning ("Passing floating point arguments in fp regs not yet supported");
317 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
319 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
320 target_flags
|= ARM_FLAG_APCS_FRAME
;
323 /* Default is to tune for an FPA */
326 /* Default value for floating point code... if no co-processor
327 bus, then schedule for emulated floating point. Otherwise,
328 assume the user has an FPA.
329 Note: this does not prevent use of floating point instructions,
330 -msoft-float does that. */
/* NOTE(review): precedence bug in the next condition -- `==' binds
   tighter than `&', so it parses as `tune_flags & (FL_CO_PROC == 0)',
   i.e. `tune_flags & 0', which is constant false.  Almost certainly
   `(tune_flags & FL_CO_PROC) == 0' was intended.  */
331 if (tune_flags
& FL_CO_PROC
== 0)
/* NOTE(review): `flags' below is not declared anywhere in this
   extract; presumably a local holding the selected processor's
   capability bits -- confirm against the full file.  */
334 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
335 arm_arch4
= (flags
& FL_ARCH4
) != 0;
336 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
/* Map the -mfp=2 / -mfp=3 option onto the fp architecture.  */
340 if (strcmp (target_fp_name
, "2") == 0)
341 arm_fpu_arch
= FP_SOFT2
;
342 else if (strcmp (target_fp_name
, "3") == 0)
343 arm_fpu_arch
= FP_HARD
;
345 fatal ("Invalid floating point emulation option: -mfpe=%s",
349 arm_fpu_arch
= FP_DEFAULT
;
351 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
353 warning ("This processor variant does not support Thumb interworking");
354 target_flags
&= ~ARM_FLAG_THUMB
;
357 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
360 /* For arm2/3 there is no need to do any scheduling if there is only
361 a floating point emulator, or we are doing software floating-point. */
362 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
363 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
/* Select 26-bit or 32-bit program mode from -mapcs-32/-mapcs-26.  */
365 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
369 /* Return 1 if it is possible to return using a single instruction */
376 if (!reload_completed
||current_function_pretend_args_size
377 || current_function_anonymous_args
378 || ((get_frame_size () + current_function_outgoing_args_size
!= 0)
379 && !(TARGET_APCS
|| frame_pointer_needed
)))
382 /* Can't be done if interworking with Thumb, and any registers have been
384 if (TARGET_THUMB_INTERWORK
)
385 for (regno
= 0; regno
< 16; regno
++)
386 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
389 /* Can't be done if any of the FPU regs are pushed, since this also
391 for (regno
= 16; regno
< 24; regno
++)
392 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
395 /* If a function is naked, don't use the "return" insn. */
396 if (arm_naked_function_p (current_function_decl
))
402 /* Return TRUE if int I is a valid immediate ARM constant. */
408 unsigned HOST_WIDE_INT mask
= ~0xFF;
410 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
411 be all zero, or all one. */
412 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
413 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
414 != (((HOST_WIDE_INT
) -1) & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
417 /* Fast return for 0 and powers of 2 */
418 if ((i
& (i
- 1)) == 0)
423 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
426 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
427 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
428 } while (mask
!= ~0xFF);
433 /* Return true if I is a valid constant for the operation CODE. */
435 const_ok_for_op (i
, code
, mode
)
438 enum machine_mode mode
;
440 if (const_ok_for_arm (i
))
446 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
448 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
454 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
461 /* Emit a sequence of insns to handle a large constant.
462 CODE is the code of the operation required, it can be any of SET, PLUS,
463 IOR, AND, XOR, MINUS;
464 MODE is the mode in which the operation is being performed;
465 VAL is the integer to operate on;
466 SOURCE is the other operand (a register, or a null-pointer for SET);
467 SUBTARGETS means it is safe to create scratch registers if that will
468 either produce a simpler sequence, or we will want to cse the values.
469 Return value is the number of insns emitted. */
472 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
474 enum machine_mode mode
;
480 if (subtargets
|| code
== SET
481 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
482 && REGNO (target
) != REGNO (source
)))
486 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
487 > arm_constant_limit
+ (code
!= SET
))
491 /* Currently SET is the only monadic value for CODE, all
492 the rest are diadic. */
493 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
498 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
500 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
501 /* For MINUS, the value is subtracted from, since we never
502 have subtraction of a constant. */
504 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
505 gen_rtx (code
, mode
, temp
, source
)));
507 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
508 gen_rtx (code
, mode
, source
, temp
)));
514 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
517 /* As above, but extra parameter GENERATE which, if clear, suppresses
520 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
522 enum machine_mode mode
;
532 int can_negate_initial
= 0;
535 int num_bits_set
= 0;
536 int set_sign_bit_copies
= 0;
537 int clear_sign_bit_copies
= 0;
538 int clear_zero_bit_copies
= 0;
539 int set_zero_bit_copies
= 0;
542 unsigned HOST_WIDE_INT temp1
, temp2
;
543 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
545 /* find out which operations are safe for a given CODE. Also do a quick
546 check for degenerate cases; these can occur when DImode operations
558 can_negate_initial
= 1;
562 if (remainder
== 0xffffffff)
565 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
566 GEN_INT (ARM_SIGN_EXTEND (val
))));
571 if (reload_completed
&& rtx_equal_p (target
, source
))
574 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
583 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
586 if (remainder
== 0xffffffff)
588 if (reload_completed
&& rtx_equal_p (target
, source
))
591 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
600 if (reload_completed
&& rtx_equal_p (target
, source
))
603 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
606 if (remainder
== 0xffffffff)
609 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
610 gen_rtx (NOT
, mode
, source
)));
614 /* We don't know how to handle this yet below. */
618 /* We treat MINUS as (val - source), since (source - val) is always
619 passed as (source + (-val)). */
623 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
624 gen_rtx (NEG
, mode
, source
)));
627 if (const_ok_for_arm (val
))
630 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
631 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
642 /* If we can do it in one insn get out quickly */
643 if (const_ok_for_arm (val
)
644 || (can_negate_initial
&& const_ok_for_arm (-val
))
645 || (can_invert
&& const_ok_for_arm (~val
)))
648 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
649 (source
? gen_rtx (code
, mode
, source
,
656 /* Calculate a few attributes that may be useful for specific
659 for (i
= 31; i
>= 0; i
--)
661 if ((remainder
& (1 << i
)) == 0)
662 clear_sign_bit_copies
++;
667 for (i
= 31; i
>= 0; i
--)
669 if ((remainder
& (1 << i
)) != 0)
670 set_sign_bit_copies
++;
675 for (i
= 0; i
<= 31; i
++)
677 if ((remainder
& (1 << i
)) == 0)
678 clear_zero_bit_copies
++;
683 for (i
= 0; i
<= 31; i
++)
685 if ((remainder
& (1 << i
)) != 0)
686 set_zero_bit_copies
++;
694 /* See if we can do this by sign_extending a constant that is known
695 to be negative. This is a good way of doing it, since the shift
696 may well merge into a subsequent insn. */
697 if (set_sign_bit_copies
> 1)
700 (temp1
= ARM_SIGN_EXTEND (remainder
701 << (set_sign_bit_copies
- 1))))
705 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
706 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
708 emit_insn (gen_ashrsi3 (target
, new_src
,
709 GEN_INT (set_sign_bit_copies
- 1)));
713 /* For an inverted constant, we will need to set the low bits,
714 these will be shifted out of harm's way. */
715 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
716 if (const_ok_for_arm (~temp1
))
720 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
721 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
723 emit_insn (gen_ashrsi3 (target
, new_src
,
724 GEN_INT (set_sign_bit_copies
- 1)));
730 /* See if we can generate this by setting the bottom (or the top)
731 16 bits, and then shifting these into the other half of the
732 word. We only look for the simplest cases, to do more would cost
733 too much. Be careful, however, not to generate this when the
734 alternative would take fewer insns. */
735 if (val
& 0xffff0000)
737 temp1
= remainder
& 0xffff0000;
738 temp2
= remainder
& 0x0000ffff;
740 /* Overlaps outside this range are best done using other methods. */
741 for (i
= 9; i
< 24; i
++)
743 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
744 && ! const_ok_for_arm (temp2
))
746 insns
= arm_gen_constant (code
, mode
, temp2
,
747 new_src
= (subtargets
750 source
, subtargets
, generate
);
753 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
755 gen_rtx (ASHIFT
, mode
, source
,
762 /* Don't duplicate cases already considered. */
763 for (i
= 17; i
< 24; i
++)
765 if (((temp1
| (temp1
>> i
)) == remainder
)
766 && ! const_ok_for_arm (temp1
))
768 insns
= arm_gen_constant (code
, mode
, temp1
,
769 new_src
= (subtargets
772 source
, subtargets
, generate
);
775 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
777 gen_rtx (LSHIFTRT
, mode
,
778 source
, GEN_INT (i
)),
788 /* If we have IOR or XOR, and the constant can be loaded in a
789 single instruction, and we can find a temporary to put it in,
790 then this can be done in two instructions instead of 3-4. */
792 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
794 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
798 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
800 emit_insn (gen_rtx (SET
, VOIDmode
, sub
, GEN_INT (val
)));
801 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
802 gen_rtx (code
, mode
, source
, sub
)));
811 if (set_sign_bit_copies
> 8
812 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
816 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
817 rtx shift
= GEN_INT (set_sign_bit_copies
);
819 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
821 gen_rtx (ASHIFT
, mode
, source
,
823 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
825 gen_rtx (LSHIFTRT
, mode
, sub
,
831 if (set_zero_bit_copies
> 8
832 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
836 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
837 rtx shift
= GEN_INT (set_zero_bit_copies
);
839 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
841 gen_rtx (LSHIFTRT
, mode
, source
,
843 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
845 gen_rtx (ASHIFT
, mode
, sub
,
851 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
855 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
856 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
857 gen_rtx (NOT
, mode
, source
)));
860 sub
= gen_reg_rtx (mode
);
861 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
862 gen_rtx (AND
, mode
, source
,
864 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
865 gen_rtx (NOT
, mode
, sub
)));
872 /* See if two shifts will do 2 or more insn's worth of work. */
873 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
875 HOST_WIDE_INT shift_mask
= ((0xffffffff
876 << (32 - clear_sign_bit_copies
))
881 if ((remainder
| shift_mask
) != 0xffffffff)
885 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
886 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
887 new_source
, source
, subtargets
, 1);
891 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
892 new_source
, source
, subtargets
, 0);
897 shift
= GEN_INT (clear_sign_bit_copies
);
898 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
899 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
900 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
906 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
908 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
912 if ((remainder
| shift_mask
) != 0xffffffff)
916 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
917 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
918 new_source
, source
, subtargets
, 1);
922 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
923 new_source
, source
, subtargets
, 0);
928 shift
= GEN_INT (clear_zero_bit_copies
);
929 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
930 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
931 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
943 for (i
= 0; i
< 32; i
++)
944 if (remainder
& (1 << i
))
947 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
948 remainder
= (~remainder
) & 0xffffffff;
949 else if (code
== PLUS
&& num_bits_set
> 16)
950 remainder
= (-remainder
) & 0xffffffff;
957 /* Now try and find a way of doing the job in either two or three insns.
959 We start by looking for the largest block of zeros that are aligned on
960 a 2-bit boundary, we then fill up the temps, wrapping around to the
961 top of the word when we drop off the bottom.
962 In the worst case this code should produce no more than four insns. */
965 int best_consecutive_zeros
= 0;
967 for (i
= 0; i
< 32; i
+= 2)
969 int consecutive_zeros
= 0;
971 if (! (remainder
& (3 << i
)))
973 while ((i
< 32) && ! (remainder
& (3 << i
)))
975 consecutive_zeros
+= 2;
978 if (consecutive_zeros
> best_consecutive_zeros
)
980 best_consecutive_zeros
= consecutive_zeros
;
981 best_start
= i
- consecutive_zeros
;
987 /* Now start emitting the insns, starting with the one with the highest
988 bit set: we do this so that the smallest number will be emitted last;
989 this is more likely to be combinable with addressing insns. */
997 if (remainder
& (3 << (i
- 2)))
1002 temp1
= remainder
& ((0x0ff << end
)
1003 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1004 remainder
&= ~temp1
;
1009 emit_insn (gen_rtx (SET
, VOIDmode
,
1010 new_src
= (subtargets
1011 ? gen_reg_rtx (mode
)
1013 GEN_INT (can_invert
? ~temp1
: temp1
)));
1017 else if (code
== MINUS
)
1020 emit_insn (gen_rtx (SET
, VOIDmode
,
1021 new_src
= (subtargets
1022 ? gen_reg_rtx (mode
)
1024 gen_rtx (code
, mode
, GEN_INT (temp1
),
1031 emit_insn (gen_rtx (SET
, VOIDmode
,
1032 new_src
= (remainder
1034 ? gen_reg_rtx (mode
)
1037 gen_rtx (code
, mode
, source
,
1038 GEN_INT (can_invert
? ~temp1
1049 } while (remainder
);
1054 /* Canonicalize a comparison so that we are more likely to recognize it.
1055 This can be done for a few constant compares, where we can make the
1056 immediate value easier to load. */
1058 arm_canonicalize_comparison (code
, op1
)
1062 HOST_WIDE_INT i
= INTVAL (*op1
);
1072 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
1073 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1075 *op1
= GEN_INT (i
+1);
1076 return code
== GT
? GE
: LT
;
1082 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
1083 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1085 *op1
= GEN_INT (i
-1);
1086 return code
== GE
? GT
: LE
;
1093 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1095 *op1
= GEN_INT (i
+ 1);
1096 return code
== GTU
? GEU
: LTU
;
1103 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1105 *op1
= GEN_INT (i
- 1);
1106 return code
== GEU
? GTU
: LEU
;
1118 /* Handle aggregates that are not laid out in a BLKmode element.
1119 This is a sub-element of RETURN_IN_MEMORY. */
1121 arm_return_in_memory (type
)
1124 if (TREE_CODE (type
) == RECORD_TYPE
)
1128 /* For a struct, we can return in a register if every element was a
1130 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1131 if (TREE_CODE (field
) != FIELD_DECL
1132 || ! DECL_BIT_FIELD_TYPE (field
))
1137 else if (TREE_CODE (type
) == UNION_TYPE
)
1141 /* Unions can be returned in registers if every element is
1142 integral, or can be returned in an integer register. */
1143 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1145 if (TREE_CODE (field
) != FIELD_DECL
1146 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1147 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1148 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1153 /* XXX Not sure what should be done for other aggregates, so put them in
1159 legitimate_pic_operand_p (x
)
1162 if (CONSTANT_P (x
) && flag_pic
1163 && (GET_CODE (x
) == SYMBOL_REF
1164 || (GET_CODE (x
) == CONST
1165 && GET_CODE (XEXP (x
, 0)) == PLUS
1166 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
1173 legitimize_pic_address (orig
, mode
, reg
)
1175 enum machine_mode mode
;
1178 if (GET_CODE (orig
) == SYMBOL_REF
)
1180 rtx pic_ref
, address
;
1186 if (reload_in_progress
|| reload_completed
)
1189 reg
= gen_reg_rtx (Pmode
);
1194 #ifdef AOF_ASSEMBLER
1195 /* The AOF assembler can generate relocations for these directly, and
1196 understands that the PIC register has to be added into the offset.
1198 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1201 address
= gen_reg_rtx (Pmode
);
1205 emit_insn (gen_pic_load_addr (address
, orig
));
1207 pic_ref
= gen_rtx (MEM
, Pmode
,
1208 gen_rtx (PLUS
, Pmode
, pic_offset_table_rtx
, address
));
1209 RTX_UNCHANGING_P (pic_ref
) = 1;
1210 insn
= emit_move_insn (reg
, pic_ref
);
1212 current_function_uses_pic_offset_table
= 1;
1213 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1215 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
, orig
,
1219 else if (GET_CODE (orig
) == CONST
)
1223 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1224 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1229 if (reload_in_progress
|| reload_completed
)
1232 reg
= gen_reg_rtx (Pmode
);
1235 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1237 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1238 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1239 base
== reg
? 0 : reg
);
1244 if (GET_CODE (offset
) == CONST_INT
)
1246 /* The base register doesn't really matter, we only want to
1247 test the index for the appropriate mode. */
1248 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1250 if (! reload_in_progress
&& ! reload_completed
)
1251 offset
= force_reg (Pmode
, offset
);
1256 if (GET_CODE (offset
) == CONST_INT
)
1257 return plus_constant_for_output (base
, INTVAL (offset
));
1260 if (GET_MODE_SIZE (mode
) > 4
1261 && (GET_MODE_CLASS (mode
) == MODE_INT
1262 || TARGET_SOFT_FLOAT
))
1264 emit_insn (gen_addsi3 (reg
, base
, offset
));
1268 return gen_rtx (PLUS
, Pmode
, base
, offset
);
1270 else if (GET_CODE (orig
) == LABEL_REF
)
1271 current_function_uses_pic_offset_table
= 1;
1290 #ifndef AOF_ASSEMBLER
1291 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1292 rtx global_offset_table
;
1294 if (current_function_uses_pic_offset_table
== 0)
1301 l1
= gen_label_rtx ();
1303 global_offset_table
= gen_rtx (SYMBOL_REF
, Pmode
, "_GLOBAL_OFFSET_TABLE_");
1304 /* The PC contains 'dot'+8, but the label L1 is on the next
1305 instruction, so the offset is only 'dot'+4. */
1306 pic_tmp
= gen_rtx (CONST
, VOIDmode
,
1307 gen_rtx (PLUS
, Pmode
,
1308 gen_rtx (LABEL_REF
, VOIDmode
, l1
),
1310 pic_tmp2
= gen_rtx (CONST
, VOIDmode
,
1311 gen_rtx (PLUS
, Pmode
,
1312 global_offset_table
,
1315 pic_rtx
= gen_rtx (CONST
, Pmode
,
1316 gen_rtx (MINUS
, Pmode
, pic_tmp2
, pic_tmp
));
1318 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1319 emit_jump_insn (gen_pic_add_dot_plus_eight(l1
, pic_offset_table_rtx
));
1322 seq
= gen_sequence ();
1324 emit_insn_after (seq
, get_insns ());
1326 /* Need to emit this whether or not we obey regdecls,
1327 since setjmp/longjmp can cause life info to screw up. */
1328 emit_insn (gen_rtx (USE
, VOIDmode
, pic_offset_table_rtx
));
1329 #endif /* AOF_ASSEMBLER */
/* True iff X is a (hard or pseudo) REG, or a SUBREG of one.  */
1332 #define REG_OR_SUBREG_REG(X) \
1333 (GET_CODE (X) == REG \
1334 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
/* Strip a SUBREG wrapper: yield X itself for a REG, else the inner
   register.  Only meaningful when REG_OR_SUBREG_REG (X) holds.  */
1336 #define REG_OR_SUBREG_RTX(X) \
1337 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
/* True iff X is one of the frame-related registers (frame, stack or
   arg pointer); these are later rewritten as reg+const during reload
   (see the cost comments in arm_rtx_costs below).  */
1339 #define ARM_FRAME_RTX(X) \
1340 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1341 || (X) == arg_pointer_rtx)
1344 arm_rtx_costs (x
, code
, outer_code
)
1346 enum rtx_code code
, outer_code
;
1348 enum machine_mode mode
= GET_MODE (x
);
1349 enum rtx_code subcode
;
1355 /* Memory costs quite a lot for the first word, but subsequent words
1356 load at the equivalent of a single insn each. */
1357 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1358 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1365 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1372 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1374 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1375 + ((GET_CODE (XEXP (x
, 0)) == REG
1376 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1377 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1379 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1380 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1381 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1383 + ((GET_CODE (XEXP (x
, 1)) == REG
1384 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1385 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1386 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1391 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1392 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1393 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1394 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1397 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1398 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1399 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1400 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1402 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1403 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1404 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1407 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1408 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1409 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1410 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1411 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1412 || subcode
== ROTATE
|| subcode
== ROTATERT
1414 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1415 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1416 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1417 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1418 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1419 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1420 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1425 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1426 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1427 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1428 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1429 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1433 case AND
: case XOR
: case IOR
:
1436 /* Normally the frame registers will be spilt into reg+const during
1437 reload, so it is a bad idea to combine them with other instructions,
1438 since then they might not be moved outside of loops. As a compromise
1439 we allow integration with ops that have a constant as their second
1441 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1442 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1443 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1444 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1445 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1449 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1450 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1451 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1452 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1455 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1456 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1457 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1458 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1459 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1462 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1463 return (1 + extra_cost
1464 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1465 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1466 || subcode
== ROTATE
|| subcode
== ROTATERT
1468 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1469 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1470 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1471 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1472 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1473 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1479 /* There is no point basing this on the tuning, since it is always the
1480 fast variant if it exists at all */
1481 if (arm_fast_multiply
&& mode
== DImode
1482 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1483 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1484 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1487 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1491 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1493 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1494 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1495 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1497 /* Tune as appropriate */
1498 int booth_unit_size
= ((tune_flags
& FL_FAST_MULT
) ? 8 : 2);
1500 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1502 i
>>= booth_unit_size
;
1509 return (((tune_flags
& FL_FAST_MULT
) ? 8 : 30)
1510 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1511 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1514 if (arm_fast_multiply
&& mode
== SImode
1515 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
1516 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1517 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
1518 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1519 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
1520 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
1525 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1526 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1530 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1532 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1535 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1543 return 4 + (mode
== DImode
? 4 : 0);
1546 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1547 return (4 + (mode
== DImode
? 4 : 0)
1548 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1551 switch (GET_MODE (XEXP (x
, 0)))
1554 return (1 + (mode
== DImode
? 4 : 0)
1555 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1558 return (4 + (mode
== DImode
? 4 : 0)
1559 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1562 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1572 arm_adjust_cost (insn
, link
, dep
, cost
)
1580 if ((i_pat
= single_set (insn
)) != NULL
1581 && GET_CODE (SET_SRC (i_pat
)) == MEM
1582 && (d_pat
= single_set (dep
)) != NULL
1583 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1585 /* This is a load after a store, there is no conflict if the load reads
1586 from a cached area. Assume that loads from the stack, and from the
1587 constant pool are cached, and that others will miss. This is a
1590 /* debug_rtx (insn);
1593 fprintf (stderr, "costs %d\n", cost); */
1595 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1596 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1597 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1598 || reg_mentioned_p (hard_frame_pointer_rtx
,
1599 XEXP (SET_SRC (i_pat
), 0)))
1601 /* fprintf (stderr, "***** Now 1\n"); */
1609 /* This code has been fixed for cross compilation. */
1611 static int fpa_consts_inited
= 0;
1613 char *strings_fpa
[8] = {
1615 "4", "5", "0.5", "10"
1618 static REAL_VALUE_TYPE values_fpa
[8];
1626 for (i
= 0; i
< 8; i
++)
1628 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1632 fpa_consts_inited
= 1;
1635 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1638 const_double_rtx_ok_for_fpu (x
)
1644 if (!fpa_consts_inited
)
1647 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1648 if (REAL_VALUE_MINUS_ZERO (r
))
1651 for (i
= 0; i
< 8; i
++)
1652 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1658 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1661 neg_const_double_rtx_ok_for_fpu (x
)
1667 if (!fpa_consts_inited
)
1670 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1671 r
= REAL_VALUE_NEGATE (r
);
1672 if (REAL_VALUE_MINUS_ZERO (r
))
1675 for (i
= 0; i
< 8; i
++)
1676 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1682 /* Predicates for `match_operand' and `match_operator'. */
1684 /* s_register_operand is the same as register_operand, but it doesn't accept
1687 This function exists because at the time it was put in it led to better
1688 code. SUBREG(MEM) always needs a reload in the places where
1689 s_register_operand is used, and this seemed to lead to excessive
1693 s_register_operand (op
, mode
)
1695 enum machine_mode mode
;
1697 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1700 if (GET_CODE (op
) == SUBREG
)
1701 op
= SUBREG_REG (op
);
1703 /* We don't consider registers whose class is NO_REGS
1704 to be a register operand. */
1705 return (GET_CODE (op
) == REG
1706 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1707 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1710 /* Only accept reg, subreg(reg), const_int. */
1713 reg_or_int_operand (op
, mode
)
1715 enum machine_mode mode
;
1717 if (GET_CODE (op
) == CONST_INT
)
1720 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1723 if (GET_CODE (op
) == SUBREG
)
1724 op
= SUBREG_REG (op
);
1726 /* We don't consider registers whose class is NO_REGS
1727 to be a register operand. */
1728 return (GET_CODE (op
) == REG
1729 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1730 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1733 /* Return 1 if OP is an item in memory, given that we are in reload. */
1736 reload_memory_operand (op
, mode
)
1738 enum machine_mode mode
;
1740 int regno
= true_regnum (op
);
1742 return (! CONSTANT_P (op
)
1744 || (GET_CODE (op
) == REG
1745 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1748 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1751 arm_rhs_operand (op
, mode
)
1753 enum machine_mode mode
;
1755 return (s_register_operand (op
, mode
)
1756 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1759 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1763 arm_rhsm_operand (op
, mode
)
1765 enum machine_mode mode
;
1767 return (s_register_operand (op
, mode
)
1768 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1769 || memory_operand (op
, mode
));
1772 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1773 constant that is valid when negated. */
1776 arm_add_operand (op
, mode
)
1778 enum machine_mode mode
;
1780 return (s_register_operand (op
, mode
)
1781 || (GET_CODE (op
) == CONST_INT
1782 && (const_ok_for_arm (INTVAL (op
))
1783 || const_ok_for_arm (-INTVAL (op
)))));
1787 arm_not_operand (op
, mode
)
1789 enum machine_mode mode
;
1791 return (s_register_operand (op
, mode
)
1792 || (GET_CODE (op
) == CONST_INT
1793 && (const_ok_for_arm (INTVAL (op
))
1794 || const_ok_for_arm (~INTVAL (op
)))));
1797 /* Return TRUE if the operand is a memory reference which contains an
1798 offsettable address. */
1800 offsettable_memory_operand (op
, mode
)
1802 enum machine_mode mode
;
1804 if (mode
== VOIDmode
)
1805 mode
= GET_MODE (op
);
1807 return (mode
== GET_MODE (op
)
1808 && GET_CODE (op
) == MEM
1809 && offsettable_address_p (reload_completed
| reload_in_progress
,
1810 mode
, XEXP (op
, 0)));
1813 /* Return TRUE if the operand is a memory reference which is, or can be
1814 made word aligned by adjusting the offset. */
1816 alignable_memory_operand (op
, mode
)
1818 enum machine_mode mode
;
1822 if (mode
== VOIDmode
)
1823 mode
= GET_MODE (op
);
1825 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1830 return ((GET_CODE (reg
= op
) == REG
1831 || (GET_CODE (op
) == SUBREG
1832 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1833 || (GET_CODE (op
) == PLUS
1834 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1835 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1836 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1837 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1838 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1841 /* Similar to s_register_operand, but does not allow hard integer
1844 f_register_operand (op
, mode
)
1846 enum machine_mode mode
;
1848 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1851 if (GET_CODE (op
) == SUBREG
)
1852 op
= SUBREG_REG (op
);
1854 /* We don't consider registers whose class is NO_REGS
1855 to be a register operand. */
1856 return (GET_CODE (op
) == REG
1857 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1858 || REGNO_REG_CLASS (REGNO (op
)) == FPU_REGS
));
1861 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1864 fpu_rhs_operand (op
, mode
)
1866 enum machine_mode mode
;
1868 if (s_register_operand (op
, mode
))
1870 else if (GET_CODE (op
) == CONST_DOUBLE
)
1871 return (const_double_rtx_ok_for_fpu (op
));
1877 fpu_add_operand (op
, mode
)
1879 enum machine_mode mode
;
1881 if (s_register_operand (op
, mode
))
1883 else if (GET_CODE (op
) == CONST_DOUBLE
)
1884 return (const_double_rtx_ok_for_fpu (op
)
1885 || neg_const_double_rtx_ok_for_fpu (op
));
1890 /* Return nonzero if OP is a constant power of two. */
1893 power_of_two_operand (op
, mode
)
1895 enum machine_mode mode
;
1897 if (GET_CODE (op
) == CONST_INT
)
1899 HOST_WIDE_INT value
= INTVAL(op
);
1900 return value
!= 0 && (value
& (value
- 1)) == 0;
1905 /* Return TRUE for a valid operand of a DImode operation.
1906 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1907 Note that this disallows MEM(REG+REG), but allows
1908 MEM(PRE/POST_INC/DEC(REG)). */
1911 di_operand (op
, mode
)
1913 enum machine_mode mode
;
1915 if (s_register_operand (op
, mode
))
1918 switch (GET_CODE (op
))
1925 return memory_address_p (DImode
, XEXP (op
, 0));
1932 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1933 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1934 Note that this disallows MEM(REG+REG), but allows
1935 MEM(PRE/POST_INC/DEC(REG)). */
1938 soft_df_operand (op
, mode
)
1940 enum machine_mode mode
;
1942 if (s_register_operand (op
, mode
))
1945 switch (GET_CODE (op
))
1951 return memory_address_p (DFmode
, XEXP (op
, 0));
1958 /* Return TRUE for valid index operands. */
1961 index_operand (op
, mode
)
1963 enum machine_mode mode
;
1965 return (s_register_operand(op
, mode
)
1966 || (immediate_operand (op
, mode
)
1967 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1970 /* Return TRUE for valid shifts by a constant. This also accepts any
1971 power of two on the (somewhat overly relaxed) assumption that the
1972 shift operator in this case was a mult. */
1975 const_shift_operand (op
, mode
)
1977 enum machine_mode mode
;
1979 return (power_of_two_operand (op
, mode
)
1980 || (immediate_operand (op
, mode
)
1981 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1984 /* Return TRUE for arithmetic operators which can be combined with a multiply
1988 shiftable_operator (x
, mode
)
1990 enum machine_mode mode
;
1992 if (GET_MODE (x
) != mode
)
1996 enum rtx_code code
= GET_CODE (x
);
1998 return (code
== PLUS
|| code
== MINUS
1999 || code
== IOR
|| code
== XOR
|| code
== AND
);
2003 /* Return TRUE for shift operators. */
2006 shift_operator (x
, mode
)
2008 enum machine_mode mode
;
2010 if (GET_MODE (x
) != mode
)
2014 enum rtx_code code
= GET_CODE (x
);
2017 return power_of_two_operand (XEXP (x
, 1));
2019 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
2020 || code
== ROTATERT
);
2024 int equality_operator (x
, mode
)
2026 enum machine_mode mode
;
2028 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
2031 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2034 minmax_operator (x
, mode
)
2036 enum machine_mode mode
;
2038 enum rtx_code code
= GET_CODE (x
);
2040 if (GET_MODE (x
) != mode
)
2043 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
2046 /* return TRUE if x is EQ or NE */
2048 /* Return TRUE if this is the condition code register, if we aren't given
2049 a mode, accept any class CCmode register */
2052 cc_register (x
, mode
)
2054 enum machine_mode mode
;
2056 if (mode
== VOIDmode
)
2058 mode
= GET_MODE (x
);
2059 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2063 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2069 /* Return TRUE if this is the condition code register, if we aren't given
2070 a mode, accept any class CCmode register which indicates a dominance
2074 dominant_cc_register (x
, mode
)
2076 enum machine_mode mode
;
2078 if (mode
== VOIDmode
)
2080 mode
= GET_MODE (x
);
2081 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2085 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
2086 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
2087 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
2088 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
2089 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
2092 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2098 /* Return TRUE if X references a SYMBOL_REF. */
2100 symbol_mentioned_p (x
)
2106 if (GET_CODE (x
) == SYMBOL_REF
)
2109 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2110 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2116 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2117 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2120 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2127 /* Return TRUE if X references a LABEL_REF. */
2129 label_mentioned_p (x
)
2135 if (GET_CODE (x
) == LABEL_REF
)
2138 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2139 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2145 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2146 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2149 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2160 enum rtx_code code
= GET_CODE (x
);
2164 else if (code
== SMIN
)
2166 else if (code
== UMIN
)
2168 else if (code
== UMAX
)
2174 /* Return 1 if memory locations are adjacent */
2177 adjacent_mem_locations (a
, b
)
2180 int val0
= 0, val1
= 0;
2183 if ((GET_CODE (XEXP (a
, 0)) == REG
2184 || (GET_CODE (XEXP (a
, 0)) == PLUS
2185 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2186 && (GET_CODE (XEXP (b
, 0)) == REG
2187 || (GET_CODE (XEXP (b
, 0)) == PLUS
2188 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2190 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2192 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2193 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2196 reg0
= REGNO (XEXP (a
, 0));
2197 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2199 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2200 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2203 reg1
= REGNO (XEXP (b
, 0));
2204 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2209 /* Return 1 if OP is a load multiple operation. It is known to be
2210 parallel and the first section will be tested. */
2213 load_multiple_operation (op
, mode
)
2215 enum machine_mode mode
;
2217 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2220 HOST_WIDE_INT i
= 1, base
= 0;
2224 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2227 /* Check to see if this might be a write-back */
2228 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2233 /* Now check it more carefully */
2234 if (GET_CODE (SET_DEST (elt
)) != REG
2235 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2236 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2237 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2238 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2239 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2240 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2241 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2242 != REGNO (SET_DEST (elt
)))
2248 /* Perform a quick check so we don't blow up below. */
2250 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2251 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2252 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2255 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2256 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2258 for (; i
< count
; i
++)
2260 rtx elt
= XVECEXP (op
, 0, i
);
2262 if (GET_CODE (elt
) != SET
2263 || GET_CODE (SET_DEST (elt
)) != REG
2264 || GET_MODE (SET_DEST (elt
)) != SImode
2265 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2266 || GET_CODE (SET_SRC (elt
)) != MEM
2267 || GET_MODE (SET_SRC (elt
)) != SImode
2268 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2269 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2270 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2271 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2278 /* Return 1 if OP is a store multiple operation. It is known to be
2279 parallel and the first section will be tested. */
2282 store_multiple_operation (op
, mode
)
2284 enum machine_mode mode
;
2286 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2289 HOST_WIDE_INT i
= 1, base
= 0;
2293 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2296 /* Check to see if this might be a write-back */
2297 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2302 /* Now check it more carefully */
2303 if (GET_CODE (SET_DEST (elt
)) != REG
2304 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2305 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2306 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2307 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2308 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2309 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2310 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2311 != REGNO (SET_DEST (elt
)))
2317 /* Perform a quick check so we don't blow up below. */
2319 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2320 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2321 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2324 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2325 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2327 for (; i
< count
; i
++)
2329 elt
= XVECEXP (op
, 0, i
);
2331 if (GET_CODE (elt
) != SET
2332 || GET_CODE (SET_SRC (elt
)) != REG
2333 || GET_MODE (SET_SRC (elt
)) != SImode
2334 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2335 || GET_CODE (SET_DEST (elt
)) != MEM
2336 || GET_MODE (SET_DEST (elt
)) != SImode
2337 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2338 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2339 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2340 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2348 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2353 HOST_WIDE_INT
*load_offset
;
2355 int unsorted_regs
[4];
2356 HOST_WIDE_INT unsorted_offsets
[4];
2361 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2362 extended if required. */
2363 if (nops
< 2 || nops
> 4)
2366 /* Loop over the operands and check that the memory references are
2367 suitable (ie immediate offsets from the same base register). At
2368 the same time, extract the target register, and the memory
2370 for (i
= 0; i
< nops
; i
++)
2375 /* Convert a subreg of a mem into the mem itself. */
2376 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2377 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2379 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2382 /* Don't reorder volatile memory references; it doesn't seem worth
2383 looking for the case where the order is ok anyway. */
2384 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2387 offset
= const0_rtx
;
2389 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2390 || (GET_CODE (reg
) == SUBREG
2391 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2392 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2393 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2395 || (GET_CODE (reg
) == SUBREG
2396 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2397 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2402 base_reg
= REGNO(reg
);
2403 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2404 ? REGNO (operands
[i
])
2405 : REGNO (SUBREG_REG (operands
[i
])));
2410 if (base_reg
!= REGNO (reg
))
2411 /* Not addressed from the same base register. */
2414 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2415 ? REGNO (operands
[i
])
2416 : REGNO (SUBREG_REG (operands
[i
])));
2417 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2421 /* If it isn't an integer register, or if it overwrites the
2422 base register but isn't the last insn in the list, then
2423 we can't do this. */
2424 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2425 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2428 unsorted_offsets
[i
] = INTVAL (offset
);
2431 /* Not a suitable memory address. */
2435 /* All the useful information has now been extracted from the
2436 operands into unsorted_regs and unsorted_offsets; additionally,
2437 order[0] has been set to the lowest numbered register in the
2438 list. Sort the registers into order, and check that the memory
2439 offsets are ascending and adjacent. */
2441 for (i
= 1; i
< nops
; i
++)
2445 order
[i
] = order
[i
- 1];
2446 for (j
= 0; j
< nops
; j
++)
2447 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2448 && (order
[i
] == order
[i
- 1]
2449 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2452 /* Have we found a suitable register? if not, one must be used more
2454 if (order
[i
] == order
[i
- 1])
2457 /* Is the memory address adjacent and ascending? */
2458 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2466 for (i
= 0; i
< nops
; i
++)
2467 regs
[i
] = unsorted_regs
[order
[i
]];
2469 *load_offset
= unsorted_offsets
[order
[0]];
2472 if (unsorted_offsets
[order
[0]] == 0)
2473 return 1; /* ldmia */
2475 if (unsorted_offsets
[order
[0]] == 4)
2476 return 2; /* ldmib */
2478 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2479 return 3; /* ldmda */
2481 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2482 return 4; /* ldmdb */
2484 /* Can't do it without setting up the offset, only do this if it takes
2485 no more than one insn. */
2486 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2487 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2491 emit_ldm_seq (operands
, nops
)
2497 HOST_WIDE_INT offset
;
2501 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2504 strcpy (buf
, "ldm%?ia\t");
2508 strcpy (buf
, "ldm%?ib\t");
2512 strcpy (buf
, "ldm%?da\t");
2516 strcpy (buf
, "ldm%?db\t");
2521 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2522 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2525 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2526 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2528 output_asm_insn (buf
, operands
);
2530 strcpy (buf
, "ldm%?ia\t");
2537 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2538 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2540 for (i
= 1; i
< nops
; i
++)
2541 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2542 reg_names
[regs
[i
]]);
2544 strcat (buf
, "}\t%@ phole ldm");
2546 output_asm_insn (buf
, operands
);
2551 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2556 HOST_WIDE_INT
*load_offset
;
2558 int unsorted_regs
[4];
2559 HOST_WIDE_INT unsorted_offsets
[4];
2564 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2565 extended if required. */
2566 if (nops
< 2 || nops
> 4)
2569 /* Loop over the operands and check that the memory references are
2570 suitable (ie immediate offsets from the same base register). At
2571 the same time, extract the target register, and the memory
2573 for (i
= 0; i
< nops
; i
++)
2578 /* Convert a subreg of a mem into the mem itself. */
2579 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2580 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2582 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2585 /* Don't reorder volatile memory references; it doesn't seem worth
2586 looking for the case where the order is ok anyway. */
2587 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2590 offset
= const0_rtx
;
2592 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2593 || (GET_CODE (reg
) == SUBREG
2594 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2595 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2596 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2598 || (GET_CODE (reg
) == SUBREG
2599 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2600 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2605 base_reg
= REGNO(reg
);
2606 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2607 ? REGNO (operands
[i
])
2608 : REGNO (SUBREG_REG (operands
[i
])));
2613 if (base_reg
!= REGNO (reg
))
2614 /* Not addressed from the same base register. */
2617 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2618 ? REGNO (operands
[i
])
2619 : REGNO (SUBREG_REG (operands
[i
])));
2620 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2624 /* If it isn't an integer register, then we can't do this. */
2625 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2628 unsorted_offsets
[i
] = INTVAL (offset
);
2631 /* Not a suitable memory address. */
2635 /* All the useful information has now been extracted from the
2636 operands into unsorted_regs and unsorted_offsets; additionally,
2637 order[0] has been set to the lowest numbered register in the
2638 list. Sort the registers into order, and check that the memory
2639 offsets are ascending and adjacent. */
2641 for (i
= 1; i
< nops
; i
++)
2645 order
[i
] = order
[i
- 1];
2646 for (j
= 0; j
< nops
; j
++)
2647 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2648 && (order
[i
] == order
[i
- 1]
2649 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2652 /* Have we found a suitable register? if not, one must be used more
2654 if (order
[i
] == order
[i
- 1])
2657 /* Is the memory address adjacent and ascending? */
2658 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2666 for (i
= 0; i
< nops
; i
++)
2667 regs
[i
] = unsorted_regs
[order
[i
]];
2669 *load_offset
= unsorted_offsets
[order
[0]];
2672 if (unsorted_offsets
[order
[0]] == 0)
2673 return 1; /* stmia */
2675 if (unsorted_offsets
[order
[0]] == 4)
2676 return 2; /* stmib */
2678 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2679 return 3; /* stmda */
2681 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2682 return 4; /* stmdb */
2688 emit_stm_seq (operands
, nops
)
2694 HOST_WIDE_INT offset
;
2698 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2701 strcpy (buf
, "stm%?ia\t");
2705 strcpy (buf
, "stm%?ib\t");
2709 strcpy (buf
, "stm%?da\t");
2713 strcpy (buf
, "stm%?db\t");
2720 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2721 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2723 for (i
= 1; i
< nops
; i
++)
2724 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2725 reg_names
[regs
[i
]]);
2727 strcat (buf
, "}\t%@ phole stm");
2729 output_asm_insn (buf
, operands
);
2734 multi_register_push (op
, mode
)
2736 enum machine_mode mode
;
2738 if (GET_CODE (op
) != PARALLEL
2739 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2740 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2741 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2748 /* Routines for use with attributes */
2750 /* Return nonzero if ATTR is a valid attribute for DECL.
2751 ATTRIBUTES are any existing attributes and ARGS are the arguments
2754 Supported attributes:
2756 naked: don't output any prologue or epilogue code, the user is assumed
2757 to do the right thing. */
2760 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2766 if (args
!= NULL_TREE
)
2769 if (is_attribute_p ("naked", attr
))
2770 return TREE_CODE (decl
) == FUNCTION_DECL
;
2774 /* Return non-zero if FUNC is a naked function. */
2777 arm_naked_function_p (func
)
2782 if (TREE_CODE (func
) != FUNCTION_DECL
)
2785 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2786 return a
!= NULL_TREE
;
2789 /* Routines for use in generating RTL */
2792 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
, unchanging_p
,
2804 int sign
= up
? 1 : -1;
2807 result
= gen_rtx (PARALLEL
, VOIDmode
,
2808 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2811 XVECEXP (result
, 0, 0)
2812 = gen_rtx (SET
, GET_MODE (from
), from
,
2813 plus_constant (from
, count
* 4 * sign
));
2818 for (j
= 0; i
< count
; i
++, j
++)
2820 mem
= gen_rtx (MEM
, SImode
, plus_constant (from
, j
* 4 * sign
));
2821 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2822 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2824 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
,
2825 gen_rtx (REG
, SImode
, base_regno
+ j
),
2830 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
2836 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
, unchanging_p
,
2848 int sign
= up
? 1 : -1;
2851 result
= gen_rtx (PARALLEL
, VOIDmode
,
2852 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2855 XVECEXP (result
, 0, 0)
2856 = gen_rtx (SET
, GET_MODE (to
), to
,
2857 plus_constant (to
, count
* 4 * sign
));
2862 for (j
= 0; i
< count
; i
++, j
++)
2864 mem
= gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
));
2865 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2866 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2868 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
, mem
,
2869 gen_rtx (REG
, SImode
, base_regno
+ j
));
2873 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
2879 arm_gen_movstrqi (operands
)
2882 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2885 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2886 rtx part_bytes_reg
= NULL
;
2888 int dst_unchanging_p
, dst_in_struct_p
, src_unchanging_p
, src_in_struct_p
;
2889 extern int optimize
;
2891 if (GET_CODE (operands
[2]) != CONST_INT
2892 || GET_CODE (operands
[3]) != CONST_INT
2893 || INTVAL (operands
[2]) > 64
2894 || INTVAL (operands
[3]) & 3)
2897 st_dst
= XEXP (operands
[0], 0);
2898 st_src
= XEXP (operands
[1], 0);
2900 dst_unchanging_p
= RTX_UNCHANGING_P (operands
[0]);
2901 dst_in_struct_p
= MEM_IN_STRUCT_P (operands
[0]);
2902 src_unchanging_p
= RTX_UNCHANGING_P (operands
[1]);
2903 src_in_struct_p
= MEM_IN_STRUCT_P (operands
[1]);
2905 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2906 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2908 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2909 out_words_to_go
= INTVAL (operands
[2]) / 4;
2910 last_bytes
= INTVAL (operands
[2]) & 3;
2912 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2913 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2915 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2917 if (in_words_to_go
> 4)
2918 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
2919 src_unchanging_p
, src_in_struct_p
));
2921 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2922 FALSE
, src_unchanging_p
,
2925 if (out_words_to_go
)
2927 if (out_words_to_go
> 4)
2928 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
2931 else if (out_words_to_go
!= 1)
2932 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2940 mem
= gen_rtx (MEM
, SImode
, dst
);
2941 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2942 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2943 emit_move_insn (mem
, gen_rtx (REG
, SImode
, 0));
2944 if (last_bytes
!= 0)
2945 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2949 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2950 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2953 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2954 if (out_words_to_go
)
2958 mem
= gen_rtx (MEM
, SImode
, src
);
2959 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2960 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2961 emit_move_insn (sreg
= gen_reg_rtx (SImode
), mem
);
2962 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2964 mem
= gen_rtx (MEM
, SImode
, dst
);
2965 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2966 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2967 emit_move_insn (mem
, sreg
);
2968 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2971 if (in_words_to_go
) /* Sanity check */
2977 if (in_words_to_go
< 0)
2980 mem
= gen_rtx (MEM
, SImode
, src
);
2981 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2982 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2983 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
2986 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2988 rtx tmp
= gen_reg_rtx (SImode
);
2990 if (part_bytes_reg
== NULL
)
2993 /* The bytes we want are in the top end of the word */
2994 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2995 GEN_INT (8 * (4 - last_bytes
))));
2996 part_bytes_reg
= tmp
;
3000 mem
= gen_rtx (MEM
, QImode
, plus_constant (dst
, last_bytes
- 1));
3001 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3002 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3003 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3006 tmp
= gen_reg_rtx (SImode
);
3007 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3008 part_bytes_reg
= tmp
;
3017 if (part_bytes_reg
== NULL
)
3020 mem
= gen_rtx (MEM
, QImode
, dst
);
3021 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3022 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3023 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3026 rtx tmp
= gen_reg_rtx (SImode
);
3028 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
3029 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3030 part_bytes_reg
= tmp
;
3038 /* Generate a memory reference for a half word, such that it will be loaded
3039 into the top 16 bits of the word. We can assume that the address is
3040 known to be alignable and of the form reg, or plus (reg, const). */
3042 gen_rotated_half_load (memref
)
3045 HOST_WIDE_INT offset
= 0;
3046 rtx base
= XEXP (memref
, 0);
3048 if (GET_CODE (base
) == PLUS
)
3050 offset
= INTVAL (XEXP (base
, 1));
3051 base
= XEXP (base
, 0);
3054 /* If we aren't allowed to generate unaligned addresses, then fail. */
3055 if (TARGET_SHORT_BY_BYTES
3056 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
3059 base
= gen_rtx (MEM
, SImode
, plus_constant (base
, offset
& ~2));
3061 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
3064 return gen_rtx (ROTATE
, SImode
, base
, GEN_INT (16));
3067 static enum machine_mode
3068 select_dominance_cc_mode (op
, x
, y
, cond_or
)
3072 HOST_WIDE_INT cond_or
;
3074 enum rtx_code cond1
, cond2
;
3077 /* Currently we will probably get the wrong result if the individual
3078 comparisons are not simple. This also ensures that it is safe to
3079 reverse a comparison if necessary. */
3080 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
3082 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
3087 cond1
= reverse_condition (cond1
);
3089 /* If the comparisons are not equal, and one doesn't dominate the other,
3090 then we can't do this. */
3092 && ! comparison_dominates_p (cond1
, cond2
)
3093 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
3098 enum rtx_code temp
= cond1
;
3106 if (cond2
== EQ
|| ! cond_or
)
3111 case LE
: return CC_DLEmode
;
3112 case LEU
: return CC_DLEUmode
;
3113 case GE
: return CC_DGEmode
;
3114 case GEU
: return CC_DGEUmode
;
3120 if (cond2
== LT
|| ! cond_or
)
3129 if (cond2
== GT
|| ! cond_or
)
3138 if (cond2
== LTU
|| ! cond_or
)
3147 if (cond2
== GTU
|| ! cond_or
)
3155 /* The remaining cases only occur when both comparisons are the
3177 arm_select_cc_mode (op
, x
, y
)
3182 /* All floating point compares return CCFP if it is an equality
3183 comparison, and CCFPE otherwise. */
3184 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3185 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3187 /* A compare with a shifted operand. Because of canonicalization, the
3188 comparison will have to be swapped when we emit the assembler. */
3189 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3190 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3191 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3192 || GET_CODE (x
) == ROTATERT
))
3195 /* This is a special case that is used by combine to allow a
3196 comparison of a shifted byte load to be split into a zero-extend
3197 followed by a comparison of the shifted integer (only valid for
3198 equalities and unsigned inequalities). */
3199 if (GET_MODE (x
) == SImode
3200 && GET_CODE (x
) == ASHIFT
3201 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3202 && GET_CODE (XEXP (x
, 0)) == SUBREG
3203 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3204 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3205 && (op
== EQ
|| op
== NE
3206 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3207 && GET_CODE (y
) == CONST_INT
)
3210 /* An operation that sets the condition codes as a side-effect, the
3211 V flag is not set correctly, so we can only use comparisons where
3212 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3214 if (GET_MODE (x
) == SImode
3216 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3217 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3218 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3219 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3220 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3221 || GET_CODE (x
) == LSHIFTRT
3222 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3223 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3226 /* A construct for a conditional compare, if the false arm contains
3227 0, then both conditions must be true, otherwise either condition
3228 must be true. Not all conditions are possible, so CCmode is
3229 returned if it can't be done. */
3230 if (GET_CODE (x
) == IF_THEN_ELSE
3231 && (XEXP (x
, 2) == const0_rtx
3232 || XEXP (x
, 2) == const1_rtx
)
3233 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3234 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3235 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
3236 INTVAL (XEXP (x
, 2)));
3238 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3241 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3242 && GET_CODE (x
) == PLUS
3243 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3249 /* X and Y are two things to compare using CODE. Emit the compare insn and
3250 return the rtx for register 0 in the proper mode. FP means this is a
3251 floating point compare: I don't think that it is needed on the arm. */
3254 gen_compare_reg (code
, x
, y
, fp
)
3258 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3259 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
3261 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
3262 gen_rtx (COMPARE
, mode
, x
, y
)));
3268 arm_reload_in_hi (operands
)
3271 rtx base
= find_replacement (&XEXP (operands
[1], 0));
3273 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
3274 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
3275 gen_rtx (MEM
, QImode
,
3276 plus_constant (base
, 1))));
3277 if (BYTES_BIG_ENDIAN
)
3278 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3280 gen_rtx (IOR
, SImode
,
3281 gen_rtx (ASHIFT
, SImode
,
3282 gen_rtx (SUBREG
, SImode
,
3287 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3289 gen_rtx (IOR
, SImode
,
3290 gen_rtx (ASHIFT
, SImode
,
3293 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
3297 arm_reload_out_hi (operands
)
3300 rtx base
= find_replacement (&XEXP (operands
[0], 0));
3302 if (BYTES_BIG_ENDIAN
)
3304 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3305 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3306 emit_insn (gen_lshrsi3 (operands
[2],
3307 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3309 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3310 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3314 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3315 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3316 emit_insn (gen_lshrsi3 (operands
[2],
3317 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3319 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3320 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3324 /* Routines for manipulation of the constant pool. */
3325 /* This is unashamedly hacked from the version in sh.c, since the problem is
3326 extremely similar. */
3328 /* Arm instructions cannot load a large constant into a register,
3329 constants have to come from a pc relative load. The reference of a pc
3330 relative load instruction must be less than 1k infront of the instruction.
3331 This means that we often have to dump a constant inside a function, and
3332 generate code to branch around it.
3334 It is important to minimize this, since the branches will slow things
3335 down and make things bigger.
3337 Worst case code looks like:
3353 We fix this by performing a scan before scheduling, which notices which
3354 instructions need to have their operands fetched from the constant table
3355 and builds the table.
3360 scan, find an instruction which needs a pcrel move. Look forward, find th
3361 last barrier which is within MAX_COUNT bytes of the requirement.
3362 If there isn't one, make one. Process all the instructions between
3363 the find and the barrier.
3365 In the above example, we can tell that L3 is within 1k of L1, so
3366 the first move can be shrunk from the 2 insn+constant sequence into
3367 just 1 insn, and the constant moved to L3 to make:
3378 Then the second move becomes the target for the shortening process.
3384 rtx value
; /* Value in table */
3385 HOST_WIDE_INT next_offset
;
3386 enum machine_mode mode
; /* Mode of value */
3389 /* The maximum number of constants that can fit into one pool, since
3390 the pc relative range is 0...1020 bytes and constants are at least 4
3393 #define MAX_POOL_SIZE (1020/4)
3394 static pool_node pool_vector
[MAX_POOL_SIZE
];
3395 static int pool_size
;
3396 static rtx pool_vector_label
;
3398 /* Add a constant to the pool and return its label. */
3399 static HOST_WIDE_INT
3400 add_constant (x
, mode
)
3402 enum machine_mode mode
;
3406 HOST_WIDE_INT offset
;
3408 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3409 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3410 x
= get_pool_constant (XEXP (x
, 0));
3411 #ifndef AOF_ASSEMBLER
3412 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3413 x
= XVECEXP (x
, 0, 0);
3416 #ifdef AOF_ASSEMBLER
3417 /* PIC Symbol references need to be converted into offsets into the
3419 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
3420 x
= aof_pic_entry (x
);
3421 #endif /* AOF_ASSEMBLER */
3423 /* First see if we've already got it */
3424 for (i
= 0; i
< pool_size
; i
++)
3426 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3427 && mode
== pool_vector
[i
].mode
)
3429 if (GET_CODE (x
) == CODE_LABEL
)
3431 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3434 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3435 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3439 /* Need a new one */
3440 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3443 pool_vector_label
= gen_label_rtx ();
3445 pool_vector
[pool_size
].next_offset
3446 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3448 pool_vector
[pool_size
].value
= x
;
3449 pool_vector
[pool_size
].mode
= mode
;
3454 /* Output the literal table */
3461 scan
= emit_label_after (gen_label_rtx (), scan
);
3462 scan
= emit_insn_after (gen_align_4 (), scan
);
3463 scan
= emit_label_after (pool_vector_label
, scan
);
3465 for (i
= 0; i
< pool_size
; i
++)
3467 pool_node
*p
= pool_vector
+ i
;
3469 switch (GET_MODE_SIZE (p
->mode
))
3472 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3476 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3485 scan
= emit_insn_after (gen_consttable_end (), scan
);
3486 scan
= emit_barrier_after (scan
);
3490 /* Non zero if the src operand needs to be fixed up */
3492 fixit (src
, mode
, destreg
)
3494 enum machine_mode mode
;
3497 if (CONSTANT_P (src
))
3499 if (GET_CODE (src
) == CONST_INT
)
3500 return (! const_ok_for_arm (INTVAL (src
))
3501 && ! const_ok_for_arm (~INTVAL (src
)));
3502 if (GET_CODE (src
) == CONST_DOUBLE
)
3503 return (GET_MODE (src
) == VOIDmode
3505 || (! const_double_rtx_ok_for_fpu (src
)
3506 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3507 return symbol_mentioned_p (src
);
3509 #ifndef AOF_ASSEMBLER
3510 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3514 return (mode
== SImode
&& GET_CODE (src
) == MEM
3515 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3516 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3519 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3521 find_barrier (from
, max_count
)
3526 rtx found_barrier
= 0;
3528 while (from
&& count
< max_count
)
3530 if (GET_CODE (from
) == BARRIER
)
3531 found_barrier
= from
;
3533 /* Count the length of this insn */
3534 if (GET_CODE (from
) == INSN
3535 && GET_CODE (PATTERN (from
)) == SET
3536 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3537 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3539 rtx src
= SET_SRC (PATTERN (from
));
3543 count
+= get_attr_length (from
);
3545 from
= NEXT_INSN (from
);
3550 /* We didn't find a barrier in time to
3551 dump our stuff, so we'll make one */
3552 rtx label
= gen_label_rtx ();
3555 from
= PREV_INSN (from
);
3557 from
= get_last_insn ();
3559 /* Walk back to be just before any jump */
3560 while (GET_CODE (from
) == JUMP_INSN
3561 || GET_CODE (from
) == NOTE
3562 || GET_CODE (from
) == CODE_LABEL
)
3563 from
= PREV_INSN (from
);
3565 from
= emit_jump_insn_after (gen_jump (label
), from
);
3566 JUMP_LABEL (from
) = label
;
3567 found_barrier
= emit_barrier_after (from
);
3568 emit_label_after (label
, found_barrier
);
3569 return found_barrier
;
3572 return found_barrier
;
3575 /* Non zero if the insn is a move instruction which needs to be fixed. */
3580 if (!INSN_DELETED_P (insn
)
3581 && GET_CODE (insn
) == INSN
3582 && GET_CODE (PATTERN (insn
)) == SET
)
3584 rtx pat
= PATTERN (insn
);
3585 rtx src
= SET_SRC (pat
);
3586 rtx dst
= SET_DEST (pat
);
3588 enum machine_mode mode
= GET_MODE (dst
);
3592 if (GET_CODE (dst
) == REG
)
3593 destreg
= REGNO (dst
);
3594 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3595 destreg
= REGNO (SUBREG_REG (dst
));
3597 return fixit (src
, mode
, destreg
);
3611 /* The ldr instruction can work with up to a 4k offset, and most constants
3612 will be loaded with one of these instructions; however, the adr
3613 instruction and the ldf instructions only work with a 1k offset. This
3614 code needs to be rewritten to use the 4k offset when possible, and to
3615 adjust when a 1k offset is needed. For now we just use a 1k offset
3619 /* Floating point operands can't work further than 1024 bytes from the
3620 PC, so to make things simple we restrict all loads for such functions.
3622 if (TARGET_HARD_FLOAT
)
3623 for (regno
= 16; regno
< 24; regno
++)
3624 if (regs_ever_live
[regno
])
3633 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3635 if (broken_move (insn
))
3637 /* This is a broken move instruction, scan ahead looking for
3638 a barrier to stick the constant table behind */
3640 rtx barrier
= find_barrier (insn
, count_size
);
3642 /* Now find all the moves between the points and modify them */
3643 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3645 if (broken_move (scan
))
3647 /* This is a broken move instruction, add it to the pool */
3648 rtx pat
= PATTERN (scan
);
3649 rtx src
= SET_SRC (pat
);
3650 rtx dst
= SET_DEST (pat
);
3651 enum machine_mode mode
= GET_MODE (dst
);
3652 HOST_WIDE_INT offset
;
3658 /* If this is an HImode constant load, convert it into
3659 an SImode constant load. Since the register is always
3660 32 bits this is safe. We have to do this, since the
3661 load pc-relative instruction only does a 32-bit load. */
3665 if (GET_CODE (dst
) != REG
)
3667 PUT_MODE (dst
, SImode
);
3670 offset
= add_constant (src
, mode
);
3671 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
3675 /* For wide moves to integer regs we need to split the
3676 address calculation off into a separate insn, so that
3677 the load can then be done with a load-multiple. This is
3678 safe, since we have already noted the length of such
3679 insns to be 8, and we are immediately over-writing the
3680 scratch we have grabbed with the final result. */
3681 if (GET_MODE_SIZE (mode
) > 4
3682 && (scratch
= REGNO (dst
)) < 16)
3684 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
3685 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3690 newsrc
= gen_rtx (MEM
, mode
, addr
);
3692 /* Build a jump insn wrapper around the move instead
3693 of an ordinary insn, because we want to have room for
3694 the target label rtx in fld[7], which an ordinary
3695 insn doesn't have. */
3696 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
3699 JUMP_LABEL (newinsn
) = pool_vector_label
;
3701 /* But it's still an ordinary insn */
3702 PUT_CODE (newinsn
, INSN
);
3709 dump_table (barrier
);
3716 /* Routines to output assembly language. */
3718 /* If the rtx is the correct value then return the string of the number.
3719 In this way we can ensure that valid double constants are generated even
3720 when cross compiling. */
3722 fp_immediate_constant (x
)
3728 if (!fpa_consts_inited
)
3731 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3732 for (i
= 0; i
< 8; i
++)
3733 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3734 return strings_fpa
[i
];
3739 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3741 fp_const_from_val (r
)
3746 if (! fpa_consts_inited
)
3749 for (i
= 0; i
< 8; i
++)
3750 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3751 return strings_fpa
[i
];
3756 /* Output the operands of a LDM/STM instruction to STREAM.
3757 MASK is the ARM register set mask of which only bits 0-15 are important.
3758 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3759 must follow the register list. */
3762 print_multi_reg (stream
, instr
, mask
, hat
)
3768 int not_first
= FALSE
;
3770 fputc ('\t', stream
);
3771 fprintf (stream
, instr
, REGISTER_PREFIX
);
3772 fputs (", {", stream
);
3773 for (i
= 0; i
< 16; i
++)
3774 if (mask
& (1 << i
))
3777 fprintf (stream
, ", ");
3778 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3782 fprintf (stream
, "}%s\n", hat
? "^" : "");
3785 /* Output a 'call' insn. */
3788 output_call (operands
)
3791 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3793 if (REGNO (operands
[0]) == 14)
3795 operands
[0] = gen_rtx (REG
, SImode
, 12);
3796 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3798 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3799 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3807 int something_changed
= 0;
3809 int code
= GET_CODE (x0
);
3816 if (REGNO (x0
) == 14)
3818 *x
= gen_rtx (REG
, SImode
, 12);
3823 /* Scan through the sub-elements and change any references there */
3824 fmt
= GET_RTX_FORMAT (code
);
3825 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3827 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3828 else if (fmt
[i
] == 'E')
3829 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3830 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3831 return something_changed
;
3835 /* Output a 'call' insn that is a reference in memory. */
3838 output_call_mem (operands
)
3841 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3842 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3844 if (eliminate_lr2ip (&operands
[0]))
3845 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3847 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3848 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3853 /* Output a move from arm registers to an fpu registers.
3854 OPERANDS[0] is an fpu register.
3855 OPERANDS[1] is the first registers of an arm register pair. */
3858 output_mov_long_double_fpu_from_arm (operands
)
3861 int arm_reg0
= REGNO (operands
[1]);
3867 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3868 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3869 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3871 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3872 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3876 /* Output a move from an fpu register to arm registers.
3877 OPERANDS[0] is the first registers of an arm register pair.
3878 OPERANDS[1] is an fpu register. */
3881 output_mov_long_double_arm_from_fpu (operands
)
3884 int arm_reg0
= REGNO (operands
[0]);
3890 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3891 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3892 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3894 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3895 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3899 /* Output a move from arm registers to arm registers of a long double
3900 OPERANDS[0] is the destination.
3901 OPERANDS[1] is the source. */
3903 output_mov_long_double_arm_from_arm (operands
)
3906 /* We have to be careful here because the two might overlap */
3907 int dest_start
= REGNO (operands
[0]);
3908 int src_start
= REGNO (operands
[1]);
3912 if (dest_start
< src_start
)
3914 for (i
= 0; i
< 3; i
++)
3916 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3917 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3918 output_asm_insn ("mov%?\t%0, %1", ops
);
3923 for (i
= 2; i
>= 0; i
--)
3925 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3926 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3927 output_asm_insn ("mov%?\t%0, %1", ops
);
3935 /* Output a move from arm registers to an fpu registers.
3936 OPERANDS[0] is an fpu register.
3937 OPERANDS[1] is the first registers of an arm register pair. */
3940 output_mov_double_fpu_from_arm (operands
)
3943 int arm_reg0
= REGNO (operands
[1]);
3948 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3949 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3950 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3951 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3955 /* Output a move from an fpu register to arm registers.
3956 OPERANDS[0] is the first registers of an arm register pair.
3957 OPERANDS[1] is an fpu register. */
3960 output_mov_double_arm_from_fpu (operands
)
3963 int arm_reg0
= REGNO (operands
[0]);
3969 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3970 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3971 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
3972 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
3976 /* Output a move between double words.
3977 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
3978 or MEM<-REG and all MEMs must be offsettable addresses. */
3981 output_move_double (operands
)
3984 enum rtx_code code0
= GET_CODE (operands
[0]);
3985 enum rtx_code code1
= GET_CODE (operands
[1]);
3990 int reg0
= REGNO (operands
[0]);
3992 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
3995 int reg1
= REGNO (operands
[1]);
3999 /* Ensure the second source is not overwritten */
4000 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
4001 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
4003 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
4005 else if (code1
== CONST_DOUBLE
)
4007 if (GET_MODE (operands
[1]) == DFmode
)
4010 union real_extract u
;
4012 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
4014 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
4015 otherops
[1] = GEN_INT(l
[1]);
4016 operands
[1] = GEN_INT(l
[0]);
4018 else if (GET_MODE (operands
[1]) != VOIDmode
)
4020 else if (WORDS_BIG_ENDIAN
)
4023 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4024 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4029 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4030 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4032 output_mov_immediate (operands
);
4033 output_mov_immediate (otherops
);
4035 else if (code1
== CONST_INT
)
4037 #if HOST_BITS_PER_WIDE_INT > 32
4038 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
4039 what the upper word is. */
4040 if (WORDS_BIG_ENDIAN
)
4042 otherops
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4043 operands
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4047 otherops
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4048 operands
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4051 /* Sign extend the intval into the high-order word */
4052 if (WORDS_BIG_ENDIAN
)
4054 otherops
[1] = operands
[1];
4055 operands
[1] = (INTVAL (operands
[1]) < 0
4056 ? constm1_rtx
: const0_rtx
);
4059 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
4061 output_mov_immediate (otherops
);
4062 output_mov_immediate (operands
);
4064 else if (code1
== MEM
)
4066 switch (GET_CODE (XEXP (operands
[1], 0)))
4069 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
4073 abort (); /* Should never happen now */
4077 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
4081 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
4085 abort (); /* Should never happen now */
4090 output_asm_insn ("adr%?\t%0, %1", operands
);
4091 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
4095 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
4097 otherops
[0] = operands
[0];
4098 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
4099 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
4100 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
4102 if (GET_CODE (otherops
[2]) == CONST_INT
)
4104 switch (INTVAL (otherops
[2]))
4107 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
4110 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
4113 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
4116 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
4117 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
4119 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4122 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4125 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
4126 return "ldm%?ia\t%0, %M0";
4130 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
4131 /* Take care of overlapping base/data reg. */
4132 if (reg_mentioned_p (operands
[0], operands
[1]))
4134 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4135 output_asm_insn ("ldr%?\t%0, %1", operands
);
4139 output_asm_insn ("ldr%?\t%0, %1", operands
);
4140 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4146 abort(); /* Constraints should prevent this */
4148 else if (code0
== MEM
&& code1
== REG
)
4150 if (REGNO (operands
[1]) == 12)
4153 switch (GET_CODE (XEXP (operands
[0], 0)))
4156 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
4160 abort (); /* Should never happen now */
4164 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4168 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4172 abort (); /* Should never happen now */
4176 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4178 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4181 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4185 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4189 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4196 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4197 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
4198 output_asm_insn ("str%?\t%1, %0", operands
);
4199 output_asm_insn ("str%?\t%1, %0", otherops
);
4203 abort(); /* Constraints should prevent this */
4209 /* Output an arbitrary MOV reg, #n.
4210 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4213 output_mov_immediate (operands
)
4216 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4220 /* Try to use one MOV */
4221 if (const_ok_for_arm (n
))
4223 output_asm_insn ("mov%?\t%0, %1", operands
);
4227 /* Try to use one MVN */
4228 if (const_ok_for_arm (~n
))
4230 operands
[1] = GEN_INT (~n
);
4231 output_asm_insn ("mvn%?\t%0, %1", operands
);
4235 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4237 for (i
=0; i
< 32; i
++)
4241 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4242 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4245 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4252 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4253 adding zero to one register, output nothing. */
4256 output_add_immediate (operands
)
4259 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4261 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4264 output_multi_immediate (operands
,
4265 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4268 output_multi_immediate (operands
,
4269 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4276 /* Output a multiple immediate operation.
4277 OPERANDS is the vector of operands referred to in the output patterns.
4278 INSTR1 is the output pattern to use for the first constant.
4279 INSTR2 is the output pattern to use for subsequent constants.
4280 IMMED_OP is the index of the constant slot in OPERANDS.
4281 N is the constant value. */
4284 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4286 char *instr1
, *instr2
;
4290 #if HOST_BITS_PER_WIDE_INT > 32
4296 operands
[immed_op
] = const0_rtx
;
4297 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4302 char *instr
= instr1
;
4304 /* Note that n is never zero here (which would give no output) */
4305 for (i
= 0; i
< 32; i
+= 2)
4309 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4310 output_asm_insn (instr
, operands
);
4320 /* Return the appropriate ARM instruction for the operation code.
4321 The returned result should not be overwritten. OP is the rtx of the
4322 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4326 arithmetic_instr (op
, shift_first_arg
)
4328 int shift_first_arg
;
4330 switch (GET_CODE (op
))
4336 return shift_first_arg
? "rsb" : "sub";
4353 /* Ensure valid constant shifts and return the appropriate shift mnemonic
4354 for the operation code. The returned result should not be overwritten.
4355 OP is the rtx code of the shift.
4356 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
4360 shift_op (op
, amountp
)
4362 HOST_WIDE_INT
*amountp
;
4365 enum rtx_code code
= GET_CODE (op
);
4367 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4369 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4370 *amountp
= INTVAL (XEXP (op
, 1));
4393 /* We never have to worry about the amount being other than a
4394 power of 2, since this case can never be reloaded from a reg. */
4396 *amountp
= int_log2 (*amountp
);
4407 /* This is not 100% correct, but follows from the desire to merge
4408 multiplication by a power of 2 with the recognizer for a
4409 shift. >=32 is not a valid shift for "asl", so we must try and
4410 output a shift that produces the correct arithmetical result.
4411 Using lsr #32 is identical except for the fact that the carry bit
4412 is not set correctly if we set the flags; but we never use the
4413 carry bit from such an operation, so we can ignore that. */
4414 if (code
== ROTATERT
)
4415 *amountp
&= 31; /* Rotate is just modulo 32 */
4416 else if (*amountp
!= (*amountp
& 31))
4423 /* Shifts of 0 are no-ops. */
4432 /* Obtain the shift from the POWER of two. */
4434 static HOST_WIDE_INT
4436 HOST_WIDE_INT power
;
4438 HOST_WIDE_INT shift
= 0;
4440 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
4450 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
4451 /bin/as is horribly restrictive. */
4454 output_ascii_pseudo_op (stream
, p
, len
)
4460 int len_so_far
= 1000;
4461 int chars_so_far
= 0;
4463 for (i
= 0; i
< len
; i
++)
4465 register int c
= p
[i
];
4467 if (len_so_far
> 50)
4470 fputs ("\"\n", stream
);
4471 fputs ("\t.ascii\t\"", stream
);
4476 if (c
== '\"' || c
== '\\')
4482 if (c
>= ' ' && c
< 0177)
4489 fprintf (stream
, "\\%03o", c
);
4496 fputs ("\"\n", stream
);
4500 /* Try to determine whether a pattern really clobbers the link register.
4501 This information is useful when peepholing, so that lr need not be pushed
4502 if we combine a call followed by a return.
4503 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4504 such a check should not be needed because these only update an existing
4505 value within a register; the register must still be set elsewhere within
4509 pattern_really_clobbers_lr (x
)
4514 switch (GET_CODE (x
))
4517 switch (GET_CODE (SET_DEST (x
)))
4520 return REGNO (SET_DEST (x
)) == 14;
4523 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4524 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4526 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4535 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4536 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4541 switch (GET_CODE (XEXP (x
, 0)))
4544 return REGNO (XEXP (x
, 0)) == 14;
4547 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4548 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4564 function_really_clobbers_lr (first
)
4569 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4571 switch (GET_CODE (insn
))
4576 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4581 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4586 /* Don't yet know how to handle those calls that are not to a
4588 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4591 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4594 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4600 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4606 default: /* Don't recognize it, be safe */
4610 /* A call can be made (by peepholing) not to clobber lr iff it is
4611 followed by a return. There may, however, be a use insn iff
4612 we are returning the result of the call.
4613 If we run off the end of the insn chain, then that means the
4614 call was at the end of the function. Unfortunately we don't
4615 have a return insn for the peephole to recognize, so we
4616 must reject this. (Can this be fixed by adding our own insn?) */
4617 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4620 /* No need to worry about lr if the call never returns */
4621 if (GET_CODE (next
) == BARRIER
)
4624 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4625 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4626 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4627 == REGNO (XEXP (PATTERN (next
), 0))))
4628 if ((next
= next_nonnote_insn (next
)) == NULL
)
4631 if (GET_CODE (next
) == JUMP_INSN
4632 && GET_CODE (PATTERN (next
)) == RETURN
)
4641 /* We have reached the end of the chain so lr was _not_ clobbered */
4646 output_return_instruction (operand
, really_return
, reverse
)
4652 int reg
, live_regs
= 0;
4653 int volatile_func
= (optimize
> 0
4654 && TREE_THIS_VOLATILE (current_function_decl
));
4656 return_used_this_function
= 1;
4661 /* If this function was declared non-returning, and we have found a tail
4662 call, then we have to trust that the called function won't return. */
4663 if (! really_return
)
4666 /* Otherwise, trap an attempted return by aborting. */
4668 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4669 assemble_external_libcall (ops
[1]);
4670 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4674 if (current_function_calls_alloca
&& ! really_return
)
4677 for (reg
= 0; reg
<= 10; reg
++)
4678 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4681 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4684 if (frame_pointer_needed
)
4689 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4692 if (frame_pointer_needed
)
4694 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4697 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4699 for (reg
= 0; reg
<= 10; reg
++)
4700 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4702 strcat (instr
, "%|");
4703 strcat (instr
, reg_names
[reg
]);
4705 strcat (instr
, ", ");
4708 if (frame_pointer_needed
)
4710 strcat (instr
, "%|");
4711 strcat (instr
, reg_names
[11]);
4712 strcat (instr
, ", ");
4713 strcat (instr
, "%|");
4714 strcat (instr
, reg_names
[13]);
4715 strcat (instr
, ", ");
4716 strcat (instr
, "%|");
4717 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4721 strcat (instr
, "%|");
4722 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4724 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4725 output_asm_insn (instr
, &operand
);
4727 else if (really_return
)
4729 if (TARGET_THUMB_INTERWORK
)
4730 sprintf (instr
, "bx%%?%%%s\t%%|lr", reverse
? "D" : "d");
4732 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4733 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4734 output_asm_insn (instr
, &operand
);
4740 /* Return nonzero if optimizing and the current function is volatile.
4741 Such functions never return, and many memory cycles can be saved
4742 by not storing register values that will never be needed again.
4743 This optimization was added to speed up context switching in a
4744 kernel application. */
4747 arm_volatile_func ()
4749 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
4752 /* The amount of stack adjustment that happens here, in output_return and in
4753 output_epilogue must be exactly the same as was calculated during reload,
4754 or things will point to the wrong place. The only time we can safely
4755 ignore this constraint is when a function has no arguments on the stack,
4756 no stack frame requirement and no live registers execpt for `lr'. If we
4757 can guarantee that by making all function calls into tail calls and that
4758 lr is not clobbered in any other way, then there is no need to push lr
4762 output_func_prologue (f
, frame_size
)
4766 int reg
, live_regs_mask
= 0;
4768 int volatile_func
= (optimize
> 0
4769 && TREE_THIS_VOLATILE (current_function_decl
));
4771 /* Nonzero if we must stuff some register arguments onto the stack as if
4772 they were passed there. */
4773 int store_arg_regs
= 0;
4775 if (arm_ccfsm_state
|| arm_target_insn
)
4776 abort (); /* Sanity check */
4778 if (arm_naked_function_p (current_function_decl
))
4781 return_used_this_function
= 0;
4782 lr_save_eliminated
= 0;
4784 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
4785 ASM_COMMENT_START
, current_function_args_size
,
4786 current_function_pretend_args_size
, frame_size
);
4787 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4788 ASM_COMMENT_START
, frame_pointer_needed
,
4789 current_function_anonymous_args
);
4792 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
4794 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4797 for (reg
= 0; reg
<= 10; reg
++)
4798 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4799 live_regs_mask
|= (1 << reg
);
4801 if (frame_pointer_needed
)
4802 live_regs_mask
|= 0xD800;
4803 else if (regs_ever_live
[14])
4805 if (! current_function_args_size
4806 && ! function_really_clobbers_lr (get_insns ()))
4807 lr_save_eliminated
= 1;
4809 live_regs_mask
|= 0x4000;
4814 /* if a di mode load/store multiple is used, and the base register
4815 is r3, then r4 can become an ever live register without lr
4816 doing so, in this case we need to push lr as well, or we
4817 will fail to get a proper return. */
4819 live_regs_mask
|= 0x4000;
4820 lr_save_eliminated
= 0;
4824 if (lr_save_eliminated
)
4825 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
4828 #ifdef AOF_ASSEMBLER
4830 fprintf (f
, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX
, REGISTER_PREFIX
,
4831 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
4837 output_func_epilogue (f
, frame_size
)
4841 int reg
, live_regs_mask
= 0;
4842 /* If we need this then it will always be at least this much */
4843 int floats_offset
= 12;
4845 int volatile_func
= (optimize
> 0
4846 && TREE_THIS_VOLATILE (current_function_decl
));
4848 if (use_return_insn() && return_used_this_function
)
4850 if ((frame_size
+ current_function_outgoing_args_size
) != 0
4851 && !(frame_pointer_needed
|| TARGET_APCS
))
4856 /* Naked functions don't have epilogues. */
4857 if (arm_naked_function_p (current_function_decl
))
4860 /* A volatile function should never return. Call abort. */
4863 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4864 assemble_external_libcall (op
);
4865 output_asm_insn ("bl\t%a0", &op
);
4869 for (reg
= 0; reg
<= 10; reg
++)
4870 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4872 live_regs_mask
|= (1 << reg
);
4876 if (frame_pointer_needed
)
4878 if (arm_fpu_arch
== FP_SOFT2
)
4880 for (reg
= 23; reg
> 15; reg
--)
4881 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4883 floats_offset
+= 12;
4884 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
4885 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
4892 for (reg
= 23; reg
> 15; reg
--)
4894 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4896 floats_offset
+= 12;
4897 /* We can't unstack more than four registers at once */
4898 if (start_reg
- reg
== 3)
4900 fprintf (f
, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
4901 REGISTER_PREFIX
, reg_names
[reg
],
4902 REGISTER_PREFIX
, floats_offset
);
4903 start_reg
= reg
- 1;
4908 if (reg
!= start_reg
)
4909 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4910 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4911 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4913 start_reg
= reg
- 1;
4917 /* Just in case the last register checked also needs unstacking. */
4918 if (reg
!= start_reg
)
4919 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4920 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4921 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4924 if (TARGET_THUMB_INTERWORK
)
4926 live_regs_mask
|= 0x6800;
4927 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
, FALSE
);
4928 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
4932 live_regs_mask
|= 0xA800;
4933 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
4934 TARGET_APCS_32
? FALSE
: TRUE
);
4939 /* Restore stack pointer if necessary. */
4940 if (frame_size
+ current_function_outgoing_args_size
!= 0)
4942 operands
[0] = operands
[1] = stack_pointer_rtx
;
4943 operands
[2] = GEN_INT (frame_size
4944 + current_function_outgoing_args_size
);
4945 output_add_immediate (operands
);
4948 if (arm_fpu_arch
== FP_SOFT2
)
4950 for (reg
= 16; reg
< 24; reg
++)
4951 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4952 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
4953 reg_names
[reg
], REGISTER_PREFIX
);
4959 for (reg
= 16; reg
< 24; reg
++)
4961 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4963 if (reg
- start_reg
== 3)
4965 fprintf (f
, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
4966 REGISTER_PREFIX
, reg_names
[start_reg
],
4968 start_reg
= reg
+ 1;
4973 if (reg
!= start_reg
)
4974 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4975 REGISTER_PREFIX
, reg_names
[start_reg
],
4976 reg
- start_reg
, REGISTER_PREFIX
);
4978 start_reg
= reg
+ 1;
4982 /* Just in case the last register checked also needs unstacking. */
4983 if (reg
!= start_reg
)
4984 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4985 REGISTER_PREFIX
, reg_names
[start_reg
],
4986 reg
- start_reg
, REGISTER_PREFIX
);
4989 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
4991 if (TARGET_THUMB_INTERWORK
)
4993 if (! lr_save_eliminated
)
4994 print_multi_reg(f
, "ldmfd\t%ssp!", live_regs_mask
| 0x4000,
4997 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
4999 else if (lr_save_eliminated
)
5000 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5001 : "\tmovs\t%spc, %slr\n"),
5002 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5004 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
5005 TARGET_APCS_32
? FALSE
: TRUE
);
5009 if (live_regs_mask
|| regs_ever_live
[14])
5011 /* Restore the integer regs, and the return address into lr */
5012 if (! lr_save_eliminated
)
5013 live_regs_mask
|= 0x4000;
5015 if (live_regs_mask
!= 0)
5016 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5019 if (current_function_pretend_args_size
)
5021 /* Unwind the pre-pushed regs */
5022 operands
[0] = operands
[1] = stack_pointer_rtx
;
5023 operands
[2] = GEN_INT (current_function_pretend_args_size
);
5024 output_add_immediate (operands
);
5026 /* And finally, go home */
5027 if (TARGET_THUMB_INTERWORK
)
5028 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5030 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5031 : "\tmovs\t%spc, %slr\n"),
5032 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5038 current_function_anonymous_args
= 0;
5042 emit_multi_reg_push (mask
)
5049 for (i
= 0; i
< 16; i
++)
5050 if (mask
& (1 << i
))
5053 if (num_regs
== 0 || num_regs
> 16)
5056 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
5058 for (i
= 0; i
< 16; i
++)
5060 if (mask
& (1 << i
))
5063 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
5064 gen_rtx (PRE_DEC
, BLKmode
,
5065 stack_pointer_rtx
)),
5066 gen_rtx (UNSPEC
, BLKmode
,
5067 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
5073 for (j
= 1, i
++; j
< num_regs
; i
++)
5075 if (mask
& (1 << i
))
5078 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
5087 emit_sfm (base_reg
, count
)
5094 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (count
));
5096 XVECEXP (par
, 0, 0) = gen_rtx (SET
, VOIDmode
,
5097 gen_rtx (MEM
, BLKmode
,
5098 gen_rtx (PRE_DEC
, BLKmode
,
5099 stack_pointer_rtx
)),
5100 gen_rtx (UNSPEC
, BLKmode
,
5101 gen_rtvec (1, gen_rtx (REG
, XFmode
,
5104 for (i
= 1; i
< count
; i
++)
5105 XVECEXP (par
, 0, i
) = gen_rtx (USE
, VOIDmode
,
5106 gen_rtx (REG
, XFmode
, base_reg
++));
5112 arm_expand_prologue ()
5115 rtx amount
= GEN_INT (-(get_frame_size ()
5116 + current_function_outgoing_args_size
));
5119 int live_regs_mask
= 0;
5120 int store_arg_regs
= 0;
5121 int volatile_func
= (optimize
> 0
5122 && TREE_THIS_VOLATILE (current_function_decl
));
5124 /* Naked functions don't have prologues. */
5125 if (arm_naked_function_p (current_function_decl
))
5128 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5131 if (! volatile_func
)
5132 for (reg
= 0; reg
<= 10; reg
++)
5133 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5134 live_regs_mask
|= 1 << reg
;
5136 if (! volatile_func
&& regs_ever_live
[14])
5137 live_regs_mask
|= 0x4000;
5139 if (frame_pointer_needed
)
5141 live_regs_mask
|= 0xD800;
5142 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
5143 stack_pointer_rtx
));
5146 if (current_function_pretend_args_size
)
5149 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
5152 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5153 GEN_INT (-current_function_pretend_args_size
)));
5158 /* If we have to push any regs, then we must push lr as well, or
5159 we won't get a proper return. */
5160 live_regs_mask
|= 0x4000;
5161 emit_multi_reg_push (live_regs_mask
);
5164 /* For now the integer regs are still pushed in output_func_epilogue (). */
5166 if (! volatile_func
)
5168 if (arm_fpu_arch
== FP_SOFT2
)
5170 for (reg
= 23; reg
> 15; reg
--)
5171 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5172 emit_insn (gen_rtx (SET
, VOIDmode
,
5173 gen_rtx (MEM
, XFmode
,
5174 gen_rtx (PRE_DEC
, XFmode
,
5175 stack_pointer_rtx
)),
5176 gen_rtx (REG
, XFmode
, reg
)));
5182 for (reg
= 23; reg
> 15; reg
--)
5184 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5186 if (start_reg
- reg
== 3)
5189 start_reg
= reg
- 1;
5194 if (start_reg
!= reg
)
5195 emit_sfm (reg
+ 1, start_reg
- reg
);
5196 start_reg
= reg
- 1;
5200 if (start_reg
!= reg
)
5201 emit_sfm (reg
+ 1, start_reg
- reg
);
5205 if (frame_pointer_needed
)
5206 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
5208 (-(4 + current_function_pretend_args_size
)))));
5210 if (amount
!= const0_rtx
)
5212 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
5213 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
5214 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
5217 /* If we are profiling, make sure no instructions are scheduled before
5218 the call to mcount. */
5219 if (profile_flag
|| profile_block_flag
)
5220 emit_insn (gen_blockage ());
5224 /* If CODE is 'd', then the X is a condition operand and the instruction
5225 should only be executed if the condition is true.
5226 if CODE is 'D', then the X is a condition operand and the instruction
5227 should only be executed if the condition is false: however, if the mode
5228 of the comparison is CCFPEmode, then always execute the instruction -- we
5229 do this because in these circumstances !GE does not necessarily imply LT;
5230 in these cases the instruction pattern will take care to make sure that
5231 an instruction containing %d will follow, thereby undoing the effects of
5232 doing this instruction unconditionally.
5233 If CODE is 'N' then X is a floating point operand that must be negated
5235 If CODE is 'B' then output a bitwise inverted value of X (a const int).
5236 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
5239 arm_print_operand (stream
, x
, code
)
5247 fputs (ASM_COMMENT_START
, stream
);
5251 fputs (REGISTER_PREFIX
, stream
);
5255 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
5256 fputs (arm_condition_codes
[arm_current_cc
], stream
);
5262 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
5263 r
= REAL_VALUE_NEGATE (r
);
5264 fprintf (stream
, "%s", fp_const_from_val (&r
));
5269 if (GET_CODE (x
) == CONST_INT
)
5271 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5276 ARM_SIGN_EXTEND (~ INTVAL (x
)));
5280 output_addr_const (stream
, x
);
5285 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
5289 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
5295 char *shift
= shift_op (x
, &val
);
5299 fprintf (stream
, ", %s ", shift_op (x
, &val
));
5301 arm_print_operand (stream
, XEXP (x
, 1), 0);
5304 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5317 fputs (REGISTER_PREFIX
, stream
);
5318 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
5324 fputs (REGISTER_PREFIX
, stream
);
5325 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
5329 fputs (REGISTER_PREFIX
, stream
);
5330 if (GET_CODE (XEXP (x
, 0)) == REG
)
5331 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
5333 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
5337 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
5338 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
5339 + ((GET_MODE_SIZE (GET_MODE (x
))
5340 + GET_MODE_SIZE (SImode
) - 1)
5341 / GET_MODE_SIZE (SImode
))]);
5346 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
5352 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
5353 (get_arm_condition_code (x
))],
5361 if (GET_CODE (x
) == REG
)
5363 fputs (REGISTER_PREFIX
, stream
);
5364 fputs (reg_names
[REGNO (x
)], stream
);
5366 else if (GET_CODE (x
) == MEM
)
5368 output_memory_reference_mode
= GET_MODE (x
);
5369 output_address (XEXP (x
, 0));
5371 else if (GET_CODE (x
) == CONST_DOUBLE
)
5372 fprintf (stream
, "#%s", fp_immediate_constant (x
));
5373 else if (GET_CODE (x
) == NEG
)
5374 abort (); /* This should never happen now. */
5377 fputc ('#', stream
);
5378 output_addr_const (stream
, x
);
5384 /* A finite state machine takes care of noticing whether or not instructions
5385 can be conditionally executed, and thus decrease execution time and code
5386 size by deleting branch instructions. The fsm is controlled by
5387 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
5389 /* The state of the fsm controlling condition codes are:
5390 0: normal, do nothing special
5391 1: make ASM_OUTPUT_OPCODE not output this instruction
5392 2: make ASM_OUTPUT_OPCODE not output this instruction
5393 3: make instructions conditional
5394 4: make instructions conditional
5396 State transitions (state->state by whom under condition):
5397 0 -> 1 final_prescan_insn if the `target' is a label
5398 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
5399 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
5400 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
5401 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
5402 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
5403 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
5404 (the target insn is arm_target_insn).
5406 If the jump clobbers the conditions then we use states 2 and 4.
5408 A similar thing can be done with conditional return insns.
5410 XXX In case the `target' is an unconditional branch, this conditionalising
5411 of the instructions always reduces code size, but not always execution
5412 time. But then, I want to reduce the code size to somewhere near what
5413 /bin/cc produces. */
5415 /* Returns the index of the ARM condition code string in
5416 `arm_condition_codes'. COMPARISON should be an rtx like
5417 `(eq (...) (...))'. */
5419 static enum arm_cond_code
5420 get_arm_condition_code (comparison
)
5423 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
5425 register enum rtx_code comp_code
= GET_CODE (comparison
);
5427 if (GET_MODE_CLASS (mode
) != MODE_CC
)
5428 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
5429 XEXP (comparison
, 1));
5433 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
5434 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
5435 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
5436 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
5437 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
5438 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
5439 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
5440 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
5441 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
5442 case CC_DLTUmode
: code
= ARM_CC
;
5445 if (comp_code
!= EQ
&& comp_code
!= NE
)
5448 if (comp_code
== EQ
)
5449 return ARM_INVERSE_CONDITION_CODE (code
);
5455 case NE
: return ARM_NE
;
5456 case EQ
: return ARM_EQ
;
5457 case GE
: return ARM_PL
;
5458 case LT
: return ARM_MI
;
5466 case NE
: return ARM_NE
;
5467 case EQ
: return ARM_EQ
;
5474 case GE
: return ARM_GE
;
5475 case GT
: return ARM_GT
;
5476 case LE
: return ARM_LS
;
5477 case LT
: return ARM_MI
;
5484 case NE
: return ARM_NE
;
5485 case EQ
: return ARM_EQ
;
5486 case GE
: return ARM_LE
;
5487 case GT
: return ARM_LT
;
5488 case LE
: return ARM_GE
;
5489 case LT
: return ARM_GT
;
5490 case GEU
: return ARM_LS
;
5491 case GTU
: return ARM_CC
;
5492 case LEU
: return ARM_CS
;
5493 case LTU
: return ARM_HI
;
5500 case LTU
: return ARM_CS
;
5501 case GEU
: return ARM_CC
;
5508 case NE
: return ARM_NE
;
5509 case EQ
: return ARM_EQ
;
5510 case GE
: return ARM_GE
;
5511 case GT
: return ARM_GT
;
5512 case LE
: return ARM_LE
;
5513 case LT
: return ARM_LT
;
5514 case GEU
: return ARM_CS
;
5515 case GTU
: return ARM_HI
;
5516 case LEU
: return ARM_LS
;
5517 case LTU
: return ARM_CC
;
5529 final_prescan_insn (insn
, opvec
, noperands
)
5534 /* BODY will hold the body of INSN. */
5535 register rtx body
= PATTERN (insn
);
5537 /* This will be 1 if trying to repeat the trick, and things need to be
5538 reversed if it appears to fail. */
5541 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5542 taken are clobbered, even if the rtl suggests otherwise. It also
5543 means that we have to grub around within the jump expression to find
5544 out what the conditions are when the jump isn't taken. */
5545 int jump_clobbers
= 0;
5547 /* If we start with a return insn, we only succeed if we find another one. */
5548 int seeking_return
= 0;
5550 /* START_INSN will hold the insn from where we start looking. This is the
5551 first insn after the following code_label if REVERSE is true. */
5552 rtx start_insn
= insn
;
5554 /* If in state 4, check if the target branch is reached, in order to
5555 change back to state 0. */
5556 if (arm_ccfsm_state
== 4)
5558 if (insn
== arm_target_insn
)
5560 arm_target_insn
= NULL
;
5561 arm_ccfsm_state
= 0;
5566 /* If in state 3, it is possible to repeat the trick, if this insn is an
5567 unconditional branch to a label, and immediately following this branch
5568 is the previous target label which is only used once, and the label this
5569 branch jumps to is not too far off. */
5570 if (arm_ccfsm_state
== 3)
5572 if (simplejump_p (insn
))
5574 start_insn
= next_nonnote_insn (start_insn
);
5575 if (GET_CODE (start_insn
) == BARRIER
)
5577 /* XXX Isn't this always a barrier? */
5578 start_insn
= next_nonnote_insn (start_insn
);
5580 if (GET_CODE (start_insn
) == CODE_LABEL
5581 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5582 && LABEL_NUSES (start_insn
) == 1)
5587 else if (GET_CODE (body
) == RETURN
)
5589 start_insn
= next_nonnote_insn (start_insn
);
5590 if (GET_CODE (start_insn
) == BARRIER
)
5591 start_insn
= next_nonnote_insn (start_insn
);
5592 if (GET_CODE (start_insn
) == CODE_LABEL
5593 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5594 && LABEL_NUSES (start_insn
) == 1)
5606 if (arm_ccfsm_state
!= 0 && !reverse
)
5608 if (GET_CODE (insn
) != JUMP_INSN
)
5611 /* This jump might be paralleled with a clobber of the condition codes
5612 the jump should always come first */
5613 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5614 body
= XVECEXP (body
, 0, 0);
5617 /* If this is a conditional return then we don't want to know */
5618 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5619 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
5620 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
5621 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
5626 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5627 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
5630 int fail
= FALSE
, succeed
= FALSE
;
5631 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5632 int then_not_else
= TRUE
;
5633 rtx this_insn
= start_insn
, label
= 0;
5635 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
5637 /* The code below is wrong for these, and I haven't time to
5638 fix it now. So we just do the safe thing and return. This
5639 whole function needs re-writing anyway. */
5644 /* Register the insn jumped to. */
5647 if (!seeking_return
)
5648 label
= XEXP (SET_SRC (body
), 0);
5650 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
5651 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
5652 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
5654 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
5655 then_not_else
= FALSE
;
5657 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
5659 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
5662 then_not_else
= FALSE
;
5667 /* See how many insns this branch skips, and what kind of insns. If all
5668 insns are okay, and the label or unconditional branch to the same
5669 label is not too far away, succeed. */
5670 for (insns_skipped
= 0;
5671 !fail
&& !succeed
&& insns_skipped
++ < MAX_INSNS_SKIPPED
;)
5675 this_insn
= next_nonnote_insn (this_insn
);
5679 scanbody
= PATTERN (this_insn
);
5681 switch (GET_CODE (this_insn
))
5684 /* Succeed if it is the target label, otherwise fail since
5685 control falls in from somewhere else. */
5686 if (this_insn
== label
)
5690 arm_ccfsm_state
= 2;
5691 this_insn
= next_nonnote_insn (this_insn
);
5694 arm_ccfsm_state
= 1;
5702 /* Succeed if the following insn is the target label.
5704 If return insns are used then the last insn in a function
5705 will be a barrier. */
5706 this_insn
= next_nonnote_insn (this_insn
);
5707 if (this_insn
&& this_insn
== label
)
5711 arm_ccfsm_state
= 2;
5712 this_insn
= next_nonnote_insn (this_insn
);
5715 arm_ccfsm_state
= 1;
5723 /* If using 32-bit addresses the cc is not preserved over
5727 /* Succeed if the following insn is the target label,
5728 or if the following two insns are a barrier and
5729 the target label. */
5730 this_insn
= next_nonnote_insn (this_insn
);
5731 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
5732 this_insn
= next_nonnote_insn (this_insn
);
5734 if (this_insn
&& this_insn
== label
5735 && insns_skipped
< MAX_INSNS_SKIPPED
)
5739 arm_ccfsm_state
= 2;
5740 this_insn
= next_nonnote_insn (this_insn
);
5743 arm_ccfsm_state
= 1;
5752 /* If this is an unconditional branch to the same label, succeed.
5753 If it is to another label, do nothing. If it is conditional,
5755 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5757 if (GET_CODE (scanbody
) == SET
5758 && GET_CODE (SET_DEST (scanbody
)) == PC
)
5760 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
5761 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
5763 arm_ccfsm_state
= 2;
5766 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
5769 else if (GET_CODE (scanbody
) == RETURN
5772 arm_ccfsm_state
= 2;
5775 else if (GET_CODE (scanbody
) == PARALLEL
)
5777 switch (get_attr_conds (this_insn
))
5789 /* Instructions using or affecting the condition codes make it
5791 if ((GET_CODE (scanbody
) == SET
5792 || GET_CODE (scanbody
) == PARALLEL
)
5793 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
5803 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
5804 arm_target_label
= CODE_LABEL_NUMBER (label
);
5805 else if (seeking_return
|| arm_ccfsm_state
== 2)
5807 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
5809 this_insn
= next_nonnote_insn (this_insn
);
5810 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
5811 || GET_CODE (this_insn
) == CODE_LABEL
))
5816 /* Oh, dear! we ran off the end.. give up */
5817 recog (PATTERN (insn
), insn
, NULL_PTR
);
5818 arm_ccfsm_state
= 0;
5819 arm_target_insn
= NULL
;
5822 arm_target_insn
= this_insn
;
5831 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
5833 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
5834 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5835 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
5836 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5840 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
5843 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
5847 if (reverse
|| then_not_else
)
5848 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5850 /* restore recog_operand (getting the attributes of other insns can
5851 destroy this array, but final.c assumes that it remains intact
5852 across this call; since the insn has been recognized already we
5853 call recog direct). */
5854 recog (PATTERN (insn
), insn
, NULL_PTR
);
5858 #ifdef AOF_ASSEMBLER
5859 /* Special functions only needed when producing AOF syntax assembler. */
5861 rtx aof_pic_label
= NULL_RTX
;
5864 struct pic_chain
*next
;
5868 static struct pic_chain
*aof_pic_chain
= NULL
;
5874 struct pic_chain
**chainp
;
5877 if (aof_pic_label
== NULL_RTX
)
5879 /* This needs to persist throughout the compilation. */
5880 end_temporary_allocation ();
5881 aof_pic_label
= gen_rtx (SYMBOL_REF
, Pmode
, "x$adcons");
5882 resume_temporary_allocation ();
5885 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
5886 offset
+= 4, chainp
= &(*chainp
)->next
)
5887 if ((*chainp
)->symname
== XSTR (x
, 0))
5888 return plus_constant (aof_pic_label
, offset
);
5890 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
5891 (*chainp
)->next
= NULL
;
5892 (*chainp
)->symname
= XSTR (x
, 0);
5893 return plus_constant (aof_pic_label
, offset
);
5897 aof_dump_pic_table (f
)
5900 struct pic_chain
*chain
;
5902 if (aof_pic_chain
== NULL
)
5905 fprintf (f
, "\tAREA |%s$$adcons|, BASED %s%s\n",
5906 reg_names
[PIC_OFFSET_TABLE_REGNUM
], REGISTER_PREFIX
,
5907 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
5908 fputs ("|x$adcons|\n", f
);
5910 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
5912 fputs ("\tDCD\t", f
);
5913 assemble_name (f
, chain
->symname
);
5918 int arm_text_section_count
= 1;
5923 static char buf
[100];
5924 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
5925 arm_text_section_count
++);
5927 strcat (buf
, ", PIC, REENTRANT");
/* Count of data AREAs emitted so far; used to give each one a unique
   name.  */
static int arm_data_section_count = 1;

/* Return (in a static buffer) the assembler directive that opens a
   fresh data section.  */
char *
aof_data_section ()
{
  static char buf[100];

  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
5941 /* The AOF assembler is religiously strict about declarations of
5942 imported and exported symbols, so that it is impossible to declare
5943 a function as imported near the beginning of the file, and then to
5944 export it later on. It is, however, possible to delay the decision
5945 until all the functions in the file have been compiled. To get
5946 around this, we maintain a list of the imports and exports, and
5947 delete from it any that are subsequently defined. At the end of
5948    compilation we spit the remainder of the list out before the END
   directive.  */
5953 struct import
*next
;
5957 static struct import
*imports_list
= NULL
;
5960 aof_add_import (name
)
5965 for (new = imports_list
; new; new = new->next
)
5966 if (new->name
== name
)
5969 new = (struct import
*) xmalloc (sizeof (struct import
));
5970 new->next
= imports_list
;
5976 aof_delete_import (name
)
5979 struct import
**old
;
5981 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
5983 if ((*old
)->name
== name
)
5985 *old
= (*old
)->next
;
5991 int arm_main_function
= 0;
5994 aof_dump_imports (f
)
5997 /* The AOF assembler needs this to cause the startup code to be extracted
5998 from the library. Brining in __main causes the whole thing to work
6000 if (arm_main_function
)
6003 fputs ("\tIMPORT __main\n", f
);
6004 fputs ("\tDCD __main\n", f
);
6007 /* Now dump the remaining imports. */
6008 while (imports_list
)
6010 fprintf (f
, "\tIMPORT\t");
6011 assemble_name (f
, imports_list
->name
);
6013 imports_list
= imports_list
->next
;
6016 #endif /* AOF_ASSEMBLER */