/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
/* Specify which cpu to schedule for.  */

enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */

enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one insn in the dependency chain.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
	target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
	error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE.  */

static const char *
alpha_mangle_fundamental_type (tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  int i;
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { 0, 0, 0 }
  };
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string,"d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table[i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table[i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table[i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table[i].flags;
	    break;
	  }
      if (! cpu_table[i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; cpu_table[i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table[i].name))
	  {
	    alpha_tune = cpu_table[i].processor;
	    break;
	  }
      if (! cpu_table[i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
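
/* Illustrative examples (added; not in the original source):
   zap_mask (0x00000000000000ff) and zap_mask (0xff00ff00ff00ff00) return 1,
   since every byte is 0x00 or 0xff; zap_mask (0x0000000000000080) returns 0,
   because its low byte is neither.  These masks are exactly the AND operands
   that a single ZAP/ZAPNOT insn can implement.  */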
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

bool
alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
{
  switch (c)
    {
    case 'I':
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;
    case 'J':
      /* The constant zero.  */
      return value == 0;
    case 'K':
      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
    case 'L':
      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
	      && ((value) >> 31 == -1 || value >> 31 == 0));
    case 'M':
      /* A constant that can be AND'ed with using a ZAP insn.  */
      return zap_mask (value);
    case 'N':
      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
    case 'O':
      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;
    case 'P':
      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;

    default:
      return false;
    }
}
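
/* Worked example for 'L' (added for illustration): 0x12340000 qualifies --
   its low 16 bits are zero and it sign-extends from bit 31 -- so
   "ldah $r,0x1234($31)" materializes it in a single insn.  */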
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

bool
alpha_const_double_ok_for_letter_p (rtx value, int c)
{
  switch (c)
    {
    case 'G':
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
	      && value == CONST0_RTX (GET_MODE (value)));

    case 'H':
      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
	      && zap_mask (CONST_DOUBLE_LOW (value))
	      && zap_mask (CONST_DOUBLE_HIGH (value)));

    default:
      return false;
    }
}
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE matches for C.  */

bool
alpha_extra_constraint (rtx value, int c)
{
  switch (c)
    {
    case 'Q':
      return normal_memory_operand (value, VOIDmode);
    case 'R':
      return direct_call_operand (value, Pmode);
    case 'S':
      return (GET_CODE (value) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
    case 'T':
      return GET_CODE (value) == HIGH;
    case 'U':
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
    case 'W':
      return (GET_CODE (value) == CONST_VECTOR
	      && value == CONST0_RTX (GET_MODE (value)));

    default:
      return false;
    }
}
/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  On unicosmk, we have the situation that HImode
   doesn't map to any C type, but of course we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname[symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
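
/* For example (added for illustration), the DImode address ldq_u uses for
   an unaligned access typically has the form

       (and:DI (plus:DI (reg:DI) (const_int N)) (const_int -8))

   which rounds the byte address down to the enclosing aligned quadword.  */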
bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;
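
	  /* Sketch of the assembly the global-dynamic sequence above
	     expands to (added for illustration; exact register choices
	     are made later by the register allocator):

		lda   $16,x($29)                !tlsgd!N
		ldq   $27,__tls_get_addr($29)   !literal!N
		jsr   $26,($27),__tls_get_addr  !lituse_tlsgd!N
		ldah  $29,0($26)                !gpdisp!M
		lda   $29,0($29)                !gpdisp!M  */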
	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);
	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;
	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (!no_new_pseudos)
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;
 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;
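
    /* Worked example (added for illustration): addend = 0x48000 splits
       into low = -0x8000 and high = 0x50000, giving the final address
       (plus (plus x 0x50000) -0x8000); 0x50000 - 0x8000 == 0x48000.  */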
    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;
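
      /* Worked example (added for illustration): val = 0x7fffffff yields
	 low = -1 and high = -0x80000000 after sign-extension, so
	 high + low != val and we must punt rather than wrap around.  */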
      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, type);
      return x;
    }

  return NULL_RTX;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;
    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
			      rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	  || (GET_CODE (x) == SUBREG
	      && (GET_CODE (SUBREG_REG (x)) == MEM
		  || (GET_CODE (SUBREG_REG (x)) == REG
		      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
	{
	  if (!in || !aligned_memory_operand(x, mode))
	    return GENERAL_REGS;
	}
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
	  && ! (memory_operand (x, mode) || x == const0_rtx))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}
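
	  /* Worked example (added for illustration): c = 0x7fff8000 gives
	     low = -0x8000 and tmp1 = 0x80000000, whose high part would read
	     as negative; with extra = 0x4000 the constant is instead built
	     as (0x4000<<16) + (0x4000<<16) - 0x8000 via ldah, ldah, lda.  */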
      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (no_new_pseudos)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}
      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}
      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp && c < 0)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
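
/* Worked example of the ZAPNOT trick above (added for illustration):
   c = 0x00000000abcd0000 becomes new = 0xffffffffabcdffff, a sign-extended
   32-bit constant loadable with ldah+lda; c | ~new = 0x00000000ffff0000 is
   a zap mask, so one more AND (a ZAPNOT) recovers c.  */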
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and we
     can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
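
  /* Worked example (added for illustration): c1 = 0x1234567890abcdef gives
     d1 = -0x3211, d2 = -0x6f540000, d3 = 0x5679, d4 = 0x12340000, and
     ((d4 + d3) << 32) + d2 + d1 reassembles the original constant.  */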
#endif

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT)
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case HIGH:
      return true;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (GET_CODE (operands[1]) == MEM
      || (GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == MEM)
      || (reload_in_progress && GET_CODE (operands[1]) == REG
	  && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == REG
	  && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      emit_insn ((mode == QImode
			  ? gen_reload_inqi_help
			  : gen_reload_inhi_help)
			 (operands[0], operands[1],
			  gen_rtx_REG (SImode, REGNO (operands[0]))));
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (GET_CODE (subtarget) == REG)
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      emit_insn ((mode == QImode
			  ? gen_aligned_loadqi
			  : gen_aligned_loadhi)
			 (subtarget, aligned_mem, bitnum, scratch));

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
2299 /* Don't pass these as parameters since that makes the generated
2300 code depend on parameter evaluation order which will cause
2301 bootstrap failures. */
2303 rtx temp1
, temp2
, seq
, subtarget
;
2306 temp1
= gen_reg_rtx (DImode
);
2307 temp2
= gen_reg_rtx (DImode
);
2309 subtarget
= operands
[0];
2310 if (GET_CODE (subtarget
) == REG
)
2311 subtarget
= gen_lowpart (DImode
, subtarget
), copyout
= false;
2313 subtarget
= gen_reg_rtx (DImode
), copyout
= true;
2315 seq
= ((mode
== QImode
2316 ? gen_unaligned_loadqi
2317 : gen_unaligned_loadhi
)
2318 (subtarget
, get_unaligned_address (operands
[1], 0),
2320 alpha_set_memflags (seq
, operands
[1]);
2324 emit_move_insn (operands
[0], gen_lowpart (mode
, subtarget
));
2329 if (GET_CODE (operands
[0]) == MEM
2330 || (GET_CODE (operands
[0]) == SUBREG
2331 && GET_CODE (SUBREG_REG (operands
[0])) == MEM
)
2332 || (reload_in_progress
&& GET_CODE (operands
[0]) == REG
2333 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
)
2334 || (reload_in_progress
&& GET_CODE (operands
[0]) == SUBREG
2335 && GET_CODE (SUBREG_REG (operands
[0])) == REG
2336 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
))
2338 if (aligned_memory_operand (operands
[0], mode
))
2340 rtx aligned_mem
, bitnum
;
2341 rtx temp1
= gen_reg_rtx (SImode
);
2342 rtx temp2
= gen_reg_rtx (SImode
);
2344 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
2346 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
2351 rtx temp1
= gen_reg_rtx (DImode
);
2352 rtx temp2
= gen_reg_rtx (DImode
);
2353 rtx temp3
= gen_reg_rtx (DImode
);
2354 rtx seq
= ((mode
== QImode
2355 ? gen_unaligned_storeqi
2356 : gen_unaligned_storehi
)
2357 (get_unaligned_address (operands
[0], 0),
2358 operands
[1], temp1
, temp2
, temp3
));
2360 alpha_set_memflags (seq
, operands
[0]);
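/* For reference, the aligned non-bwx halfword load above turns into
   something like (sketch only)

	ldl	$1,-2(base)		# load the enclosing aligned word
	extwl	$1,2,$1			# extract the halfword of interest

   while the unaligned cases go through the ldq_u/extql/extqh pattern
   documented before alpha_expand_unaligned_load below.  */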
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else cause I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

	srl	$16,1,$1
	and	$16,1,$2
	cmplt	$16,0,$3
	or	$1,$2,$2
	cmovge	$16,$16,$2
	itoft	$3,$f10
	itoft	$2,$f11
	cvtqs	$f11,$f11
	adds	$f11,$f11,$f0
	fcmoveq	$f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

	LC0: .long 0,0x5f800000

	itoft	$16,$f11
	lda	$2,LC0
	cmplt	$16,0,$1
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	s4addq	$1,$2,$1
	lds	$f12,0($1)
	cvtqt	$f10,$f10
	cvtqt	$f11,$f11
	addt	$f12,$f10,$f0
	addt	$f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
/* Generate the comparison for a conditional branch.  */

rtx
alpha_emit_conditional_branch (enum rtx_code code)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode cmp_mode, branch_mode = VOIDmode;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  rtx tem;

  if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      alpha_compare.fp_p = 0;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (alpha_compare.fp_p)
	{
	  cmp_code = swap_condition (code);
	  branch_code = NE;
	  tem = op0, op0 = op1, op1 = tem;
	}
      else
	{
	  cmp_code = reverse_condition (code);
	  branch_code = EQ;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (alpha_compare.fp_p)
    {
      cmp_mode = DFmode;
      if (flag_unsafe_math_optimizations)
	{
	  /* When we are not as concerned about non-finite values, and we
	     are comparing against zero, we can branch directly.  */
	  if (op1 == CONST0_RTX (DFmode))
	    cmp_code = UNKNOWN, branch_code = code;
	  else if (op0 == CONST0_RTX (DFmode))
	    {
	      /* Undo the swap we probably did just above.  */
	      tem = op0, op0 = op1, op1 = tem;
	      branch_code = swap_condition (cmp_code);
	      cmp_code = UNKNOWN;
	    }
	}
      else
	{
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
	  branch_mode = CCmode;
	}
    }
  else
    {
      cmp_mode = DImode;

      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
	{
	  /* Whee.  Compare and branch against 0 directly.  */
	  if (op1 == const0_rtx)
	    cmp_code = UNKNOWN, branch_code = code;

	  /* If the constants doesn't fit into an immediate, but can
	     be generated by lda/ldah, we adjust the argument and
	     compare against zero, so we can use beq/bne directly.  */
	  /* ??? Don't do this when comparing against symbols, otherwise
	     we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
	     be declared false out of hand (at least for non-weak).  */
	  else if (GET_CODE (op1) == CONST_INT
		   && (code == EQ || code == NE)
		   && !(symbolic_operand (op0, VOIDmode)
			|| (GET_CODE (op0) == REG && REG_POINTER (op0))))
	    {
	      HOST_WIDE_INT v = INTVAL (op1), n = -v;

	      if (! CONST_OK_FOR_LETTER_P (v, 'I')
		  && (CONST_OK_FOR_LETTER_P (n, 'K')
		      || CONST_OK_FOR_LETTER_P (n, 'L')))
		{
		  cmp_code = PLUS, branch_code = code;
		  op1 = GEN_INT (n);
		}
	    }
	}

      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  /* Return the branch comparison.  */
  return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
}
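/* Example: a signed integer "a > b" has no direct compare instruction;
   the code above reverses it and emits, roughly,

	cmple	$a,$b,$t
	beq	$t,target

   whereas the FP flavor swaps the operands instead, so the IEEE
   compare itself is still one of the supported directions.  */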
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

rtx
alpha_emit_setcc (enum rtx_code code)
{
  enum rtx_code cmp_code;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  rtx tmp;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      fp_p = 0;
    }

  if (fp_p && !TARGET_FIX)
    return NULL_RTX;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (fp_p)
	cmp_code = code, code = NE;
      break;

    case NE:
      if (!fp_p && op1 == const0_rtx)
	break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (!fp_p && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      if (fp_p)
	cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!register_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      enum machine_mode mode = fp_p ? DFmode : DImode;

      tmp = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
			      gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));

      op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Return the setcc comparison.  */
  return gen_rtx_fmt_ee (code, DImode, op0, op1);
}
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare.op0;
  rtx op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
	return 0;

      /* If we have fp<->int register move instructions, do a cmov by
	 performing the comparison in fp registers, and move the
	 zero/nonzero value to integer registers, where we can then
	 use a normal cmov, or vice-versa.  */

      switch (code)
	{
	case EQ: case LE: case LT: case LEU: case LTU:
	  /* We have these compares.  */
	  cmp_code = code, code = NE;
	  break;

	case NE:
	  /* This must be reversed.  */
	  cmp_code = EQ, code = EQ;
	  break;

	case GE: case GT: case GEU: case GTU:
	  /* These normally need swapping, but for integer zero we have
	     special patterns that recognize swapped operands.  */
	  if (!fp_p && op1 == const0_rtx)
	    cmp_code = code, code = NE;
	  else
	    {
	      cmp_code = swap_condition (code);
	      code = NE;
	      tem = op0, op0 = op1, op1 = tem;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      tem = gen_reg_rtx (cmp_op_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
			      gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
					      op0, op1)));

      cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
      op0 = gen_lowpart (cmp_op_mode, tem);
      op1 = CONST0_RTX (cmp_op_mode);
      fp_p = !fp_p;
      local_fast_math = 1;
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (!fp_p || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (no_new_pseudos)
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  */
      if (op1 != CONST0_RTX (cmp_mode))
	{
	  code = swap_condition (code);
	  tem = op0, op0 = op1, op1 = tem;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (fp_p && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
}
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
			      rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (! no_new_pseudos)
	subtarget = gen_reg_rtx (DImode);
      else
	subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
	 viable over a longer latency cmove.  On EV5, the E0 slot is a
	 scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
			    GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      if (diff == 1)
	emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
	{
	  add_op = GEN_INT (f);
	  if (sext_add_operand (add_op, mode))
	    {
	      tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
				  GEN_INT (diff));
	      tmp = gen_rtx_PLUS (DImode, tmp, add_op);
	      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
	    }
	  else
	    return 0;
	}
    }
  else
    return 0;

  return 1;
}
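/* A worked example: with t == 8, f == 0 and an EQ test, the splitter
   produces approximately

	cmpeq	$cond,0,$t
	sll	$t,3,$dst		# 8 == 1 << 3

   i.e. a setcc plus a shift instead of materializing both constants
   and issuing a conditional move.  */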
/* Look up the function X_floating library function name for the
   given operation.  */

struct xfloating_op GTY(())
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,		"_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,		"_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,		"_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,		"_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,			"_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,			"_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,			"_OtsLssX", "OTS$LSS_X", 0 },
  { LE,			"_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,			"_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,			"_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,		"_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,		"_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,	"_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,	"_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,	"_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
	rtx func = ops->libcall;
	if (!func)
	  {
	    func = init_one_libfunc (TARGET_ABI_OPEN_VMS
				     ? ops->vms_func : ops->osf_func);
	    ops->libcall = func;
	  }
	return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
				  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
			      int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
	{
	case TFmode:
	  reg = gen_rtx_REG (TFmode, regno);
	  regno += 2;
	  break;

	case DFmode:
	  reg = gen_rtx_REG (DFmode, regno + 32);
	  regno += 1;
	  break;

	case VOIDmode:
	  gcc_assert (GET_CODE (operands[i]) == CONST_INT);
	  /* FALLTHRU */
	case DImode:
	  reg = gen_rtx_REG (DImode, regno);
	  regno += 1;
	  break;

	default:
	  gcc_unreachable ();
	}

      emit_move_insn (reg, operands[i]);
      usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
					const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  CONST_OR_PURE_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
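/* As an illustrative example of the convention described above, a
   TFmode addition becomes a call to _OtsAddX (OTS$ADD_X on VMS) with
   the first operand in $16-$17, the second in $18-$19 and the rounding
   mode literal in $20; the TFmode result comes back in $16-$17, which
   emit_libcall_block then copies into the target.  */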
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
				gen_rtx_fmt_ee (code, TFmode, operands[1],
						operands[2]));
}
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2];

  /* X_floating library comparison functions return
	   -1  unordered
	    0  false
	    1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* ??? Strange mode for equiv because what's actually returned
     is -1,0,1, not a proper boolean value.  */
  alpha_emit_xfloating_libcall (func, out, operands, 2,
				gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));

  return out;
}
/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;

    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;

    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
				gen_rtx_fmt_e (orig_code,
					       GET_MODE (operands[0]),
					       operands[1]));
}
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
			bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      rtx tmp;
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
	move = 2;
      else
	move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
	emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

   unsigned:                       signed:
   word:   ldq_u  r1,X(r11)        ldq_u  r1,X(r11)
	   ldq_u  r2,X+1(r11)      ldq_u  r2,X+1(r11)
	   lda    r3,X(r11)        lda    r3,X+2(r11)
	   extwl  r1,r3,r1         extql  r1,r3,r1
	   extwh  r2,r3,r2         extqh  r2,r3,r2
	   or     r1,r2,r1         or     r1,r2,r1
				   sra    r1,48,r1

   long:   ldq_u  r1,X(r11)        ldq_u  r1,X(r11)
	   ldq_u  r2,X+3(r11)      ldq_u  r2,X+3(r11)
	   lda    r3,X(r11)        lda    r3,X(r11)
	   extll  r1,r3,r1         extll  r1,r3,r1
	   extlh  r2,r3,r2         extlh  r2,r3,r2
	   or     r1,r2,r1         addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
	   ldq_u  r2,X+7(r11)
	   lda    r3,X(r11)
	   extql  r1,r3,r1
	   extqh  r2,r3,r2
	   or     r1,r2,r1
*/
void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
			     HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  enum machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
	tmp = meml, meml = memh, memh = tmp;
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
				  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
				  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
	{
	  addr = gen_lowpart (HImode, addr);
	  emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
	}
      else
	{
	  if (GET_MODE (tgt) != DImode)
	    addr = gen_lowpart (GET_MODE (tgt), addr);
	  emit_move_insn (tgt, addr);
	}
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (mema, ofs),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (mema, ofs + size - 1),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
    {
      emit_move_insn (addr, plus_constant (mema, -1));

      emit_insn (gen_extqh_be (extl, meml, addr));
      emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));

      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
			   addr, 1, OPTAB_WIDEN);
    }
  else if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (mema, ofs+2));

      emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
      emit_insn (gen_extqh_le (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
	 addr for the target, because addr is marked as a pointer and combine
	 knows that pointers are always sign-extended 32 bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
			   addr, 1, OPTAB_WIDEN);
    }
  else
    {
      if (WORDS_BIG_ENDIAN)
	{
	  emit_move_insn (addr, plus_constant (mema, ofs+size-1));
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_extwh_be (extl, meml, addr));
	      mode = HImode;
	      break;

	    case 4:
	      emit_insn (gen_extlh_be (extl, meml, addr));
	      mode = SImode;
	      break;

	    case 8:
	      emit_insn (gen_extqh_be (extl, meml, addr));
	      mode = DImode;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
	}
      else
	{
	  emit_move_insn (addr, plus_constant (mema, ofs));
	  emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_extwh_le (exth, memh, addr));
	      mode = HImode;
	      break;

	    case 4:
	      emit_insn (gen_extlh_le (exth, memh, addr));
	      mode = SImode;
	      break;

	    case 8:
	      emit_insn (gen_extqh_le (exth, memh, addr));
	      mode = DImode;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
			   gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
			   sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
			      HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
	{
	  dstl = gen_lowpart (QImode, src);
	  dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
				      NULL, 1, OPTAB_LIB_WIDEN);
	  dsth = gen_lowpart (QImode, dsth);
	}
      else
	dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
	addr = meml, meml = memh, memh = addr;

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (dsta, ofs),
				      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (dsta, ofs + size - 1),
				      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);
  if (WORDS_BIG_ENDIAN)
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));

      if (src != const0_rtx)
	{
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
	      break;
	    case 4:
	      emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
	      break;
	    case 8:
	      emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
	      break;
	    }
	  emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
				GEN_INT (size*8), addr));
	}

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
	  break;
	case 4:
	  {
	    rtx msk = immed_double_const (0xffffffff, 0, DImode);
	    emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
	    break;
	  }
	case 8:
	  emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
	  break;
	}

      emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
    }
  else
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs));

      if (src != CONST0_RTX (GET_MODE (src)))
	{
	  emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
				GEN_INT (size*8), addr));

	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
	      break;
	    case 4:
	      emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
	      break;
	    case 8:
	      emit_insn (gen_insql_le (insl, src, addr));
	      break;
	    }
	}

      emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
	  break;
	case 4:
	  {
	    rtx msk = immed_double_const (0xffffffff, 0, DImode);
	    emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
	    break;
	  }
	case 8:
	  emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
	  break;
	}
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  if (WORDS_BIG_ENDIAN)
    {
      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
    }
  else
    {
      /* Must store high before low for degenerate case of aligned.  */
      emit_move_insn (memh, dsth);
      emit_move_insn (meml, dstl);
    }
}
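/* The store case mirrors the load tables above; an unaligned quadword
   store comes out roughly as

	ldq_u	r1,X(r11)		# load both enclosing quadwords
	ldq_u	r2,X+7(r11)
	insql	r4,r3,r5		# shift the new data into place
	insqh	r4,r3,r6
	mskql	r1,r3,r1		# clear the bytes to be replaced
	mskqh	r2,r3,r2
	or	r1,r5,r1		# merge...
	or	r2,r6,r2
	stq_u	r2,X+7(r11)		# ...and store, high first
	stq_u	r1,X(r11)

   (a sketch only; the exact ordering is produced by the code above).  */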
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8

/* Load an integral number of consecutive unaligned quadwords.  */
3667 /* Load an integral number of consecutive unaligned quadwords. */
3670 alpha_expand_unaligned_load_words (rtx
*out_regs
, rtx smem
,
3671 HOST_WIDE_INT words
, HOST_WIDE_INT ofs
)
3673 rtx
const im8
= GEN_INT (-8);
3674 rtx
const i64
= GEN_INT (64);
3675 rtx ext_tmps
[MAX_MOVE_WORDS
], data_regs
[MAX_MOVE_WORDS
+1];
3676 rtx sreg
, areg
, tmp
, smema
;
3679 smema
= XEXP (smem
, 0);
3680 if (GET_CODE (smema
) == LO_SUM
)
3681 smema
= force_reg (Pmode
, smema
);
3683 /* Generate all the tmp registers we need. */
3684 for (i
= 0; i
< words
; ++i
)
3686 data_regs
[i
] = out_regs
[i
];
3687 ext_tmps
[i
] = gen_reg_rtx (DImode
);
3689 data_regs
[words
] = gen_reg_rtx (DImode
);
3692 smem
= adjust_address (smem
, GET_MODE (smem
), ofs
);
3694 /* Load up all of the source data. */
3695 for (i
= 0; i
< words
; ++i
)
3697 tmp
= change_address (smem
, DImode
,
3698 gen_rtx_AND (DImode
,
3699 plus_constant (smema
, 8*i
),
3701 set_mem_alias_set (tmp
, 0);
3702 emit_move_insn (data_regs
[i
], tmp
);
3705 tmp
= change_address (smem
, DImode
,
3706 gen_rtx_AND (DImode
,
3707 plus_constant (smema
, 8*words
- 1),
3709 set_mem_alias_set (tmp
, 0);
3710 emit_move_insn (data_regs
[words
], tmp
);
3712 /* Extract the half-word fragments. Unfortunately DEC decided to make
3713 extxh with offset zero a noop instead of zeroing the register, so
3714 we must take care of that edge condition ourselves with cmov. */
3716 sreg
= copy_addr_to_reg (smema
);
3717 areg
= expand_binop (DImode
, and_optab
, sreg
, GEN_INT (7), NULL
,
3719 if (WORDS_BIG_ENDIAN
)
3720 emit_move_insn (sreg
, plus_constant (sreg
, 7));
3721 for (i
= 0; i
< words
; ++i
)
3723 if (WORDS_BIG_ENDIAN
)
3725 emit_insn (gen_extqh_be (data_regs
[i
], data_regs
[i
], sreg
));
3726 emit_insn (gen_extxl_be (ext_tmps
[i
], data_regs
[i
+1], i64
, sreg
));
3730 emit_insn (gen_extxl_le (data_regs
[i
], data_regs
[i
], i64
, sreg
));
3731 emit_insn (gen_extqh_le (ext_tmps
[i
], data_regs
[i
+1], sreg
));
3733 emit_insn (gen_rtx_SET (VOIDmode
, ext_tmps
[i
],
3734 gen_rtx_IF_THEN_ELSE (DImode
,
3735 gen_rtx_EQ (DImode
, areg
,
3737 const0_rtx
, ext_tmps
[i
])));
3740 /* Merge the half-words into whole words. */
3741 for (i
= 0; i
< words
; ++i
)
3743 out_regs
[i
] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
3744 ext_tmps
[i
], data_regs
[i
], 1, OPTAB_WIDEN
);
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
				    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx(DImode);
  st_tmp_1 = gen_reg_rtx(DImode);
  st_tmp_2 = gen_reg_rtx(DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (dmema, words*8 - 1),
					   im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (dreg, plus_constant (dreg, 7));
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
	{
	  if (WORDS_BIG_ENDIAN)
	    {
	      emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
	      emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
	    }
	  else
	    {
	      emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
	      emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
	    }
	}
      for (i = words-1; i > 0; --i)
	{
	  ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
					ins_tmps[i-1], ins_tmps[i-1], 1,
					OPTAB_WIDEN);
	}
    }

  /* Split and merge the ends with the destination data.  */
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
      emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
    }
  else
    {
      emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
      emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
    }

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
			       st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
			       st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_1, st_tmp_1);
  else
    emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
				gen_rtx_AND (DImode,
					     plus_constant(dmema,
					     WORDS_BIG_ENDIAN ? i*8-1 : i*8),
					     im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_2, st_tmp_2);
  else
    emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (GET_CODE (tmp) == REG)
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    src_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    src_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    src_align = 16;
	}
    }

  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    dst_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    dst_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    dst_align = 16;
	}
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
					 words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
	{
	  do {
	    data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	    emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	  alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  emit_move_insn (adjust_address (orig_dst, DImode, ofs),
			  data_regs[i]);
	  ofs += 8;
	  i++;
	}
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
	 two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  gen_lowpart (SImode, data_regs[i]));
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
			  gen_lowpart (SImode, tmp));
	  ofs += 8;
	  i++;
	}

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  data_regs[i]);
	  ofs += 4;
	  i++;
	}
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
	if (GET_MODE (data_regs[i + words]) != DImode)
	  break;

      if (words == 1)
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
	alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
					    words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
	i++;
	ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
	i++;
	ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */

int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
	{
	  if (a >= 64)
	    align = a, alignofs = 8 - c % 8;
	  else if (a >= 32)
	    align = a, alignofs = 4 - c % 4;
	  else if (a >= 16)
	    align = a, alignofs = 2 - c % 2;
	}
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
#if HOST_BITS_PER_WIDE_INT >= 64
      /* Given that alignofs is bounded by align, the only time BWX could
	 generate three stores is for a 7 byte fill.  Prefer two individual
	 stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
	  && align >= 32
	  && !(alignofs == 4 && bytes >= 4))
	{
	  enum machine_mode mode = (align >= 64 ? DImode : SImode);
	  int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
	  if (bytes < alignofs)
	    {
	      mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
	      ofs += bytes;
	      bytes = 0;
	    }
	  else
	    {
	      bytes -= alignofs;
	      ofs += alignofs;
	    }
	  alignofs = 0;

	  tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	}
#endif

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
	{
	  emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
	  bytes -= 1;
	  ofs += 1;
	  alignofs -= 1;
	}
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
	{
	  emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
	  bytes -= 2;
	  ofs += 2;
	  alignofs -= 2;
	}
      if (alignofs == 4 && bytes >= 4)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
	  bytes -= 4;
	  ofs += 4;
	  alignofs = 0;
	}

      /* If we've not used the extra lead alignment information by now,
	 we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
	{
	  alignofs = alignofs & -alignofs;
	  align = MIN (align, alignofs * BITS_PER_UNIT);
	}
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
			const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
	orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
	{
	  rtx mem
	    = change_address (orig_dst, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (orig_dsta, ofs + i*8),
					   GEN_INT (-8)));
	  set_mem_alias_set (mem, 0);
	  emit_move_insn (mem, const0_rtx);
	}

      /* Depending on the alignment, the first stq_u may have overlapped
	 with the initial stl, which means that the last stq_u didn't
	 write as much as it would appear.  Leave those questionable bytes
	 unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
			const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

#if HOST_BITS_PER_WIDE_INT >= 64
  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, DImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
      else if (align >= 32 && bytes < 4)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, SImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
    }
#endif

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
	{
	  do {
	    emit_move_insn (adjust_address (orig_dst, HImode, ofs),
			    const0_rtx);
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;

  if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      HOST_WIDE_INT mask = 0;

      for (i = 7; i >= 0; --i)
	{
	  mask <<= 8;
	  if (!((value >> i) & 1))
	    mask |= 0xff;
	}

      result = gen_int_mode (mask, DImode);
    }
  else
    {
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;

      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);

      for (i = 7; i >= 4; --i)
	{
	  mask_hi <<= 8;
	  if (!((value >> i) & 1))
	    mask_hi |= 0xff;
	}

      for (i = 3; i >= 0; --i)
	{
	  mask_lo <<= 8;
	  if (!((value >> i) & 1))
	    mask_lo |= 0xff;
	}

      result = immed_double_const (mask_lo, mask_hi, DImode);
    }

  return result;
}
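/* For example, VALUE == 0x0f selects the low four bytes for zapping,
   giving the mask 0xffffffff00000000, so zap(x, 0x0f) == x & mask;
   VALUE == 0 zaps nothing and yields an all-ones mask.  */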
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
				   enum machine_mode mode,
				   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;
  emit_insn (fn (res, mem, val));
}
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  if (WORDS_BIG_ENDIAN)
    {
      if (mode == QImode)
	fn = gen_insbl_be;
      else
	fn = gen_inswl_be;
    }
  else
    {
      if (mode == QImode)
	fn = gen_insbl_le;
      else
	fn = gen_inswl_le;
    }
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
		       rtx before, rtx after, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  emit_insn (gen_memory_barrier ());

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_memory_barrier ());
}
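/* The emitted loop looks roughly like this for a DImode fetch-and-add
   (sketch only; the barriers bracket the whole sequence):

	mb
   1:	ldq_l	$t,0($mem)		# load-locked the old value
	addq	$t,$val,$s		# compute the new value
	stq_c	$s,0($mem)		# store-conditional
	beq	$s,1b			# lost the reservation; retry
	mb
   */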
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
			      rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);

  emit_insn (gen_memory_barrier ());

  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, x, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (scratch, newval);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
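/* Sketch of the sequence emitted above for DImode and nonzero OLDVAL:

	mb
   1:	ldq_l	$ret,0($mem)
	cmpeq	$ret,$old,$c
	beq	$c,2f			# current value differs; give up
	mov	$new,$s
	stq_c	$s,0($mem)
	beq	$s,1b			# reservation lost; retry
	mb
   2:
   */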
void
alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);

  addr = force_reg (DImode, XEXP (mem, 0));
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);
  newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn5 = gen_sync_compare_and_swapqi_1;
  else
    fn5 = gen_sync_compare_and_swaphi_1;
  emit_insn (fn5 (wdst, addr, oldval, newval, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
				 rtx oldval, rtx newval, rtx align,
				 rtx scratch, rtx cond)
{
  rtx label1, label2, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  emit_insn (gen_memory_barrier ());
  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_extxl_be (dest, scratch, width, addr));
  else
    emit_insn (gen_extxl_le (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, dest, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
  else
    emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
  emit_insn (gen_iordi3 (scratch, scratch, newval));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_lowpart (DImode, scratch);

  emit_insn (gen_memory_barrier ());

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);
}
void
alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn4) (rtx, rtx, rtx, rtx);

  /* Force the address into a register.  */
  addr = force_reg (DImode, XEXP (mem, 0));

  /* Align it to a multiple of 8.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn4 = gen_sync_lock_test_and_setqi_1;
  else
    fn4 = gen_sync_lock_test_and_sethi_1;
  emit_insn (fn4 (wdst, addr, val, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
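/* ANDing the address with -8 (i.e. ~7) yields the start of the enclosing
   aligned quadword.  For example, for an address of 0x1003 the locked
   load targets 0x1000, and the low three address bits select byte lane 3
   for the ins/ext/msk instructions used by the split routines.  */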
void
alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
                                  rtx val, rtx align, rtx scratch)
{
  rtx label, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  emit_insn (gen_memory_barrier ());
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_extxl_be (dest, scratch, width, addr));
      emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
    }
  else
    {
      emit_insn (gen_extxl_le (dest, scratch, width, addr));
      emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
    }
  emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (REG_NOTE_KIND (link) != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency - 1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}
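/* With the default alpha_memory_latency of 3, a dependency on a load
   therefore costs two extra cycles beyond what the DFA description
   already charges for the dependency.  */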
/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
/* Machine-specific function data.  */

struct machine_function GTY(())
{
  /* For unicosmk.  */
  /* List of call information words for calls from this function.  */
  struct rtx_def *first_ciw;
  struct rtx_def *last_ciw;
  int ciw_count;

  /* List of deferred case vectors.  */
  struct rtx_def *addr_list;

  /* For OSF.  */
  const char *some_ld_name;

  /* For TARGET_LD_BUGGY_LDGP.  */
  struct rtx_def *gp_save_rtx;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ((struct machine_function *)
          ggc_alloc_cleared (sizeof (struct machine_function)));
}
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}

/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();
      emit_insn_after (seq, entry_of_function ());

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}

static int
alpha_ra_ever_killed (void)
{
  rtx top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return regs_ever_live[REG_RA];

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
        return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
        return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
        case ALPHA_FPTM_SUI:
          return "sv";
        default:
          gcc_unreachable ();
        }

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return NULL;
        case ALPHA_FPTM_U:
          return "v";
        case ALPHA_FPTM_SU:
          return "sv";
        case ALPHA_FPTM_SUI:
          return "svi";
        default:
          gcc_unreachable ();
        }
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          return "u";
        case ALPHA_FPTM_U:
          return "u";
        case ALPHA_FPTM_SU:
          return "su";
        case ALPHA_FPTM_SUI:
          return "sui";
        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;

    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
        {
        case ALPHA_FPRM_NORM:
          return NULL;
        case ALPHA_FPRM_MINF:
          return "m";
        case ALPHA_FPRM_CHOP:
          return "c";
        case ALPHA_FPRM_DYN:
          return "d";
        default:
          gcc_unreachable ();
        }
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
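/* As an example of how the two suffixes combine in print_operand's '/'
   code below: with -mfp-trap-mode=su and -mfp-rounding-mode=d, an addt
   instruction is printed as roughly "addt/sud" (or with the slash
   omitted when the assembler dislikes the slash form).  */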
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some movdi_er_tlsldm pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      assemble_name (file, get_some_local_dynamic_name ());
      break;

    case '/':
      {
        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

        if (trap || round)
          fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
                   (trap ? trap : ""), (round ? round : ""));
        break;
      }

    case ',':
      /* Generates single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '+':
      /* Generates a nop after a noreturn call at the very end of the
         function.  */
      if (next_real_insn (current_output_insn) == 0)
        fprintf (file, "\n\tnop");
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;
    case 'H':
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
      else
        output_operand_lossage ("invalid %%H value");
      break;

    case 'J':
      {
        const char *lituse;

        if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsgd";
          }
        else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
          {
            x = XVECEXP (x, 0, 0);
            lituse = "lituse_tlsldm";
          }
        else if (GET_CODE (x) == CONST_INT)
          lituse = "lituse_jsr";
        else
          {
            output_operand_lossage ("invalid %%J value");
            break;
          }

        if (x != const0_rtx)
          fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
        const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
        lituse = "lituse_jsrdirect";
#else
        lituse = "lituse_jsr";
#endif

        gcc_assert (INTVAL (x) != 0);
        fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;
    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;
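    /* For example, for the constant 0x12345678, %h prints 4660 (0x1234)
       and %L prints 22136 (0x5678); for 0x18000, %L prints -32768
       because the low half is sign-extended, while %h still prints 1.  */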
    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
        }
      else if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;

    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (GET_CODE (x) != CONST_INT
          || (INTVAL (x) != 8 && INTVAL (x) != 16
              && INTVAL (x) != 32 && INTVAL (x) != 64))
        output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
               (INTVAL (x) == 8 ? "b"
                : INTVAL (x) == 16 ? "w"
                : INTVAL (x) == 32 ? "l"
                : "q"));
      break;
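    /* Example for %m: the AND mask 0x00000000ffff0000 covers bytes 2
       and 3 only, so the printed ZAP byte mask is 12 (bits 2 and 3
       set).  */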
    case 'U':
      /* Similar, except do it from the mask.  */
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT value = INTVAL (x);

          if (value == 0xff)
            {
              fputc ('b', file);
              break;
            }
          if (value == 0xffff)
            {
              fputc ('w', file);
              break;
            }
          if (value == 0xffffffff)
            {
              fputc ('l', file);
              break;
            }
          if (value == -1)
            {
              fputc ('q', file);
              break;
            }
        }
      else if (HOST_BITS_PER_WIDE_INT == 32
               && GET_CODE (x) == CONST_DOUBLE
               && CONST_DOUBLE_LOW (x) == 0xffffffff
               && CONST_DOUBLE_HIGH (x) == 0)
        {
          fputc ('l', file);
          break;
        }
      output_operand_lossage ("invalid %%U value");
      break;
    case 's':
      /* Write the constant value divided by 8 for little-endian mode or
         (56 - value) / 8 for big-endian mode.  */
      if (GET_CODE (x) != CONST_INT
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
                                                     ? 56
                                                     : 64)
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               WORDS_BIG_ENDIAN
               ? (56 - INTVAL (x)) / 8
               : INTVAL (x) / 8);
      break;

    case 'S':
      /* Same, except compute (64 - c) / 8.  */
      if (GET_CODE (x) != CONST_INT
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%S value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;
    case 't':
      {
        /* On Unicos/Mk systems: use a DEX expression if the symbol
           clashes with a register name.  */
        int dex = unicosmk_need_dex (x);
        if (dex)
          fprintf (file, "DEX(%d)", dex);
        else
          output_addr_const (file, x);
      }
      break;
    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
        enum rtx_code c = GET_CODE (x);

        if (!COMPARISON_P (x))
          output_operand_lossage ("invalid %%C value");

        else if (code == 'D')
          c = reverse_condition (c);
        else if (code == 'c')
          c = swap_condition (c);
        else if (code == 'd')
          c = swap_condition (reverse_condition (c));

        if (c == LEU)
          fprintf (file, "ule");
        else if (c == LTU)
          fprintf (file, "ult");
        else if (c == UNORDERED)
          fprintf (file, "un");
        else
          fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;
    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
        {
        case DIV:
          fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UDIV:
          fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case MOD:
          fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
          break;
        case UMOD:
          fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
          break;
        default:
          output_operand_lossage ("invalid %%E value");
          break;
        }
      break;
    case 'A':
      /* Write "_u" for unaligned access.  */
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
        fprintf (file, "_u");
      break;
    case 0:
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (GET_CODE (x) == MEM)
        output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
        {
          switch (XINT (XEXP (x, 0), 1))
            {
            case UNSPEC_DTPREL:
            case UNSPEC_TPREL:
              output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              break;
            }
        }
      else
        output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
void
print_operand_address (FILE *file, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && GET_CODE (XEXP (addr, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
        {
          op1 = XEXP (op1, 0);
          switch (XINT (op1, 1))
            {
            case UNSPEC_DTPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
              break;
            case UNSPEC_TPREL:
              reloc16 = NULL;
              reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
              break;
            default:
              output_operand_lossage ("unknown relocation unspec");
              return;
            }

          output_addr_const (file, XVECEXP (op1, 0, 0));
        }
      else
        {
          reloc16 = "gprel";
          reloclo = "gprellow";
          output_addr_const (file, op1);
        }

      if (offset)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
        {
        case REG:
          basereg = REGNO (addr);
          break;

        case SUBREG:
          basereg = subreg_regno (addr);
          break;

        default:
          gcc_unreachable ();
        }

      fprintf (file, "($%d)\t\t!%s", basereg,
               (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

#if TARGET_ABI_OPEN_VMS
    case SYMBOL_REF:
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
               XSTR (XEXP (XEXP (addr, 0), 0), 0),
               INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;
#endif

    default:
      gcc_unreachable ();
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.

   The three offset parameters are for the individual template's
   layout.  A JMPOFS < 0 indicates that the trampoline does not
   contain instructions at all.

   We assume here that a function will be called many more times than
   its address is taken (e.g., it might be passed to qsort), so we
   take the trouble to initialize the "hint" field in the JMP insn.
   Note that the hint field is PC (new) + 4 * bits 13:0.  */

void
alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
                             int fnofs, int cxtofs, int jmpofs)
{
  rtx temp, temp1, addr;
  /* VMS really uses DImode pointers in memory at this point.  */
  enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (mode, fnaddr);
  cxt = convert_memory_address (mode, cxt);
#endif

  /* Store function address and CXT.  */
  addr = memory_address (mode, plus_constant (tramp, fnofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
  addr = memory_address (mode, plus_constant (tramp, cxtofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), cxt);

  /* This has been disabled since the hint only has a 32k range, and in
     no existing OS is the stack within 32k of the text segment.  */
  if (0 && jmpofs >= 0)
    {
      /* Compute hint value.  */
      temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
      temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
                           OPTAB_WIDEN);
      temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
                           build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
      temp = expand_and (SImode, gen_lowpart (SImode, temp),
                         GEN_INT (0x3fff), 0);

      /* Merge in the hint.  */
      addr = memory_address (SImode, plus_constant (tramp, jmpofs));
      temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
      temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
      temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
                            OPTAB_WIDEN);
      emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
    }

#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (init_one_libfunc ("__enable_execute_stack"),
                     0, VOIDmode, 1, tramp, Pmode);
#endif

  if (jmpofs >= 0)
    emit_insn (gen_imb ());
}
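/* Had the hint initialization remained enabled, the computation above
   would implement the architected JMP hint: roughly
   ((fnaddr - (jmp_pc + 4)) >> 2) & 0x3fff, i.e. a 14-bit word offset
   from the updated PC merged into bits 13:0 of the JMP insn.  */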
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

rtx
function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)
{
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
#ifdef ENABLE_CHECKING
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
         values here.  */
      gcc_assert (!COMPLEX_MODE_P (mode));
#endif

      /* Set up defaults for FP operands passed in FP registers, and
         integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
        basereg = 32 + 16;
      else
        basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the three platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (mode == VOIDmode)
        return alpha_arg_info_reg_val (cum);

      num_args = cum.num_args;
      if (num_args >= 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#elif TARGET_ABI_UNICOSMK
    {
      int size;

      /* If this is the last argument, generate the call info word (CIW).  */
      /* ??? We don't include the caller's line number in the CIW because
         I don't know how to determine it if debug infos are turned off.  */
      if (mode == VOIDmode)
        {
          int i;
          HOST_WIDE_INT lo;
          HOST_WIDE_INT hi;
          rtx ciw;

          lo = 0;

          for (i = 0; i < cum.num_reg_words && i < 5; i++)
            if (cum.reg_args_type[i])
              lo |= (1 << (7 - i));

          if (cum.num_reg_words == 6 && cum.reg_args_type[5])
            lo |= 7;
          else
            lo |= cum.num_reg_words;

#if HOST_BITS_PER_WIDE_INT == 32
          hi = (cum.num_args << 20) | cum.num_arg_words;
#else
          lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
            | ((HOST_WIDE_INT) cum.num_arg_words << 32);
          hi = 0;
#endif
          ciw = immed_double_const (lo, hi, DImode);

          return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
                                 UNSPEC_UMK_LOAD_CIW);
        }

      size = ALPHA_ARG_SIZE (mode, type, named);
      num_args = cum.num_reg_words;
      if (cum.force_stack
          || cum.num_reg_words + size > 6
          || targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
      else if (type && TYPE_MODE (type) == BLKmode)
        {
          rtx reg1, reg2;

          reg1 = gen_rtx_REG (DImode, num_args + 16);
          reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);

          /* The argument fits in two registers.  Note that we still need to
             reserve a register for empty structures.  */
          if (size == 0)
            return NULL_RTX;
          else if (size == 1)
            return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
          else
            {
              reg2 = gen_rtx_REG (DImode, num_args + 17);
              reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
            }
        }
    }
#elif TARGET_ABI_OSF
    {
      if (cum >= 6)
        return NULL_RTX;
      num_args = cum;

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
        basereg = 16;
      else if (targetm.calls.must_pass_in_stack (mode, type))
        return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}
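/* For reference, the Unicos/Mk CIW built above packs, roughly: one flag
   bit per register argument in bits 7..3 (arguments 0-4), the number of
   register words (or 7 when a flagged sixth register argument is
   present) in the low bits, and -- with a 64-bit HOST_WIDE_INT -- the
   total argument count at bit 52 and the total argument words at
   bit 32.  */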
static int
alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                         enum machine_mode mode ATTRIBUTE_UNUSED,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  int words = 0;

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_UNICOSMK
  /* Never any split arguments.  */
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory.  */
      if (AGGREGATE_TYPE_P (type))
        return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
         not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
         aggregate_value_p.  We must return something, but it's not
         clear what's safe to return.  Pretend it's a struct I
         guess.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
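/* So, for example, a _Complex double (16 bytes) is still returned in
   registers because only its 8-byte element size is tested, while any
   struct, however small, goes in memory.  */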
/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
                         enum machine_mode mode,
                         tree type ATTRIBUTE_UNUSED,
                         bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}
/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

rtx
function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
                enum machine_mode mode)
{
  unsigned int regnum, dummy;
  enum mode_class class;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  class = GET_MODE_CLASS (mode);
  switch (class)
    {
    case MODE_INT:
      PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
        enum machine_mode cmode = GET_MODE_INNER (mode);

        return gen_rtx_PARALLEL
          (VOIDmode,
           gen_rtvec (2,
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
                                         const0_rtx),
                      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
                                         GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (tree type)
{
  return TYPE_MODE (type) != TCmode;
}
static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++? SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  TREE_CHAIN (ofs) = space;

  base = build_decl (FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  TREE_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static tree
va_list_skip_additions (tree lhs)
{
  tree rhs, stmt;

  if (TREE_CODE (lhs) != SSA_NAME)
    return lhs;

  for (;;)
    {
      stmt = SSA_NAME_DEF_STMT (lhs);

      if (TREE_CODE (stmt) == PHI_NODE)
        return stmt;

      if (TREE_CODE (stmt) != MODIFY_EXPR
          || TREE_OPERAND (stmt, 0) != lhs)
        return lhs;

      rhs = TREE_OPERAND (stmt, 1);
      if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
        rhs = TREE_OPERAND (rhs, 0);

      if ((TREE_CODE (rhs) != NOP_EXPR
           && TREE_CODE (rhs) != CONVERT_EXPR
           && (TREE_CODE (rhs) != PLUS_EXPR
               || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
               || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
          || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
        return rhs;

      lhs = TREE_OPERAND (rhs, 0);
    }
}
/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
           + ((ap.__offset + cst <= 47)
              ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.
   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
   GPR registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the current
   statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
{
  tree base, offset, arg1, arg2;
  int offset_arg = 1;

  if (TREE_CODE (rhs) != INDIRECT_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (lhs == NULL_TREE
      || TREE_CODE (lhs) != PLUS_EXPR)
    return false;

  base = TREE_OPERAND (lhs, 0);
  if (TREE_CODE (base) == SSA_NAME)
    base = va_list_skip_additions (base);

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = TREE_OPERAND (lhs, 0);
      if (TREE_CODE (base) == SSA_NAME)
        base = va_list_skip_additions (base);

      if (TREE_CODE (base) != COMPONENT_REF
          || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
        return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
    return false;

  offset = TREE_OPERAND (lhs, offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    offset = va_list_skip_additions (offset);

  if (TREE_CODE (offset) == PHI_NODE)
    {
      HOST_WIDE_INT sub;

      if (PHI_NUM_ARGS (offset) != 2)
        goto escapes;

      arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
      arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
      if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
        {
          tree tem = arg1;
          arg1 = arg2;
          arg2 = tem;

          if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
            goto escapes;
        }
      if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
        goto escapes;

      sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
      if (TREE_CODE (arg2) == MINUS_EXPR)
        sub = -sub;
      if (sub < -48 || sub > -32)
        goto escapes;

      arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
      if (arg1 != arg2)
        goto escapes;

      if (TREE_CODE (arg1) == SSA_NAME)
        arg1 = va_list_skip_additions (arg1);

      if (TREE_CODE (arg1) != COMPONENT_REF
          || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
          || get_base_address (arg1) != base)
        goto escapes;

      /* Need floating point regs.  */
      cfun->va_list_fpr_size |= 2;
    }
  else if (TREE_CODE (offset) != COMPONENT_REF
           || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
           || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

escapes:
  si->va_list_escapes = true;
  return false;
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                              tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *pcum;

  /* Skip the current argument.  */
  FUNCTION_ARG_ADVANCE (cum, mode, type, 1);

#if TARGET_ABI_UNICOSMK
  /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
     arguments on the stack.  Unfortunately, it doesn't always store the first
     one (i.e. the one that arrives in $16 or $f16).  This is not a problem
     with stdargs as we always have at least one named argument there.  */
  if (cum.num_reg_words < 6)
    {
      if (!no_rtl)
        {
          emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
          emit_insn (gen_arg_home_umk ());
        }
      *pretend_size = 0;
    }
#elif TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
        {
          emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
          emit_insn (gen_arg_home ());
        }
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count, set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
        count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
         are needed by the detected va_arg statements.  See above for
         how these values are computed.  Note that the "escape" value
         is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
         these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            (cum + 6) * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum, tmp, count);
        }

      if (cfun->va_list_fpr_size & 2)
        {
          tmp = gen_rtx_MEM (BLKmode,
                             plus_constant (virtual_incoming_args_rtx,
                                            cum * UNITS_PER_WORD));
          MEM_NOTRAP_P (tmp) = 1;
          set_mem_alias_set (tmp, set);
          move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
        }
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
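/* The resulting OSF pretend-args block is 12 words: the six FP argument
   registers are dumped into the first 48 bytes and the six integer
   argument registers into the following 48, which is the layout that
   alpha_va_start and alpha_gimplify_va_arg_1 below rely on.  */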
static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  if (TARGET_ABI_UNICOSMK)
    std_expand_builtin_va_start (valist, nextarg);

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      nextarg = plus_constant (nextarg, offset);
      nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
                  make_tree (ptr_type_node, nextarg));
      TREE_SIDE_EFFECTS (t) = 1;

      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = TREE_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                           valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                             valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = build2 (PLUS_EXPR, ptr_type_node, t,
                  build_int_cst (NULL_TREE, offset));
      t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
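/* Concretely, for an OSF function with two named arguments, va_start
   leaves __offset == 16 and __base == AP + 48; the gimplifier below
   then fetches GPR-passed values at roughly __base + __offset and
   FP-passed values at __base + __offset - 48 for as long as
   __offset < 48.  */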
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
{
  tree type_size, ptr_type, addend, t, addr, internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
                  build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
      gimplify_and_add (t, pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type (type);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
         be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
                                           offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
                              addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
                            fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
                 fold_convert (ptr_type, addend));
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  append_to_statement_list (internal_post, pre_p);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }

  t = fold_convert (TREE_TYPE (offset), t);
  t = build2 (MODIFY_EXPR, void_type_node, offset,
              build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
  gimplify_and_add (t, pre_p);

  return build_va_arg_indirect_ref (addr);
}
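/* The size computation above rounds each argument up to a whole number
   of 8-byte words: e.g. a 12-byte structure advances __offset by
   (12 + 7) / 8 * 8 == 16 bytes.  */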
static tree
alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = TREE_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
                       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
                         valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  t = build2 (MODIFY_EXPR, void_type_node, offset_field,
              fold_convert (TREE_TYPE (offset_field), offset));
  gimplify_and_add (t, pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_THREAD_POINTER,
  ALPHA_BUILTIN_SET_THREAD_POINTER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};
static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_builtin_extbl,
  CODE_FOR_builtin_extwl,
  CODE_FOR_builtin_extll,
  CODE_FOR_builtin_extql,
  CODE_FOR_builtin_extwh,
  CODE_FOR_builtin_extlh,
  CODE_FOR_builtin_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_builtin_insql,
  CODE_FOR_builtin_inswh,
  CODE_FOR_builtin_inslh,
  CODE_FOR_builtin_insqh,
  CODE_FOR_builtin_mskbl,
  CODE_FOR_builtin_mskwl,
  CODE_FOR_builtin_mskll,
  CODE_FOR_builtin_mskql,
  CODE_FOR_builtin_mskwh,
  CODE_FOR_builtin_msklh,
  CODE_FOR_builtin_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_load_tp,
  CODE_FOR_set_tp,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};

struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};
static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",  ALPHA_BUILTIN_IMPLVER,  0, true },
  { "__builtin_alpha_rpcc",     ALPHA_BUILTIN_RPCC,     0, false }
};

static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",    ALPHA_BUILTIN_AMASK,    0, true },
  { "__builtin_alpha_pklb",     ALPHA_BUILTIN_PKLB,     MASK_MAX, true },
  { "__builtin_alpha_pkwb",     ALPHA_BUILTIN_PKWB,     MASK_MAX, true },
  { "__builtin_alpha_unpkbl",   ALPHA_BUILTIN_UNPKBL,   MASK_MAX, true },
  { "__builtin_alpha_unpkbw",   ALPHA_BUILTIN_UNPKBW,   MASK_MAX, true },
  { "__builtin_alpha_cttz",     ALPHA_BUILTIN_CTTZ,     MASK_CIX, true },
  { "__builtin_alpha_ctlz",     ALPHA_BUILTIN_CTLZ,     MASK_CIX, true },
  { "__builtin_alpha_ctpop",    ALPHA_BUILTIN_CTPOP,    MASK_CIX, true }
};

static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",   ALPHA_BUILTIN_CMPBGE,   0, true },
  { "__builtin_alpha_extbl",    ALPHA_BUILTIN_EXTBL,    0, true },
  { "__builtin_alpha_extwl",    ALPHA_BUILTIN_EXTWL,    0, true },
  { "__builtin_alpha_extll",    ALPHA_BUILTIN_EXTLL,    0, true },
  { "__builtin_alpha_extql",    ALPHA_BUILTIN_EXTQL,    0, true },
  { "__builtin_alpha_extwh",    ALPHA_BUILTIN_EXTWH,    0, true },
  { "__builtin_alpha_extlh",    ALPHA_BUILTIN_EXTLH,    0, true },
  { "__builtin_alpha_extqh",    ALPHA_BUILTIN_EXTQH,    0, true },
  { "__builtin_alpha_insbl",    ALPHA_BUILTIN_INSBL,    0, true },
  { "__builtin_alpha_inswl",    ALPHA_BUILTIN_INSWL,    0, true },
  { "__builtin_alpha_insll",    ALPHA_BUILTIN_INSLL,    0, true },
  { "__builtin_alpha_insql",    ALPHA_BUILTIN_INSQL,    0, true },
  { "__builtin_alpha_inswh",    ALPHA_BUILTIN_INSWH,    0, true },
  { "__builtin_alpha_inslh",    ALPHA_BUILTIN_INSLH,    0, true },
  { "__builtin_alpha_insqh",    ALPHA_BUILTIN_INSQH,    0, true },
  { "__builtin_alpha_mskbl",    ALPHA_BUILTIN_MSKBL,    0, true },
  { "__builtin_alpha_mskwl",    ALPHA_BUILTIN_MSKWL,    0, true },
  { "__builtin_alpha_mskll",    ALPHA_BUILTIN_MSKLL,    0, true },
  { "__builtin_alpha_mskql",    ALPHA_BUILTIN_MSKQL,    0, true },
  { "__builtin_alpha_mskwh",    ALPHA_BUILTIN_MSKWH,    0, true },
  { "__builtin_alpha_msklh",    ALPHA_BUILTIN_MSKLH,    0, true },
  { "__builtin_alpha_mskqh",    ALPHA_BUILTIN_MSKQH,    0, true },
  { "__builtin_alpha_umulh",    ALPHA_BUILTIN_UMULH,    0, true },
  { "__builtin_alpha_zap",      ALPHA_BUILTIN_ZAP,      0, true },
  { "__builtin_alpha_zapnot",   ALPHA_BUILTIN_ZAPNOT,   0, true },
  { "__builtin_alpha_minub8",   ALPHA_BUILTIN_MINUB8,   MASK_MAX, true },
  { "__builtin_alpha_minsb8",   ALPHA_BUILTIN_MINSB8,   MASK_MAX, true },
  { "__builtin_alpha_minuw4",   ALPHA_BUILTIN_MINUW4,   MASK_MAX, true },
  { "__builtin_alpha_minsw4",   ALPHA_BUILTIN_MINSW4,   MASK_MAX, true },
  { "__builtin_alpha_maxub8",   ALPHA_BUILTIN_MAXUB8,   MASK_MAX, true },
  { "__builtin_alpha_maxsb8",   ALPHA_BUILTIN_MAXSB8,   MASK_MAX, true },
  { "__builtin_alpha_maxuw4",   ALPHA_BUILTIN_MAXUW4,   MASK_MAX, true },
  { "__builtin_alpha_maxsw4",   ALPHA_BUILTIN_MAXSW4,   MASK_MAX, true },
  { "__builtin_alpha_perr",     ALPHA_BUILTIN_PERR,     MASK_MAX, true }
};

static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;
static void
alpha_init_builtins (void)
{
  const struct alpha_builtin_def *p;
  tree dimode_integer_type_node;
  tree ftype, attrs[2];
  size_t i;

  dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);

  attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
  attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);

  ftype = build_function_type (dimode_integer_type_node, void_list_node);

  p = zero_arg_builtins;
  for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
                                   NULL, attrs[p->is_const]);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);

  p = one_arg_builtins;
  for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
                                   NULL, attrs[p->is_const]);

  ftype = build_function_type_list (dimode_integer_type_node,
                                    dimode_integer_type_node,
                                    dimode_integer_type_node, NULL_TREE);

  p = two_arg_builtins;
  for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
                                   NULL, attrs[p->is_const]);

  ftype = build_function_type (ptr_type_node, void_list_node);
  lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
                               ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
                               NULL, attrs[0]);

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
                               ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
                               NULL, attrs[0]);

  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arglist = TREE_OPERAND (exp, 1);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  for (arglist = TREE_OPERAND (exp, 1), arity = 0;
       arglist;
       arglist = TREE_CHAIN (arglist), arity++)
    {
      const struct insn_operand_data *insn_op;

      tree arg = TREE_VALUE (arglist);
      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
        op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
    }

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by config.gcc.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif

/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8 bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
        {
          unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
          unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
          if (c0 >= c1)
            val |= 1 << i;
        }
      return build_int_cst (long_integer_type_node, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (long_integer_type_node, 0xff);
  return NULL;
}
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
                           long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
        if ((opint[1] >> i) & 1)
          mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
        return build_int_cst (long_integer_type_node, opint[0] & mask);

      if (op)
        return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
                             build_int_cst (long_integer_type_node, mask)));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);
  return NULL;
}
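/* For example, zapnot with a byte mask of 0x0f keeps the low four
   bytes, so the constant fold above reduces it to x & 0xffffffff.  */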
/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      loc *= 8;

      if (loc != 0)
        {
          if (op_const & 1)
            {
              unsigned HOST_WIDE_INT temp = opint[0];
              if (is_high)
                temp <<= loc;
              else
                temp >>= loc;
              opint[0] = temp;
              zap_const = 3;
            }
        }
      else
        zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}
/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
        {
          byteloc = (64 - (loc * 8)) & 0x3f;
          if (byteloc == 0)
            zap_op = op;
          else
            temp >>= byteloc;
          bytemask >>= 8;
        }
      else
        {
          byteloc = loc * 8;
          if (byteloc == 0)
            zap_op = op;
          else
            temp <<= byteloc;
        }

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}
static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
                          long op_const, unsigned HOST_WIDE_INT bytemask,
                          bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
        loc ^= 7;
      bytemask <<= loc;

      if (is_high)
        bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}
static tree
alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
{
  switch (op_const)
    {
    case 3:
      {
        unsigned HOST_WIDE_INT l;
        HOST_WIDE_INT h;

        mul_double (opint[0], 0, opint[1], 0, &l, &h);

#if HOST_BITS_PER_WIDE_INT > 64
# error fixme
#endif

        return build_int_cst (long_integer_type_node, h);
      }

    case 2:
      opint[1] = opint[0];
      /* FALLTHRU */
    case 1:
      /* Note that (X*1) >> 64 == 0.  */
      if (opint[1] == 0 || opint[1] == 1)
        return build_int_cst (long_integer_type_node, 0);
      break;
    }
  return NULL;
}
static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold (build2 (code, vtype, op0, op1));
  return fold_convert (long_integer_type_node, val);
}
static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
        temp += a - b;
      else
        temp += b - a;
    }

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (long_integer_type_node, temp);
}
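/* The classic trick: op &= op - 1 clears the lowest set bit, so the
   loop above runs once per set bit; e.g. 0xb (binary 1011) takes three
   iterations.  */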
6992 /* Fold one of our builtin functions. */
6995 alpha_fold_builtin (tree fndecl
, tree arglist
, bool ignore ATTRIBUTE_UNUSED
)
6997 tree op
[MAX_ARGS
], t
;
6998 unsigned HOST_WIDE_INT opint
[MAX_ARGS
];
6999 long op_const
= 0, arity
= 0;
7001 for (t
= arglist
; t
; t
= TREE_CHAIN (t
), ++arity
)
7003 tree arg
= TREE_VALUE (t
);
7004 if (arg
== error_mark_node
)
7006 if (arity
>= MAX_ARGS
)
7011 if (TREE_CODE (arg
) == INTEGER_CST
)
7013 op_const
|= 1L << arity
;
7014 opint
[arity
] = int_cst_value (arg
);
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_UMULH:
      return alpha_fold_builtin_umulh (opint, op_const);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
    case ALPHA_BUILTIN_THREAD_POINTER:
    case ALPHA_BUILTIN_SET_THREAD_POINTER:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}
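
/* Expository note on the dispatch table above: the fourth argument to the
   ext/ins/msk folders is the byte mask naming the field width -- 0x01,
   0x03, 0x0f and 0xff select the byte, word, longword and quadword
   variants respectively -- and the final flag distinguishes the "low"
   from the "high" forms, mirroring the EXTxL/EXTxH instruction pairs
   these builtins fold.  */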
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types
  {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
/* Compute register masks for saved registers.  */

static void
alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  unsigned int i;

  /* When outputting a thunk, we don't have valid register life info,
     but assemble_start_function wants to output .frame and .mask
     directives.  */
  if (current_function_is_thunk)
    {
      *imaskP = 0;
      *fmaskP = 0;
      return;
    }

  if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
    imask |= (1UL << HARD_FRAME_POINTER_REGNUM);

  /* One for every register we have to save.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! fixed_regs[i] && ! call_used_regs[i]
	&& regs_ever_live[i] && i != REG_RA
	&& (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
      {
	if (i < 32)
	  imask |= (1UL << i);
	else
	  fmask |= (1UL << (i - 32));
      }

  /* We need to restore these for the handler.  */
  if (current_function_calls_eh_return)
    {
      for (i = 0; ; ++i)
	{
	  unsigned regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	  imask |= 1UL << regno;
	}
    }

  /* If any register spilled, then spill the return address also.  */
  /* ??? This is required by the Digital stack unwind specification
     and isn't needed if we're doing Dwarf2 unwinding.  */
  if (imask || fmask || alpha_ra_ever_killed ())
    imask |= (1UL << REG_RA);

  *imaskP = imask;
  *fmaskP = fmask;
}
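
/* Expository example: a function that keeps values in the call-saved
   registers $9 and $f2 gets bit 9 set in *IMASKP and bit 2 in *FMASKP;
   and because something was spilled, the final test above then forces
   the REG_RA bit into the integer mask as well.  */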
int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  if (TARGET_ABI_UNICOSMK)
    {
      if (mask[0] || mask[1])
	sa_size = 14;
    }
  else
    {
      for (j = 0; j < 2; ++j)
	for (i = 0; i < 32; ++i)
	  if ((mask[j] >> i) & 1)
	    sa_size++;
    }

  if (TARGET_ABI_UNICOSMK)
    {
      /* We might not need to generate a frame if we don't make any calls
	 (including calls to __T3E_MISMATCH if this is a vararg function),
	 don't have any local variables which require stack slots, don't
	 use alloca and have not determined that we need a frame for other
	 reasons.  */

      alpha_procedure_type
	= (sa_size || get_frame_size() != 0
	   || current_function_outgoing_args_size
	   || current_function_stdarg || current_function_calls_alloca
	   || frame_pointer_needed)
	  ? PT_STACK : PT_REGISTER;

      /* Always reserve space for saving callee-saved registers if we
	 need a frame as required by the calling convention.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size = 14;
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Start by assuming we can use a register procedure if we don't
	 make any calls (REG_RA not used) or need to save any
	 registers and a stack procedure if we do.  */
      if ((mask[0] >> REG_RA) & 1)
	alpha_procedure_type = PT_STACK;
      else if (get_frame_size() != 0)
	alpha_procedure_type = PT_REGISTER;
      else
	alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
	 made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
	 If we need FP for something else or if we receive a nonlocal
	 goto (which expects PV to contain the value), we must use PV.
	 Otherwise, start by assuming we can use FP.  */

      vms_base_regno
	= (frame_pointer_needed
	   || current_function_has_nonlocal_label
	   || alpha_procedure_type == PT_STACK
	   || current_function_outgoing_args_size)
	  ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
	 in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
	for (i = 0; i < 32; i++)
	  if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
	    vms_save_fp_regno = i;

      if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
	vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
	vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP and RA.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size += 2;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
	sa_size++;
    }

  return sa_size * 8;
}
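
/* Worked example: on OSF, saving three registers would give an odd
   count, so the adjustment above pads it to four; the function then
   returns 4 * 8 == 32 bytes, keeping the save area a multiple of 16.  */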
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (current_function_outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + current_function_pretend_args_size)
	      - current_function_pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

int
alpha_pv_save_size (void)
{
  alpha_sa_size ();
  return alpha_procedure_type == PT_STACK ? 8 : 0;
}

int
alpha_using_fp (void)
{
  alpha_sa_size ();
  return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
}
#if TARGET_ABI_OPEN_VMS

const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};

#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (current_function_is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (current_function_has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return 1;

  return 0;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
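
/* Usage example: the prologue code below writes, e.g.,
     FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
   which buffers the emission in a sequence and lets set_frame_related_p
   mark every generated insn with RTX_FRAME_RELATED_P for the benefit of
   the DWARF2 unwind machinery.  */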
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, mem, frame_reg),
			     REG_NOTES (insn));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
	these are 'normal' functions with local vars and which are
	calling other functions
   - register frame (PROC_REGISTER)
	keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */
void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);
  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */

  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
			     GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}
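
      /* Worked example: with frame_size == 45000, the arithmetic above
	 gives blocks = (45000 + 4096) / 8192 == 5 probe iterations and
	 leftover = 49096 - 5*8192 == 8136; since leftover > 4096, the
	 volatile store just emitted probes the final partial block when
	 no register saves would otherwise touch it.  */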
      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it to sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */

	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;

	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				     GEN_INT (-leftover)));

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			       gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					     GEN_INT (TARGET_ABI_UNICOSMK
						      ? -frame_size + 64
						      : -frame_size))),
			     REG_NOTES (seq));
    }
  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}
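
      /* Worked example: with reg_offset == 40000 and sa_size == 64,
	 low works out to ((40000 & 0xffff) ^ 0x8000) - 0x8000 == -25536;
	 since -25536 + 64 fits a signed 16-bit displacement, sa_bias
	 becomes 65536 (a single ldah) and reg_offset becomes -25536, so
	 every save lands within lda range of the biased $24.  */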
      /* Save regs in stack order.  Beginning with VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
	emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it.  We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ?? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (current_function_outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (current_function_outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed. Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       current_function_outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;
/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one line functions as they
     would otherwise get no line number at all.  Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! current_function_is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + current_function_pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     current_function_pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS && imask)
    {
      /* ??? Does VMS care if mask contains ra?  The old code didn't
	 set it, so I don't here.  */
      fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed cause link_section are only available then.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}
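
/* Hypothetical sample of the output above: for a small OSF function "foo"
   with a 48-byte frame that saves $26 and $9, the directives would look
   roughly like

	.ent foo
   foo:
	.frame $30,48,$26,0
	.mask 0x4000200,-48

   though the exact mask and offsets depend on the register usage that
   alpha_sa_mask computed.  */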
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || current_function_is_thunk);
}
/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp
void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (current_function_calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }
  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}
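
      /* Worked example: for frame_size == 100000 the 0x40007fff path
	 applies: low == ((100000 & 0xffff) ^ 0x8000) - 0x8000 == -31072,
	 so sp_adj2 is first SP + 131072 (a single ldah of 2) and the
	 final restore below adds the remaining -31072 with one lda.  */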
      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}
#if TARGET_ABI_OSF

/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this, insn, funexp;

  reset_block_changes ();

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 17);
  else
    this = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
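
/* Worked example for the decomposition above: for delta == 0x18000 we get
   lo == -0x8000 and hi == 0x20000, and hi + lo == delta, so the thunk
   adjusts "this" with just two adds -- an ldah of 2 and an lda of -32768.
   When delta needs more than 32 bits, hi + lo != delta and the constant
   is instead built in a register by alpha_emit_set_long_const.  */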
/* Debugging support.  */

/* Count the number of sdb related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;
/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    ;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};
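
/* Expository note: registers map onto these masks modulo 32, so a use of
   $9 sets bit 9 of used.i while a use of $f9 (hard register 41) sets
   bit 9 of used.fp; $31 and $f31 are the always-zero registers and are
   deliberately ignored by summarize_insn below.  */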
/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_LINE_NUMBER (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn: */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b))  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LD_L:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMP:
    case TYPE_ICMOV:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LDSYM:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
	{
	case EV5_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	/* ??? Most of the places below, we would like to assert never
	   happen, as it would indicate an error either in Haifa, or
	   in the scheduling description.  Unfortunately, Haifa never
	   schedules the last instruction of the BB, so we don't have
	   an accurate TI bit to go off.  */
	case EV5_E01:
	  if (in_use & EV5_E0)
	    {
	      if (in_use & EV5_E1)
		goto done;
	      in_use |= EV5_E1;
	    }
	  else
	    in_use |= EV5_E0 | EV5_E01;
	  break;

	case EV5_E0:
	  if (in_use & EV5_E0)
	    {
	      if (!(in_use & EV5_E01) || (in_use & EV5_E1))
		goto done;
	      in_use |= EV5_E1;
	    }
	  in_use |= EV5_E0;
	  break;

	case EV5_E1:
	  if (in_use & EV5_E1)
	    goto done;
	  in_use |= EV5_E1;
	  break;

	case EV5_FAM:
	  if (in_use & EV5_FA)
	    {
	      if (in_use & EV5_FM)
		goto done;
	      in_use |= EV5_FM;
	    }
	  else
	    in_use |= EV5_FA | EV5_FAM;
	  break;

	case EV5_FA:
	  if (in_use & EV5_FA)
	    goto done;
	  in_use |= EV5_FA;
	  break;

	case EV5_FM:
	  if (in_use & EV5_FM)
	    goto done;
	  in_use |= EV5_FM;
	  break;

	case EV5_NONE:
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
	 that no more IBR, FBR, or JSR insns may be slotted.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
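
/* Expository note: the nop chosen depends on which issue slot is still
   free -- an integer "nop" if an integer slot is open, an "fnop" if only
   a floating slot remains and FP insns are enabled, and otherwise "unop",
   conventionally encoded as a harmless ldq_u to the zero register, which
   can issue in any remaining slot.  */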
/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (GET_CODE (i) == NOTE)
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (GET_CODE (i) == CODE_LABEL)
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || GET_CODE (where) != CODE_LABEL)
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && GET_CODE (prev) == CALL_INSN))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (GET_CODE (where) == CODE_LABEL)
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && GET_CODE (where2) == JUMP_INSN)
		    where = where2;
		}
	      else if (GET_CODE (where) == INSN)
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
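
/* Worked example: the running offset is kept with
     ofs = (ofs + len) & (align - 1);
   so on EV5 (align == 16) an 8-byte group starting at ofs == 12 trips
   the "ofs + len > align" test above, and a single nop
   ((16 - 12) / 4 == 1) is inserted to push the group to the next
   aligned boundary instead of letting it straddle one.  */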
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
#if !TARGET_ABI_UNICOSMK

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif
#ifdef OBJECT_FORMAT_ELF

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

#endif /* OBJECT_FORMAT_ELF */
/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct alpha_links GTY(())
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct alpha_funcs GTY(())
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}

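/* A minimal sketch (not part of GCC) of the packing performed above,
   using only plain integers: the low byte holds the argument count and
   each of the six register arguments gets a 3-bit type code starting at
   bit 8.  The type codes here are arbitrary stand-ins for the
   avms_arg_type enumerators.  */
#if 0
static unsigned long
example_pack_arg_info (int num_args, const int atypes[6])
{
  unsigned long regval = num_args;      /* argument count in bits 0..7 */
  int i;

  /* One 3-bit type code per argument register, at bits 8, 11, 14, ...  */
  for (i = 0; i < 6; i++)
    regval |= ((unsigned long) atypes[i]) << (i * 3 + 8);
  return regval;
}
#endif
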
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return a SYMBOL_REF rtx for the linkage.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
        alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
                                               splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
                         (splay_tree_key) current_function_decl,
                         (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          al = (struct alpha_links *) node->value;
          if (is_local)
            {
              /* Defined here but external assumed.  */
              if (al->lkind == KIND_EXTERN)
                al->lkind = KIND_LOCAL;
            }
          else
            {
              /* Used here but unused assumed.  */
              if (al->lkind == KIND_UNUSED)
                al->lkind = KIND_LOCAL;
            }
          return al->linkage;
        }
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = alloca (name_len + 6);
    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
                                      ggc_alloc_string (linksym, name_len + 5));
  }

  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
                     (splay_tree_value) al);

  return al->linkage;
}

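/* A hedged illustration (not part of GCC): the block above merely brackets
   NAME, so for NAME "foo" the SYMBOL_REF is named "$foo..lk".  Rendered
   with plain libc under that assumption:  */
#if 0
#include <assert.h>
#include <string.h>

static void
example_need_linkage_symbol (void)
{
  const char *name = "foo";
  size_t name_len = strlen (name);
  char linksym[64];

  linksym[0] = '$';
  memcpy (linksym + 1, name, name_len);
  memcpy (linksym + 1 + name_len, "..lk", 5);   /* copies the NUL too */
  assert (strcmp (linksym, "$foo..lk") == 0);
}
#endif
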
rtx
alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (linkage, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
        al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char buf[512];
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
        name++;

      name_len = strlen (name);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          anl = (struct alpha_links *) node->value;
          al->lkind = anl->lkind;
        }

      sprintf (buf, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (buf);
      linksym = alloca (buflen + 1);
      memcpy (linksym, buf, buflen + 1);

      al->linkage = gen_rtx_SYMBOL_REF
        (Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
                         (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}

static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used */
          fprintf (stream, "\t.quad %s..en\n", name);
        }
      else
        {
          /* External and used, request code address.  */
          fprintf (stream, "\t.code_address %s\n", name);
        }
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used, build linkage pair.  */
          fprintf (stream, "\t.quad %s..en\n", name);
          fprintf (stream, "\t.quad %s\n", name);
        }
      else
        {
          /* External and used, request linkage pair.  */
          fprintf (stream, "\t.linkage %s\n", name);
        }
    }

  return 0;
}

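/* Worked example (not from the sources): for a locally defined function
   "bar" recorded as function number 3, the linkage-pair branch above
   emits

        $3..bar..lk:
                .quad bar..en
                .quad bar

   whereas an external "bar" gets a single ".linkage bar" request.  */
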
static void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
           alpha_procedure_type == PT_STACK ? "stack"
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}

/* Given a decl, a section name, and whether the decl initializer
   has relocs, choose attributes for the section.  */

#define SECTION_VMS_OVERLAY SECTION_FORGET
#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)

static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_OVERLAY;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_GLOBAL;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}

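/* A hedged usage sketch (not part of GCC): the three attributes tested
   above come from the VMS attribute table installed below via
   TARGET_ATTRIBUTE_TABLE, e.g.  */
#if 0
int shared_state __attribute__ ((overlaid));    /* SECTION_VMS_OVERLAY */
int exported_var __attribute__ ((global));      /* SECTION_VMS_GLOBAL */
int preset_var __attribute__ ((initialize));    /* SECTION_VMS_INITIALIZE */
#endif
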
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_VMS_OVERLAY)
    fprintf (asm_out_file, ",OVR");
  if (flags & SECTION_VMS_GLOBAL)
    fprintf (asm_out_file, ",GBL");
  if (flags & SECTION_VMS_INITIALIZE)
    fprintf (asm_out_file, ",NOMOD");
  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}

/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
                    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
                   tree cfundecl ATTRIBUTE_UNUSED,
                   int lflag ATTRIBUTE_UNUSED,
                   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */

#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}

/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (current_function_outgoing_args_size)
            + ALPHA_ROUND (get_frame_size()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
            + ALPHA_ROUND (get_frame_size()
                           + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}

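/* Worked example (not from the sources), assuming ALPHA_ROUND rounds up
   to the 16-byte stack alignment: with 24 bytes of outgoing arguments and
   a 40-byte local frame, eliminating FRAME_POINTER_REGNUM into
   STACK_POINTER_REGNUM yields ALPHA_ROUND (24) + ALPHA_ROUND (40)
   = 32 + 48 = 80.  */
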
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}

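/* Worked example (not from the sources): for main_input_filename
   "src/99bottles.c" the basename "99bottles.c" starts with a digit, so a
   '$' is prefixed and clean_symbol_name maps the remaining punctuation to
   underscores, giving roughly "$99bottles_c" (exact mapping as
   implemented by clean_symbol_name).  */
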
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs("\t.endp\n\n\t.psect ", file);
  assemble_name(file, name);
  fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf(file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}

#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}

/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
                                      unicosmk_output_text_section_asm_op,
                                      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
                                      unicosmk_output_data_section_asm_op,
                                      NULL);
  readonly_data_section = data_section;
}

static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
                             int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
        current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
        flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}

/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
         otherwise the section names generated for constructors and
         destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}

/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
                            tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
             current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}

static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}

/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}

/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
                                          machine->addr_list);
}

/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }
}

/* Output current function's deferred case vectors.  */

static void
unicosmk_output_deferred_case_vectors (FILE *file)
{
  struct machine_function *machine = cfun->machine;
  rtx t;

  if (machine->addr_list == NULL_RTX)
    return;

  switch_to_section (data_section);
  for (t = machine->addr_list; t; t = XEXP (t, 1))
    unicosmk_output_addr_vec (file, XEXP (t, 0));
}

/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name [256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name [len + SSIB_PREFIX_LEN] = 0;

  return name;
}

/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

static void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
                           gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
         have a frame.  */
      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  hard_frame_pointer_rtx, const1_rtx)));
    }
}

/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
           unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
               CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}

/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
                  + strlen (current_function_name ())/8 + 5);
}

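/* Worked example (not from the sources): the index counts quadwords in
   the SSIB emitted by unicosmk_output_ssib above: five fixed quadwords
   plus the function name padded to quadwords, then the CIWs themselves.
   For the first CIW of a function with a 10-character name this yields
   1 + 10/8 + 5 = 7.  */
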
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}

/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
         from the identifier in order to handle -fleading-underscore and
         explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
          && !memcmp (real_name, user_label_prefix, len))
        real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
        {
          TREE_ASM_WRITTEN (name_tree) = 1;
          fputs ("\t.extern\t", file);
          assemble_name (file, p->name);
          putc ('\n', file);
        }
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}

/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static int
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return 0;

  switch (name[1])
    {
    case '1': case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
              || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}

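/* A hedged illustration (not part of GCC) of what the checks above
   accept: exactly the register names r0-r31 and f0-f31, upper or lower
   case, optionally behind a '*' or '$' prefix.  */
#if 0
static void
example_special_names (void)
{
  gcc_assert (unicosmk_special_name ("r0"));    /* single digit */
  gcc_assert (unicosmk_special_name ("f29"));   /* '2' plus a digit */
  gcc_assert (unicosmk_special_name ("R31"));   /* '3' plus '0' or '1' */
  gcc_assert (!unicosmk_special_name ("r32"));  /* out of range */
  gcc_assert (!unicosmk_special_name ("foo"));  /* ordinary identifier */
}
#endif
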
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
        return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}

/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}

/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

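  /* Worked example (not from the sources): invoked as "gcc@code@align 4",
     the emitted macro computes the distance from the current offset to the
     next 16-byte boundary and emits one "bis r31,r31,r31" nop per
     remaining 4-byte instruction slot.  */
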
  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}

/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
                      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
         for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
# undef TARGET_ASM_FUNCTION_RODATA_SECTION
# define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"