/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include <splay-tree.h>
#include "cfglayout.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
struct alpha_compare alpha_compare;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int alpha_sr_alias_set;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};

static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};

/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
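/* To illustrate the fractional-cost effect (using only the numbers
   above, for exposition): with int_mult_di scored COSTS_N_INSNS (1) + 2
   and int_shift scored COSTS_N_INSNS (1), a multiply replaceable by a
   single shift looks strictly cheaper, but any synthesized sequence of
   two or more insns already costs at least COSTS_N_INSNS (2) and loses,
   so the single multiply instruction is kept when optimizing for
   size.  */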
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif

/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *, tree);
#endif

static void unicosmk_output_deferred_case_vectors (FILE *);
static void unicosmk_gen_dsib (unsigned long *);
static void unicosmk_output_ssib (FILE *, const char *);
static int unicosmk_need_dex (rtx);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
alpha_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_mfp_regs:
      if (value == 0)
	target_flags |= MASK_SOFT_FP;
      break;

    case OPT_mieee:
    case OPT_mieee_with_inexact:
      target_flags |= MASK_IEEE_CONFORMANT;
      break;

    case OPT_mtls_size_:
      if (value != 16 && value != 32 && value != 64)
	error ("bad value %qs for -mtls-size switch", arg);
      break;
    }

  return true;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE.  */

static const char *
alpha_mangle_fundamental_type (tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

void
override_options (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
    { "ev4",	PROCESSOR_EV4, 0 },
    { "ev45",	PROCESSOR_EV4, 0 },
    { "21064",	PROCESSOR_EV4, 0 },
    { "ev5",	PROCESSOR_EV5, 0 },
    { "21164",	PROCESSOR_EV5, 0 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX },
    { "21164a",	PROCESSOR_EV5, MASK_BWX },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
    { 0, 0, 0 }
  };

  int i;
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning (0, "-f%s ignored for Unicos/Mk (not supported)",
	       (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffices for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */
  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SU;
	}
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
	warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
      else
	{
	  alpha_tp = ALPHA_TP_INSN;
	  alpha_fptm = ALPHA_FPTM_SUI;
	}
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table[i].name; i++)
	if (! strcmp (alpha_cpu_string, cpu_table[i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table[i].processor;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table[i].flags;
	    break;
	  }
      if (! cpu_table[i].name)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; cpu_table[i].name; i++)
	if (! strcmp (alpha_tune_string, cpu_table[i].name))
	  {
	    alpha_tune = cpu_table[i].processor;
	    break;
	  }
      if (! cpu_table[i].name)
	error ("bad value %qs for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning (0, "trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
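/* For example, on a 64-bit host zap_mask accepts 0xffffffff00000000
   and 0x00ff00ff00ff00ff (every byte is 0x00 or 0xff, so a ZAP or
   ZAPNOT byte selector can produce the mask), but rejects
   0x0000000000000080, whose low byte is neither all-zero nor
   all-ones.  */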
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}
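/* For illustration: a local-dynamic reference with -mtls-size=32
   reaches this routine as

     (const (unspec [(symbol_ref "x")] UNSPEC_DTPREL))

   via a predicate such as dtp32_symbolic_operand, which supplies
   SIZE = 32 and UNSPEC = UNSPEC_DTPREL; the switch above then checks
   that the symbol's TLS model agrees with that relocation width.  */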
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc[REGNO (tmp)];
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The set of scalar modes supported differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  On unicosmk, we have
   the situation that HImode doesn't map to any C type, but of course
   we still support that.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && current_function_outgoing_args_size == 0
	  && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (rtx insn)
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (!tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (!tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (rtx insn)
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
	{
	  int count = 1;

	  for (j = i + 1; j < n_labels; j++)
	    if (XEXP (XVECEXP (jump_table, 1, i), 0)
		== XEXP (XVECEXP (jump_table, 1, j), 0))
	      count++;

	  if (count > best_count)
	    best_count = count, best_label = XVECEXP (jump_table, 1, i);
	}
    }

  return best_label ? best_label : const0_rtx;
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname[symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS	\
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && GET_CODE (ofs) == CONST_INT)
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (! (REG_P (x)
		 && (strict
		     ? STRICT_REG_OK_FOR_BASE_P (x)
		     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
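/* Some concrete shapes the predicate above accepts, shown as RTL for
   illustration (register numbers are arbitrary):

     (reg 16)                               plain base register
     (plus (reg 16) (const_int 8))          base plus small offset
     (and (plus (reg 16) (const_int 11)) (const_int -8))
                                            ldq_u-style unaligned access
     (lo_sum (reg 29) (symbol_ref "x"))     explicit-relocs low part,
                                            provided "x" is local  */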
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (rtx x, rtx scratch,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  CONST_OR_PURE_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (!no_new_pseudos)
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;
 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (no_new_pseudos ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
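/* Worked example of the split_addend arithmetic: for
   addend = 0x12348000, low = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000,
   which leaves 0x12350000 to be folded into the base register as the
   "high" part; the final address is then (plus reg -0x8000), whose
   displacement fits the signed 16-bit field of lda/ldq.  */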
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}
/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
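/* For instance (hypothetical operand): reloading
   (plus (reg 9) (const_int 0x12348000)) computes low = -0x8000 and
   high = 0x12350000; high + low == val, so the high part is loaded
   into a base register with a single ldah and the mem keeps the
   in-range -0x8000 displacement -- one extra insn instead of three.  */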
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (optimize_size)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
		    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
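/* The special PLUS case above mirrors the scaled-add instructions:
   e.g. (plus (mult (reg) (const_int 8)) (reg)) matches a single
   s8addq, so it is costed as one insn plus its register operands
   rather than as a full multiply followed by an add.  */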
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  if (WORDS_BIG_ENDIAN)
    offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
  else
    offset *= 8;
  *pbitnum = GEN_INT (offset);
}
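/* Worked example: a QImode reference at (plus (reg) (const_int 6))
   with no claimed alignment gives disp = 6 and offset = disp & 3 = 2;
   the widened SImode access is made at displacement 4, and on a
   little-endian target *pbitnum becomes 16, the bit position of the
   wanted byte within the loaded word.  */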
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (GET_CODE (ref) == MEM);

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
	return NO_REGS;
      if (class == ALL_REGS)
	return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
			      rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
	  || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	  || (GET_CODE (x) == SUBREG
	      && (GET_CODE (SUBREG_REG (x)) == MEM
		  || (GET_CODE (SUBREG_REG (x)) == REG
		      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
	{
	  if (!in || !aligned_memory_operand (x, mode))
	    return GENERAL_REGS;
	}
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
	  && ! (memory_operand (x, mode) || x == const0_rtx))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}

/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (no_new_pseudos)
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp && c < 0)
	      {
		new = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
	    if (!temp)
	      {
		new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }

#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  new = c;
  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
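/* A concrete instance of the left-shift strategy above: for
   c = 0x8d00000000, exact_log2 (c & -c) is 32 and c >> 32 = 0x8d,
   an 8-bit literal loadable in one insn, so the constant is built
   roughly as "lda t,0x8d; sll t,32,t" -- two insns instead of a
   load from the constant pool.  */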
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, we
     can't load this constant in one insn, do this in DImode.  */
  if (no_new_pseudos && mode == SImode
      && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
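/* Worked decomposition on a 64-bit host: c1 = 0x1234567887654321
   yields d1 = 0x4321, d2 = 0x87650000 sign-extended (negative), and,
   for the upper half, d3 = 0x5679 and d4 = 0x12340000; the emitted
   sequence builds the high word with two insns, shifts it left by 32,
   and folds in d2 and d1 -- at most five insns for any 64-bit
   constant.  */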
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the low 64 bits.  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (GET_CODE (x) == CONST_INT)
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement LEGITIMATE_CONSTANT_P.  This is all constants for which we
   are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case CONST:
    case LABEL_REF:
    case HIGH:
      return true;

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      goto do_integer;

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      rtx tmp;

      tmp = alpha_legitimize_address (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (mode, operands[1]);
  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];
    }
  else
    operands[1] = validize_mem (operands[1]);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (GET_CODE (operands[1]) == MEM
      || (GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == MEM)
      || (reload_in_progress && GET_CODE (operands[1]) == REG
	  && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[1])) == REG
	  && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      emit_insn ((mode == QImode
			  ? gen_reload_inqi_help
			  : gen_reload_inhi_help)
			 (operands[0], operands[1],
			  gen_rtx_REG (SImode, REGNO (operands[0]))));
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (GET_CODE (subtarget) == REG)
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      emit_insn ((mode == QImode
			  ? gen_aligned_loadqi
			  : gen_aligned_loadhi)
			 (subtarget, aligned_mem, bitnum, scratch));

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, seq, subtarget;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (GET_CODE (subtarget) == REG)
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  seq = ((mode == QImode
		  ? gen_unaligned_loadqi
		  : gen_unaligned_loadhi)
		 (subtarget, get_unaligned_address (operands[1], 0),
		  temp1, temp2));
	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }

  if (GET_CODE (operands[0]) == MEM
      || (GET_CODE (operands[0]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[0])) == MEM)
      || (reload_in_progress && GET_CODE (operands[0]) == REG
	  && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
	  && GET_CODE (SUBREG_REG (operands[0])) == REG
	  && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx seq = ((mode == QImode
		      ? gen_unaligned_storeqi
		      : gen_unaligned_storehi)
		     (get_unaligned_address (operands[0], 0),
		      operands[1], temp1, temp2, temp3));

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
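/* For reference, the aligned cases above expand to code roughly like
   (an illustrative sketch, not the literal output of the aligned_load
   and aligned_store patterns, which work on SImode bit-fields):

	ldl	t0,0(base)	  # load the containing aligned longword
	extbl	t0,ofs,dest	  # extract the byte

   and for an aligned byte store:

	ldl	t0,0(base)
	insbl	src,ofs,t1	  # shift the new byte into position
	mskbl	t0,ofs,t0	  # clear the old byte
	bis	t0,t1,t0
	stl	t0,0(base)  */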
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

	srl     $16,1,$1
	and     $16,1,$2
	cmplt   $16,0,$3
	or      $1,$2,$2
	cmovge  $16,$16,$2
	itoft	$3,$f10
	itoft	$2,$f11
	cvtqs   $f11,$f11
	adds    $f11,$f11,$f0
	fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

	itoft	$16,$f11
	lda	$2,LC0
	cmplt	$16,0,$1
	cpyse	$f11,$f31,$f10
	cpyse	$f31,$f11,$f11
	s4addq	$1,$2,$1
	lds	$f12,0($1)
	cvtqt	$f10,$f10
	cvtqt	$f11,$f11
	addt	$f12,$f10,$f0
	addt	$f0,$f11,$f0

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */

void
alpha_emit_floatuns (rtx operands[2])
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
/* Generate the comparison for a conditional branch.  */

rtx
alpha_emit_conditional_branch (enum rtx_code code)
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode cmp_mode, branch_mode = VOIDmode;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  rtx tem;

  if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      alpha_compare.fp_p = 0;
    }

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
    case ORDERED:
      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (alpha_compare.fp_p)
	{
	  cmp_code = swap_condition (code);
	  branch_code = NE;
	  tem = op0, op0 = op1, op1 = tem;
	}
      else
	{
	  cmp_code = reverse_condition (code);
	  branch_code = EQ;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (alpha_compare.fp_p)
    {
      cmp_mode = DFmode;
      if (flag_unsafe_math_optimizations)
	{
	  /* When we are not as concerned about non-finite values, and we
	     are comparing against zero, we can branch directly.  */
	  if (op1 == CONST0_RTX (DFmode))
	    cmp_code = UNKNOWN, branch_code = code;
	  else if (op0 == CONST0_RTX (DFmode))
	    {
	      /* Undo the swap we probably did just above.  */
	      tem = op0, op0 = op1, op1 = tem;
	      branch_code = swap_condition (cmp_code);
	      cmp_code = UNKNOWN;
	    }
	}
      else
	{
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
	  branch_mode = CCmode;
	}
    }
  else
    {
      cmp_mode = DImode;

      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
	{
	  /* Whee.  Compare and branch against 0 directly.  */
	  if (op1 == const0_rtx)
	    cmp_code = UNKNOWN, branch_code = code;

	  /* If the constant doesn't fit into an immediate, but can
	     be generated by lda/ldah, we adjust the argument and
	     compare against zero, so we can use beq/bne directly.  */
	  /* ??? Don't do this when comparing against symbols, otherwise
	     we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
	     be declared false out of hand (at least for non-weak).  */
	  else if (GET_CODE (op1) == CONST_INT
		   && (code == EQ || code == NE)
		   && !(symbolic_operand (op0, VOIDmode)
			|| (GET_CODE (op0) == REG && REG_POINTER (op0))))
	    {
	      rtx n_op1 = GEN_INT (-INTVAL (op1));

	      if (! satisfies_constraint_I (op1)
		  && (satisfies_constraint_K (n_op1)
		      || satisfies_constraint_L (n_op1)))
		cmp_code = PLUS, branch_code = code, op1 = n_op1;
	    }
	}

      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != UNKNOWN)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  /* Return the branch comparison.  */
  return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
}
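/* As an illustration of the folding above: an integer "a > b" has no
   direct branch form, so it is reversed to "a <= b" and emitted roughly
   as

	cmple	a,b,t
	beq	t,target	  # branch when the LE compare is false

   while a floating-point "a > b" is swapped to "b < a":

	cmptlt	b,a,ft
	fbne	ft,target

   (a sketch; actual register allocation differs).  */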
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

rtx
alpha_emit_setcc (enum rtx_code code)
{
  enum rtx_code cmp_code;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  rtx tmp;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p && GET_MODE (op0) == TFmode)
    {
      op0 = alpha_emit_xfloating_compare (&code, op0, op1);
      op1 = const0_rtx;
      fp_p = 0;
    }

  if (fp_p && !TARGET_FIX)
    return NULL_RTX;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

  cmp_code = UNKNOWN;
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
    case UNORDERED:
      /* We have these compares.  */
      if (fp_p)
	cmp_code = code, code = NE;
      break;

    case NE:
      if (!fp_p && op1 == const0_rtx)
	break;
      /* FALLTHRU */

    case ORDERED:
      cmp_code = reverse_condition (code);
      code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
	 special patterns that recognize swapped operands.  */
      if (!fp_p && op1 == const0_rtx)
	break;
      code = swap_condition (code);
      if (fp_p)
	cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!register_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != UNKNOWN)
    {
      enum machine_mode mode = fp_p ? DFmode : DImode;

      tmp = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
			      gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));

      op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
      op1 = const0_rtx;
    }

  /* Return the setcc comparison.  */
  return gen_rtx_fmt_ee (code, DImode, op0, op1);
}
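/* For example, a setcc of "x >= y" has no direct compare instruction, so
   it is emitted as cmple with the operands swapped (y <= x), whereas
   "x >= 0" matches one of the swapped-operand zero patterns directly and
   needs no extra work.  Illustrative only.  */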
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are nonzero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare.op0;
  rtx op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;
  rtx tem;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p != FLOAT_MODE_P (mode))
    {
      enum rtx_code cmp_code;

      if (! TARGET_FIX)
	return 0;

      /* If we have fp<->int register move instructions, do a cmov by
	 performing the comparison in fp registers, and move the
	 zero/nonzero value to integer registers, where we can then
	 use a normal cmov, or vice-versa.  */

      switch (code)
	{
	case EQ: case LE: case LT: case LEU: case LTU:
	  /* We have these compares.  */
	  cmp_code = code, code = NE;
	  break;

	case NE:
	  /* This must be reversed.  */
	  cmp_code = EQ, code = EQ;
	  break;

	case GE: case GT: case GEU: case GTU:
	  /* These normally need swapping, but for integer zero we have
	     special patterns that recognize swapped operands.  */
	  if (!fp_p && op1 == const0_rtx)
	    cmp_code = code, code = NE;
	  else
	    {
	      cmp_code = swap_condition (code);
	      code = NE;
	      tem = op0, op0 = op1, op1 = tem;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      tem = gen_reg_rtx (cmp_op_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
			      gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
					      op0, op1)));

      cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
      op0 = gen_lowpart (cmp_op_mode, tem);
      op1 = CONST0_RTX (cmp_op_mode);
      fp_p = !fp_p;
      local_fast_math = 1;
    }

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (!fp_p || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

  if (no_new_pseudos)
    return NULL_RTX;

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  */
      if (op1 != CONST0_RTX (cmp_mode))
	{
	  code = swap_condition (code);
	  tem = op0, op0 = op1, op1 = tem;
	}
      break;

    default:
      gcc_unreachable ();
    }

  if (!fp_p)
    {
      if (!reg_or_0_operand (op0, DImode))
	op0 = force_reg (DImode, op0);
      if (!reg_or_8bit_operand (op1, DImode))
	op1 = force_reg (DImode, op1);
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (fp_p && !local_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
}
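/* With TARGET_FIX, the fp<->int case above yields code along the lines
   of

	cmpteq	$f1,$f2,$f10	  # compare in the FP registers
	ftoit	$f10,t0		  # move the 0/2.0 result bits to an int reg
	cmovne	t0,a,dest	  # then a normal integer cmov

   (a sketch; actual register choices and modes differ).  */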
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

int
alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
			      rtx t_rtx, rtx f_rtx)
{
  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);
  t = INTVAL (t_rtx);
  f = INTVAL (f_rtx);
  diff = t - f;

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))
    {
      code = reverse_condition (code);
      diff = t, t = f, f = diff;
      diff = t - f;
    }

  subtarget = target = dest;
  if (mode != DImode)
    {
      target = gen_lowpart (DImode, dest);
      if (! no_new_pseudos)
	subtarget = gen_reg_rtx (DImode);
      else
	subtarget = target;
    }
  /* Below, we must be careful to use copy_rtx on target and subtarget
     in intermediate insns, as they may be a subreg rtx, which may not
     be shared.  */

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
	 viable over a longer latency cmove.  On EV5, the E0 slot is a
	 scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
			    GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else if (f == 0 && t == -1)
    {
      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
    }
  else if (diff == 1 || diff == 4 || diff == 8)
    {
      rtx add_op;

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));

      if (diff == 1)
	emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
      else
	{
	  add_op = GEN_INT (f);
	  if (sext_add_operand (add_op, mode))
	    {
	      tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
				  GEN_INT (diff));
	      tmp = gen_rtx_PLUS (DImode, tmp, add_op);
	      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
	    }
	  else
	    return 0;
	}
    }
  else
    return 0;

  return 1;
}
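/* A worked example of the arithmetic above: for (cond != 0 ? 5 : 1) we
   have t = 5, f = 1, diff = 4, so the splitter emits roughly

	cmpne	cond,0,t0	  # t0 = (cond != 0)
	s4addq	t0,1,dest	  # dest = t0*4 + 1

   instead of a cmov.  Illustrative only.  */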
/* Look up the function X_floating library function name for the
   given operation.  */

struct xfloating_op GTY(())
{
  const enum rtx_code code;
  const char *const GTY((skip)) osf_func;
  const char *const GTY((skip)) vms_func;
  rtx libcall;
};

static GTY(()) struct xfloating_op xfloating_ops[] =
{
  { PLUS,		"_OtsAddX", "OTS$ADD_X", 0 },
  { MINUS,		"_OtsSubX", "OTS$SUB_X", 0 },
  { MULT,		"_OtsMulX", "OTS$MUL_X", 0 },
  { DIV,		"_OtsDivX", "OTS$DIV_X", 0 },
  { EQ,			"_OtsEqlX", "OTS$EQL_X", 0 },
  { NE,			"_OtsNeqX", "OTS$NEQ_X", 0 },
  { LT,			"_OtsLssX", "OTS$LSS_X", 0 },
  { LE,			"_OtsLeqX", "OTS$LEQ_X", 0 },
  { GT,			"_OtsGtrX", "OTS$GTR_X", 0 },
  { GE,			"_OtsGeqX", "OTS$GEQ_X", 0 },
  { FIX,		"_OtsCvtXQ", "OTS$CVTXQ", 0 },
  { FLOAT,		"_OtsCvtQX", "OTS$CVTQX", 0 },
  { UNSIGNED_FLOAT,	"_OtsCvtQUX", "OTS$CVTQUX", 0 },
  { FLOAT_EXTEND,	"_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
};

static GTY(()) struct xfloating_op vax_cvt_ops[] =
{
  { FLOAT_EXTEND,	"_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  { FLOAT_TRUNCATE,	"_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
};

static rtx
alpha_lookup_xfloating_lib_func (enum rtx_code code)
{
  struct xfloating_op *ops = xfloating_ops;
  long n = ARRAY_SIZE (xfloating_ops);
  long i;

  gcc_assert (TARGET_HAS_XFLOATING_LIBS);

  /* How irritating.  Nothing to key off for the main table.  */
  if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
    {
      ops = vax_cvt_ops;
      n = ARRAY_SIZE (vax_cvt_ops);
    }

  for (i = 0; i < n; ++i, ++ops)
    if (ops->code == code)
      {
	rtx func = ops->libcall;
	if (!func)
	  {
	    func = init_one_libfunc (TARGET_ABI_OPEN_VMS
				     ? ops->vms_func : ops->osf_func);
	    ops->libcall = func;
	  }
	return func;
      }

  gcc_unreachable ();
}
/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

static int
alpha_compute_xfloating_mode_arg (enum rtx_code code,
				  enum alpha_fp_rounding_mode round)
{
  int mode;

  switch (round)
    {
    case ALPHA_FPRM_NORM:
      mode = 2;
      break;
    case ALPHA_FPRM_MINF:
      mode = 1;
      break;
    case ALPHA_FPRM_CHOP:
      mode = 0;
      break;
    case ALPHA_FPRM_DYN:
      mode = 4;
      break;
    default:
      gcc_unreachable ();

    /* XXX For reference, round to +inf is mode = 3.  */
    }

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
    mode |= 0x10000;

  return mode;
}
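/* Worked example of the above: under the default round-to-nearest
   (ALPHA_FPRM_NORM) the mode argument passed to the X_floating routines
   is 2, and a FLOAT_TRUNCATE under ALPHA_FPTM_N additionally sets bit
   16, giving 0x10002.  */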
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.
*/

static void
alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
			      int noperands, rtx equiv)
{
  rtx usage = NULL_RTX, tmp, reg;
  int regno = 16, i;

  start_sequence ();

  for (i = 0; i < noperands; ++i)
    {
      switch (GET_MODE (operands[i]))
	{
	case TFmode:
	  reg = gen_rtx_REG (TFmode, regno);
	  regno += 2;
	  break;

	case DFmode:
	  reg = gen_rtx_REG (DFmode, regno + 32);
	  regno += 1;
	  break;

	case VOIDmode:
	  gcc_assert (GET_CODE (operands[i]) == CONST_INT);
	  /* FALLTHRU */
	case DImode:
	  reg = gen_rtx_REG (DImode, regno);
	  regno += 1;
	  break;

	default:
	  gcc_unreachable ();
	}

      emit_move_insn (reg, operands[i]);
      usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
    }

  switch (GET_MODE (target))
    {
    case TFmode:
      reg = gen_rtx_REG (TFmode, 16);
      break;
    case DFmode:
      reg = gen_rtx_REG (DFmode, 32);
      break;
    case DImode:
      reg = gen_rtx_REG (DImode, 0);
      break;
    default:
      gcc_unreachable ();
    }

  tmp = gen_rtx_MEM (QImode, func);
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
					const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  CONST_OR_PURE_CALL_P (tmp) = 1;

  tmp = get_insns ();
  end_sequence ();

  emit_libcall_block (tmp, target, reg, equiv);
}
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

void
alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
{
  rtx func;
  int mode;
  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
				gen_rtx_fmt_ee (code, TFmode, operands[1],
						operands[2]));
}
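/* For illustration, a TFmode add therefore becomes a call along the
   lines of

	$16:$17 = op1,  $18:$19 = op2,  $20 = rounding-mode argument
	jsr	$26,_OtsAddX
	result in $16:$17

   following the nonstandard convention described before
   alpha_emit_xfloating_libcall above (a sketch, not literal output).  */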
/* Emit an X_floating library function call for a comparison.  */

static rtx
alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
{
  enum rtx_code cmp_code, res_code;
  rtx func, out, operands[2];

  /* X_floating library comparison functions return
	   -1  unordered
	    0  false
	    1  true
     Convert the compare against the raw return value.  */

  cmp_code = *pcode;
  switch (cmp_code)
    {
    case UNORDERED:
      cmp_code = EQ;
      res_code = LT;
      break;
    case ORDERED:
      cmp_code = EQ;
      res_code = GE;
      break;
    case NE:
      res_code = NE;
      break;
    case EQ:
    case LT:
    case GT:
    case LE:
    case GE:
      res_code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  *pcode = res_code;

  func = alpha_lookup_xfloating_lib_func (cmp_code);

  operands[0] = op0;
  operands[1] = op1;
  out = gen_reg_rtx (DImode);

  /* ??? Strange mode for equiv because what's actually returned
     is -1,0,1, not a proper boolean value.  */
  alpha_emit_xfloating_libcall (func, out, operands, 2,
				gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));

  return out;
}
/* Emit an X_floating library function call for a conversion.  */

void
alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
{
  int noperands = 1, mode;
  rtx out_operands[2];
  rtx func;
  enum rtx_code code = orig_code;

  if (code == UNSIGNED_FIX)
    code = FIX;

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

  switch (code)
    {
    case FIX:
      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);
      noperands = 2;
      break;
    default:
      break;
    }

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
				gen_rtx_fmt_e (orig_code,
					       GET_MODE (operands[0]),
					       operands[1]));
}
/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
   DImode moves from OP[2,3] to OP[0,1].  If FIXUP_OVERLAP is true,
   guarantee that the sequence
     set (OP[0] OP[2])
     set (OP[1] OP[3])
   is valid.  Naturally, output operand ordering is little-endian.
   This is used by *movtf_internal and *movti_internal.  */

void
alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
			bool fixup_overlap)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      gcc_assert (operands[1] == CONST0_RTX (mode));
      operands[2] = operands[3] = const0_rtx;
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
      break;

    default:
      gcc_unreachable ();
    }

  if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
    {
      rtx tmp;
      tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
      tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
    }
}
/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

void
alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
{
  rtx high_bit = operands[2];
  rtx scratch;
  int move;

  alpha_split_tmode_pair (operands, TFmode, false);

  /* Detect three flavors of operand overlap.  */
  move = 1;
  if (rtx_equal_p (operands[0], operands[2]))
    move = 0;
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (rtx_equal_p (operands[0], high_bit))
	move = 2;
      else
	move = -1;
    }

  if (move < 0)
    emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

  if (move > 0)
    {
      emit_move_insn (operands[0], operands[2]);
      if (move > 1)
	emit_move_insn (operands[1], scratch);
    }
}
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

	   unsigned:			signed:
   word:   ldq_u  r1,X(r11)		ldq_u  r1,X(r11)
	   ldq_u  r2,X+1(r11)		ldq_u  r2,X+1(r11)
	   lda    r3,X(r11)		lda    r3,X+2(r11)
	   extwl  r1,r3,r1		extql  r1,r3,r1
	   extwh  r2,r3,r2		extqh  r2,r3,r2
	   or     r1,r2,r1		or     r1,r2,r1
					sra    r1,48,r1

   long:   ldq_u  r1,X(r11)		ldq_u  r1,X(r11)
	   ldq_u  r2,X+3(r11)		ldq_u  r2,X+3(r11)
	   lda    r3,X(r11)		lda    r3,X(r11)
	   extll  r1,r3,r1		extll  r1,r3,r1
	   extlh  r2,r3,r2		extlh  r2,r3,r2
	   or     r1,r2,r1		addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
	   ldq_u  r2,X+7(r11)
	   lda    r3,X(r11)
	   extql  r1,r3,r1
	   extqh  r2,r3,r2
	   or     r1,r2,r1
*/

void
alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
			     HOST_WIDE_INT ofs, int sign)
{
  rtx meml, memh, addr, extl, exth, tmp, mema;
  enum machine_mode mode;

  if (TARGET_BWX && size == 2)
    {
      meml = adjust_address (mem, QImode, ofs);
      memh = adjust_address (mem, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
	tmp = meml, meml = memh, memh = tmp;
      extl = gen_reg_rtx (DImode);
      exth = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendqidi2 (extl, meml));
      emit_insn (gen_zero_extendqidi2 (exth, memh));
      exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
				  NULL, 1, OPTAB_LIB_WIDEN);
      addr = expand_simple_binop (DImode, IOR, extl, exth,
				  NULL, 1, OPTAB_LIB_WIDEN);

      if (sign && GET_MODE (tgt) != HImode)
	{
	  addr = gen_lowpart (HImode, addr);
	  emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
	}
      else
	{
	  if (GET_MODE (tgt) != DImode)
	    addr = gen_lowpart (GET_MODE (tgt), addr);
	  emit_move_insn (tgt, addr);
	}
      return;
    }

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (mema, ofs),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (mema, ofs + size - 1),
				     GEN_INT (-8)));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
    {
      emit_move_insn (addr, plus_constant (mema, -1));

      emit_insn (gen_extqh_be (extl, meml, addr));
      emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));

      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
			   addr, 1, OPTAB_WIDEN);
    }
  else if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (mema, ofs+2));

      emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
      emit_insn (gen_extqh_le (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
	 addr for the target, because addr is marked as a pointer and combine
	 knows that pointers are always sign-extended 32-bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
			   addr, 1, OPTAB_WIDEN);
    }
  else
    {
      if (WORDS_BIG_ENDIAN)
	{
	  emit_move_insn (addr, plus_constant (mema, ofs+size-1));
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_extwh_be (extl, meml, addr));
	      mode = HImode;
	      break;

	    case 4:
	      emit_insn (gen_extlh_be (extl, meml, addr));
	      mode = SImode;
	      break;

	    case 8:
	      emit_insn (gen_extqh_be (extl, meml, addr));
	      mode = DImode;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
	}
      else
	{
	  emit_move_insn (addr, plus_constant (mema, ofs));
	  emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_extwh_le (exth, memh, addr));
	      mode = HImode;
	      break;

	    case 4:
	      emit_insn (gen_extlh_le (exth, memh, addr));
	      mode = SImode;
	      break;

	    case 8:
	      emit_insn (gen_extqh_le (exth, memh, addr));
	      mode = DImode;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
			   gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
			   sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
}
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (rtx dst, rtx src,
			      HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  if (TARGET_BWX && size == 2)
    {
      if (src != const0_rtx)
	{
	  dstl = gen_lowpart (QImode, src);
	  dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
				      NULL, 1, OPTAB_LIB_WIDEN);
	  dsth = gen_lowpart (QImode, dsth);
	}
      else
	dstl = dsth = const0_rtx;

      meml = adjust_address (dst, QImode, ofs);
      memh = adjust_address (dst, QImode, ofs+1);
      if (BYTES_BIG_ENDIAN)
	addr = meml, meml = memh, memh = addr;

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
      return;
    }

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (dsta, ofs),
				      GEN_INT (-8)));
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
			 gen_rtx_AND (DImode,
				      plus_constant (dsta, ofs + size - 1),
				      GEN_INT (-8)));
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);
  if (WORDS_BIG_ENDIAN)
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));

      if (src != const0_rtx)
	{
	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
	      break;
	    case 4:
	      emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
	      break;
	    case 8:
	      emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
	      break;
	    }
	  emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
				GEN_INT (size*8), addr));
	}

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
	  break;
	case 4:
	  {
	    rtx msk = immed_double_const (0xffffffff, 0, DImode);
	    emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
	    break;
	  }
	case 8:
	  emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
	  break;
	}

      emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
    }
  else
    {
      addr = copy_addr_to_reg (plus_constant (dsta, ofs));

      if (src != CONST0_RTX (GET_MODE (src)))
	{
	  emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
				GEN_INT (size*8), addr));

	  switch ((int) size)
	    {
	    case 2:
	      emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
	      break;
	    case 4:
	      emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
	      break;
	    case 8:
	      emit_insn (gen_insql_le (insl, src, addr));
	      break;
	    }
	}

      emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

      switch ((int) size)
	{
	case 2:
	  emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
	  break;
	case 4:
	  {
	    rtx msk = immed_double_const (0xffffffff, 0, DImode);
	    emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
	    break;
	  }
	case 8:
	  emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
	  break;
	}
    }

  if (src != CONST0_RTX (GET_MODE (src)))
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  if (WORDS_BIG_ENDIAN)
    {
      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);
    }
  else
    {
      /* Must store high before low for degenerate case of aligned.  */
      emit_move_insn (memh, dsth);
      emit_move_insn (meml, dstl);
    }
}
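/* For reference, the handbook-style sequence the quadword case above
   produces (little-endian, illustrative only):

	ldq_u	r2,X+7(r11)	  # load both containing quadwords
	ldq_u	r1,X(r11)
	lda	r3,X(r11)
	insqh	r4,r3,r6	  # position the new data
	insql	r4,r3,r5
	mskqh	r2,r3,r2	  # clear the bytes to be replaced
	mskql	r1,r3,r1
	bis	r2,r6,r2	  # merge
	bis	r1,r5,r1
	stq_u	r2,X+7(r11)	  # store high first for the aligned case
	stq_u	r1,X(r11)  */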
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning: this speeds copying to/from aligned and unaligned buffers,
   and it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8

/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
				   HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;
  HOST_WIDE_INT i;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      tmp = change_address (smem, DImode,
			    gen_rtx_AND (DImode,
					 plus_constant (smema, 8*i),
					 im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);
    }

  tmp = change_address (smem, DImode,
			gen_rtx_AND (DImode,
				     plus_constant (smema, 8*words - 1),
				     im8));
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
		       1, OPTAB_WIDEN);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (sreg, plus_constant (sreg, 7));
  for (i = 0; i < words; ++i)
    {
      if (WORDS_BIG_ENDIAN)
	{
	  emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
	  emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
	}
      else
	{
	  emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
	  emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
	}
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
			      gen_rtx_IF_THEN_ELSE (DImode,
						    gen_rtx_EQ (DImode, areg,
								const0_rtx),
						    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
				  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
				    HOST_WIDE_INT words, HOST_WIDE_INT ofs)
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;
  HOST_WIDE_INT i;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

  if (ofs != 0)
    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (dmema, words*8 - 1),
					   im8));
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
			      gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (dreg, plus_constant (dreg, 7));
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
	{
	  if (WORDS_BIG_ENDIAN)
	    {
	      emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
	      emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
	    }
	  else
	    {
	      emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
	      emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
	    }
	}
      for (i = words-1; i > 0; --i)
	{
	  ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
					ins_tmps[i-1], ins_tmps[i-1], 1,
					OPTAB_WIDEN);
	}
    }

  /* Split and merge the ends with the destination data.  */
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
      emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
    }
  else
    {
      emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
      emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
    }

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
			       st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
			       st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_1, st_tmp_1);
  else
    emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      rtx tmp = change_address (dmem, DImode,
				gen_rtx_AND (DImode,
					     plus_constant (dmema,
					     WORDS_BIG_ENDIAN ? i*8-1 : i*8),
					     im8));
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_2, st_tmp_2);
  else
    emit_move_insn (st_addr_1, st_tmp_1);
}
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (rtx operands[])
{
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  rtx tmp;
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
    return 1;
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (GET_CODE (tmp) == REG)
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    src_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    src_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    src_align = 16;
	}
    }

  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
	{
	  if (a >= 64 && c % 8 == 0)
	    dst_align = 64;
	  else if (a >= 32 && c % 4 == 0)
	    dst_align = 32;
	  else if (a >= 16 && c % 2 == 0)
	    dst_align = 16;
	}
    }

  ofs = 0;
  if (src_align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, DImode, ofs + i * 8));

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (src_align >= 32 && bytes >= 4)
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
	emit_move_insn (data_regs[nregs + i],
			adjust_address (orig_src, SImode, ofs + i * 4));

      nregs += words;
      bytes -= words * 4;
      ofs += words * 4;
    }

  if (bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words+1; ++i)
	data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
					 words, ofs);

      nregs += words;
      bytes -= words * 8;
      ofs += words * 8;
    }

  if (! TARGET_BWX && bytes >= 4)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (src_align >= 16)
	{
	  do {
	    data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	    emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
	  alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
      bytes -= 1;
      ofs += 1;
    }

  gcc_assert (nregs <= ARRAY_SIZE (data_regs));

  /* Now save it back out again.  */

  i = 0, ofs = 0;

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)
    {
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  emit_move_insn (adjust_address (orig_dst, DImode, ofs),
			  data_regs[i]);
	  ofs += 8;
	  i++;
	}
    }

  if (dst_align >= 32)
    {
      /* If the source has remaining DImode regs, write them out in
	 two pieces.  */
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)
	{
	  tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  gen_lowpart (SImode, data_regs[i]));
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
			  gen_lowpart (SImode, tmp));
	  ofs += 8;
	  i++;
	}

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs),
			  data_regs[i]);
	  ofs += 4;
	  i++;
	}
    }

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)
    {
      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
	if (GET_MODE (data_regs[i + words]) != DImode)
	  break;

      if (words == 1)
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
      else
	alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
					    words, ofs);

      i += words;
      ofs += words * 8;
    }

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)
    {
      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
      ofs += 4;
      i++;
    }

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
	i++;
	ofs += 2;
      }
  else
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)
      {
	alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
	i++;
	ofs += 2;
      }

  /* The remainder must be byte copies.  */
  while (i < nregs)
    {
      gcc_assert (GET_MODE (data_regs[i]) == QImode);
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
      i++;
      ofs += 1;
    }

  return 1;
}
int
alpha_expand_block_clear (rtx operands[])
{
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  rtx tmp;
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
    return 1;
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
    return 0;

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
	   && GET_CODE (XEXP (tmp, 0)) == REG
	   && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > align)
	{
	  if (a >= 64)
	    align = a, alignofs = 8 - c % 8;
	  else if (a >= 32)
	    align = a, alignofs = 4 - c % 4;
	  else if (a >= 16)
	    align = a, alignofs = 2 - c % 2;
	}
    }

  /* Handle an unaligned prefix first.  */

  if (alignofs > 0)
    {
#if HOST_BITS_PER_WIDE_INT >= 64
      /* Given that alignofs is bounded by align, the only time BWX could
	 generate three stores is for a 7 byte fill.  Prefer two individual
	 stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
	       && align >= 32
	       && !(alignofs == 4 && bytes >= 4))
	{
	  enum machine_mode mode = (align >= 64 ? DImode : SImode);
	  int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
	  if (bytes < alignofs)
	    {
	      mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
	      ofs += bytes;
	      bytes = 0;
	    }
	  else
	    {
	      bytes -= alignofs;
	      ofs += alignofs;
	    }
	  alignofs = 0;

	  tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	}
#endif

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
	{
	  emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
	  bytes -= 1;
	  ofs += 1;
	  alignofs -= 1;
	}
      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
	{
	  emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
	  bytes -= 2;
	  ofs += 2;
	  alignofs -= 2;
	}
      if (alignofs == 4 && bytes >= 4)
	{
	  emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
	  bytes -= 4;
	  ofs += 4;
	  alignofs = 0;
	}

      /* If we've not used the extra lead alignment information by now,
	 we won't be able to.  Downgrade align to match what's left over.  */
      if (alignofs > 0)
	{
	  alignofs = alignofs & -alignofs;
	  align = MIN (align, alignofs * BITS_PER_UNIT);
	}
    }

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)
    {
      words = bytes / 8;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
			const0_rtx);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)
    {
      rtx orig_dsta;

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
      bytes -= 4;
      ofs += 4;

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
	orig_dsta = force_reg (Pmode, orig_dsta);

      words = bytes / 8;
      for (i = 0; i < words; ++i)
	{
	  rtx mem
	    = change_address (orig_dst, DImode,
			      gen_rtx_AND (DImode,
					   plus_constant (orig_dsta, ofs + i*8),
					   GEN_INT (-8)));
	  set_mem_alias_set (mem, 0);
	  emit_move_insn (mem, const0_rtx);
	}

      /* Depending on the alignment, the first stq_u may have overlapped
	 with the initial stl, which means that the last stq_u didn't
	 write as much as it would appear.  Leave those questionable bytes
	 unaccounted for.  */
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;
    }

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))
    {
      words = bytes / 4;

      for (i = 0; i < words; ++i)
	emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
			const0_rtx);

      bytes -= words * 4;
      ofs += words * 4;
    }

  /* An unaligned block uses stq_u stores for as many as possible.  */

  if (bytes >= 8)
    {
      words = bytes / 8;

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

      bytes -= words * 8;
      ofs += words * 8;
    }

  /* Next clean up any trailing pieces.  */

#if HOST_BITS_PER_WIDE_INT >= 64
  /* Count the number of bits in BYTES for which aligned stores could
     be emitted.  */
  words = 0;
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)
    if (bytes & i)
      words += 1;

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)
    {
      if (align >= 64)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, DImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
      else if (align >= 32 && bytes < 4)
	{
	  rtx mem, tmp;
	  HOST_WIDE_INT mask;

	  mem = adjust_address (orig_dst, SImode, ofs);
	  set_mem_alias_set (mem, 0);

	  mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

	  tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
			      NULL_RTX, 1, OPTAB_WIDEN);

	  emit_move_insn (mem, tmp);
	  return 1;
	}
    }
#endif

  if (!TARGET_BWX && bytes >= 4)
    {
      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
      bytes -= 4;
      ofs += 4;
    }

  if (bytes >= 2)
    {
      if (align >= 16)
	{
	  do {
	    emit_move_insn (adjust_address (orig_dst, HImode, ofs),
			    const0_rtx);
	    bytes -= 2;
	    ofs += 2;
	  } while (bytes >= 2);
	}
      else if (! TARGET_BWX)
	{
	  alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
	  bytes -= 2;
	  ofs += 2;
	}
    }

  while (bytes > 0)
    {
      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
      bytes -= 1;
      ofs += 1;
    }

  return 1;
}
/* Returns a mask so that zap(x, value) == x & mask.  */

rtx
alpha_expand_zap_mask (HOST_WIDE_INT value)
{
  rtx result;
  int i;

  if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      HOST_WIDE_INT mask = 0;

      for (i = 7; i >= 0; --i)
	{
	  mask <<= 8;
	  if (!((value >> i) & 1))
	    mask |= 0xff;
	}

      result = gen_int_mode (mask, DImode);
    }
  else
    {
      HOST_WIDE_INT mask_lo = 0, mask_hi = 0;

      gcc_assert (HOST_BITS_PER_WIDE_INT == 32);

      for (i = 7; i >= 4; --i)
	{
	  mask_hi <<= 8;
	  if (!((value >> i) & 1))
	    mask_hi |= 0xff;
	}

      for (i = 3; i >= 0; --i)
	{
	  mask_lo <<= 8;
	  if (!((value >> i) & 1))
	    mask_lo |= 0xff;
	}

      result = immed_double_const (mask_lo, mask_hi, DImode);
    }

  return result;
}
void
alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
				   enum machine_mode mode,
				   rtx op0, rtx op1, rtx op2)
{
  op0 = gen_lowpart (mode, op0);

  if (op1 == const0_rtx)
    op1 = CONST0_RTX (mode);
  else
    op1 = gen_lowpart (mode, op1);

  if (op2 == const0_rtx)
    op2 = CONST0_RTX (mode);
  else
    op2 = gen_lowpart (mode, op2);

  emit_insn ((*gen) (op0, op1, op2));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;
  emit_insn (fn (res, mem, val));
}
/* A subroutine of the atomic operation splitters.  Emit an insxl
   instruction in MODE.  */

static rtx
emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
{
  rtx ret = gen_reg_rtx (DImode);
  rtx (*fn) (rtx, rtx, rtx);

  if (WORDS_BIG_ENDIAN)
    {
      if (mode == QImode)
	fn = gen_insbl_be;
      else
	fn = gen_inswl_be;
    }
  else
    {
      if (mode == QImode)
	fn = gen_insbl_le;
      else
	fn = gen_inswl_le;
    }
  /* The insbl and inswl patterns require a register operand.  */
  op1 = force_reg (mode, op1);
  emit_insn (fn (ret, op1, op2));

  return ret;
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
		       rtx before, rtx after, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));

  emit_insn (gen_memory_barrier ());

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (DImode, label);

  if (before == NULL)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);
  if (after)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_memory_barrier ());
}
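/* The splitter above yields a standard ll/sc retry loop, roughly

	mb
1:	ldq_l	t0,0(addr)	  # load-locked (BEFORE)
	addq	t0,val,t1	  # the operation (PLUS shown)
	stq_c	t1,0(addr)	  # store-conditional; t1 = success
	beq	t1,1b		  # retry on failure (marked unlikely)
	mb

   for a DImode fetch-and-add.  Illustrative only.  */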
/* Expand a compare and swap operation.  */

void
alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
			      rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);

  emit_insn (gen_memory_barrier ());

  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (mode, retval, mem);

  x = gen_lowpart (DImode, retval);
  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, x, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, x, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  emit_move_insn (scratch, newval);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
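/* Sketch of the emitted compare-and-swap loop (DImode, nonzero OLDVAL):

	mb
1:	ldq_l	t0,0(addr)
	cmpeq	t0,old,t1
	beq	t1,2f		  # mismatch: give up
	mov	new,t2
	stq_c	t2,0(addr)
	beq	t2,1b		  # lost the reservation: retry
	mb
2:				  # t0 holds the observed value

   Illustrative only.  */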
void
alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);

  addr = force_reg (DImode, XEXP (mem, 0));
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  oldval = convert_modes (DImode, mode, oldval, 1);
  newval = emit_insxl (mode, newval, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn5 = gen_sync_compare_and_swapqi_1;
  else
    fn5 = gen_sync_compare_and_swaphi_1;
  emit_insn (fn5 (wdst, addr, oldval, newval, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
				 rtx oldval, rtx newval, rtx align,
				 rtx scratch, rtx cond)
{
  rtx label1, label2, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  emit_insn (gen_memory_barrier ());
  label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_extxl_be (dest, scratch, width, addr));
  else
    emit_insn (gen_extxl_le (dest, scratch, width, addr));

  if (oldval == const0_rtx)
    x = gen_rtx_NE (DImode, dest, const0_rtx);
  else
    {
      x = gen_rtx_EQ (DImode, dest, oldval);
      emit_insn (gen_rtx_SET (VOIDmode, cond, x));
      x = gen_rtx_EQ (DImode, cond, const0_rtx);
    }
  emit_unlikely_jump (x, label2);

  if (WORDS_BIG_ENDIAN)
    emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
  else
    emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
  emit_insn (gen_iordi3 (scratch, scratch, newval));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_memory_barrier ());
  emit_label (XEXP (label2, 0));
}
/* Expand an atomic exchange operation.  */

void
alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_lowpart (DImode, scratch);

  emit_insn (gen_memory_barrier ());

  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_EQ (DImode, cond, const0_rtx);
  emit_unlikely_jump (x, label);
}
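/* The exchange loop is the same shape as the fetch-and-op loop, without
   the arithmetic and without a trailing barrier:

	mb
1:	ldq_l	t0,0(addr)	  # t0 = old value
	mov	val,t1
	stq_c	t1,0(addr)
	beq	t1,1b

   Illustrative only.  */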
void
alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addr, align, wdst;
  rtx (*fn4) (rtx, rtx, rtx, rtx);

  /* Force the address into a register.  */
  addr = force_reg (DImode, XEXP (mem, 0));

  /* Align it to a multiple of 8.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
			       NULL_RTX, 1, OPTAB_DIRECT);

  /* Insert val into the correct byte location within the word.  */
  val = emit_insxl (mode, val, addr);

  wdst = gen_reg_rtx (DImode);
  if (mode == QImode)
    fn4 = gen_sync_lock_test_and_setqi_1;
  else
    fn4 = gen_sync_lock_test_and_sethi_1;
  emit_insn (fn4 (wdst, addr, val, align));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
				  rtx val, rtx align, rtx scratch)
{
  rtx label, mem, width, mask, x;

  mem = gen_rtx_MEM (DImode, align);
  MEM_VOLATILE_P (mem) = 1;

  emit_insn (gen_memory_barrier ());
  label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (DImode, scratch, mem);

  width = GEN_INT (GET_MODE_BITSIZE (mode));
  mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  if (WORDS_BIG_ENDIAN)
    {
      emit_insn (gen_extxl_be (dest, scratch, width, addr));
      emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
    }
  else
    {
      emit_insn (gen_extxl_le (dest, scratch, width, addr));
      emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
    }
  emit_insn (gen_iordi3 (scratch, scratch, val));

  emit_store_conditional (DImode, scratch, mem, scratch);

  x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  emit_unlikely_jump (x, label);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */
  if (REG_NOTE_KIND (link) != 0)
    return cost;

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency - 1;

  /* Everything else handled in DFA bypasses now.  */

  return cost;
}
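/* For example, with the default alpha_memory_latency of 3, an insn
   consuming the result of an integer load (TYPE_ILD) has its
   dependency cost raised by alpha_memory_latency - 1 = 2 cycles.  */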
/* The number of instructions that can be issued per cycle.  */

static int
alpha_issue_rate (void)
{
  return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
}

/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.

   For EV4, loads can be issued to either IB0 or IB1, thus we have 2
   alternative schedules.  For EV5, we can choose between E0/E1 and
   FA/FM.  For EV6, an arithmetic insn can be issued to U0/U1/L0/L1.  */

static int
alpha_multipass_dfa_lookahead (void)
{
  return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
}
/* Machine-specific function data.  */

struct machine_function GTY(())
{
  /* For unicosmk.  */
  /* List of call information words for calls from this function.  */
  struct rtx_def *first_ciw;
  struct rtx_def *last_ciw;
  int ciw_count;

  /* List of deferred case vectors.  */
  struct rtx_def *addr_list;

  /* For OSF.  */
  const char *some_ld_name;

  /* For TARGET_LD_BUGGY_LDGP.  */
  struct rtx_def *gp_save_rtx;
};

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
alpha_init_machine_status (void)
{
  return ((struct machine_function *)
	  ggc_alloc_cleared (sizeof (struct machine_function)));
}
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}
/* Return or create a memory slot containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx (void)
{
  rtx seq, m = cfun->machine->gp_save_rtx;

  if (m == NULL)
    {
      start_sequence ();

      m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
      m = validize_mem (m);
      emit_move_insn (m, pic_offset_table_rtx);

      seq = get_insns ();
      end_sequence ();
      emit_insn_at_entry (seq);

      cfun->machine->gp_save_rtx = m;
    }

  return m;
}
static int
alpha_ra_ever_killed (void)
{
  rtx top;

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return regs_ever_live[REG_RA];

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}
/* Return the trap mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_trap_mode_suffix (void)
{
  enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);

  switch (s)
    {
    case TRAP_SUFFIX_NONE:
      return NULL;

    case TRAP_SUFFIX_SU:
      if (alpha_fptm >= ALPHA_FPTM_SU)
	return "su";
      return NULL;

    case TRAP_SUFFIX_SUI:
      if (alpha_fptm >= ALPHA_FPTM_SUI)
	return "sui";
      return NULL;

    case TRAP_SUFFIX_V_SV:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	case ALPHA_FPTM_SUI:
	  return "sv";
	default:
	  gcc_unreachable ();
	}

    case TRAP_SUFFIX_V_SV_SVI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "v";
	case ALPHA_FPTM_SU:
	  return "sv";
	case ALPHA_FPTM_SUI:
	  return "svi";
	default:
	  gcc_unreachable ();
	}
      break;

    case TRAP_SUFFIX_U_SU_SUI:
      switch (alpha_fptm)
	{
	case ALPHA_FPTM_N:
	  return NULL;
	case ALPHA_FPTM_U:
	  return "u";
	case ALPHA_FPTM_SU:
	  return "su";
	case ALPHA_FPTM_SUI:
	  return "sui";
	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
/* Return the rounding mode suffix applicable to the current
   instruction, or NULL.  */

static const char *
get_round_mode_suffix (void)
{
  enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);

  switch (s)
    {
    case ROUND_SUFFIX_NONE:
      return NULL;

    case ROUND_SUFFIX_NORMAL:
      switch (alpha_fprm)
	{
	case ALPHA_FPRM_NORM:
	  return NULL;
	case ALPHA_FPRM_MINF:
	  return "m";
	case ALPHA_FPRM_CHOP:
	  return "c";
	case ALPHA_FPRM_DYN:
	  return "d";
	default:
	  gcc_unreachable ();
	}
      break;

    case ROUND_SUFFIX_C:
      return "c";

    default:
      gcc_unreachable ();
    }
  gcc_unreachable ();
}
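/* For example, when compiling with -mfp-rounding-mode=m (so alpha_fprm
   is ALPHA_FPRM_MINF), an FP insn with ROUND_SUFFIX_NORMAL gets the
   "m" suffix, which print_operand's %/ handler below emits as "/m" on
   assemblers that expect a slash before the suffix.  */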
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some movdi_er_tlsldm pattern.  */

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (FILE *file, rtx x, int code)
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '&':
      assemble_name (file, get_some_local_dynamic_name ());
      break;

    case '/':
      {
	const char *trap = get_trap_mode_suffix ();
	const char *round = get_round_mode_suffix ();

	if (trap || round)
	  fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
		   (trap ? trap : ""), (round ? round : ""));
	break;
      }

    case ',':
      /* Generates single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '+':
      /* Generates a nop after a noreturn call at the very end of the
	 function.  */
      if (next_real_insn (current_output_insn) == 0)
	fprintf (file, "\n\tnop");
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
	alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
	alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;

    case 'H':
      if (GET_CODE (x) == HIGH)
	output_addr_const (file, XEXP (x, 0));
      else
	output_operand_lossage ("invalid %%H value");
      break;

    case 'J':
      {
	const char *lituse;

	if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
	  {
	    x = XVECEXP (x, 0, 0);
	    lituse = "lituse_tlsgd";
	  }
	else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
	  {
	    x = XVECEXP (x, 0, 0);
	    lituse = "lituse_tlsldm";
	  }
	else if (GET_CODE (x) == CONST_INT)
	  lituse = "lituse_jsr";
	else
	  {
	    output_operand_lossage ("invalid %%J value");
	    break;
	  }

	if (x != const0_rtx)
	  fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;

    case 'j':
      {
	const char *lituse;

#ifdef HAVE_AS_JSRDIRECT_RELOCS
	lituse = "lituse_jsrdirect";
#else
	lituse = "lituse_jsr";
#endif

	gcc_assert (INTVAL (x) != 0);
	fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
      }
      break;
    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (GET_CODE (x) == REG)
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
	fprintf (file, "$31");
      else
	output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (GET_CODE (x) == REG)
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
	fprintf (file, "$f31");
      else
	output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
	       (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  HOST_WIDE_INT mask = 0;
	  HOST_WIDE_INT value;

	  value = CONST_DOUBLE_LOW (x);
	  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
	       i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);

	  value = CONST_DOUBLE_HIGH (x);
	  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
	       i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << (i + sizeof (int)));

	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
	}
      else if (GET_CODE (x) == CONST_INT)
	{
	  HOST_WIDE_INT mask = 0, value = INTVAL (x);

	  for (i = 0; i < 8; i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);

	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
	}
      else
	output_operand_lossage ("invalid %%m value");
      break;
    case 'M':
      /* 'b', 'w', 'l', or 'q' as the value of the constant.  */
      if (GET_CODE (x) != CONST_INT
	  || (INTVAL (x) != 8 && INTVAL (x) != 16
	      && INTVAL (x) != 32 && INTVAL (x) != 64))
	output_operand_lossage ("invalid %%M value");

      fprintf (file, "%s",
	       (INTVAL (x) == 8 ? "b"
		: INTVAL (x) == 16 ? "w"
		: INTVAL (x) == 32 ? "l"
		: "q"));
      break;

    case 'U':
      /* Similar, except do it from the mask.  */
      if (GET_CODE (x) == CONST_INT)
	{
	  HOST_WIDE_INT value = INTVAL (x);

	  if (value == 0xff)
	    {
	      fputc ('b', file);
	      break;
	    }
	  if (value == 0xffff)
	    {
	      fputc ('w', file);
	      break;
	    }
	  if (value == 0xffffffff)
	    {
	      fputc ('l', file);
	      break;
	    }
	  if (value == -1)
	    {
	      fputc ('q', file);
	      break;
	    }
	}
      else if (HOST_BITS_PER_WIDE_INT == 32
	       && GET_CODE (x) == CONST_DOUBLE
	       && CONST_DOUBLE_LOW (x) == 0xffffffff
	       && CONST_DOUBLE_HIGH (x) == 0)
	{
	  fputc ('l', file);
	  break;
	}
      output_operand_lossage ("invalid %%U value");
      break;

    case 's':
      /* Write the constant value divided by 8 for little-endian mode or
	 (56 - value) / 8 for big-endian mode.  */

      if (GET_CODE (x) != CONST_INT
	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
						     ? 56
						     : 64)
	  || (INTVAL (x) & 7) != 0)
	output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
	       WORDS_BIG_ENDIAN
	       ? (56 - INTVAL (x)) / 8
	       : INTVAL (x) / 8);
      break;

    case 'S':
      /* Same, except compute (64 - c) / 8 */

      if (GET_CODE (x) != CONST_INT
	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
	  || (INTVAL (x) & 7) != 0)
	output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
      break;

    case 't':
      {
	/* On Unicos/Mk systems: use a DEX expression if the symbol
	   clashes with a register name.  */
	int dex = unicosmk_need_dex (x);
	if (dex)
	  fprintf (file, "DEX(%d)", dex);
	else
	  output_addr_const (file, x);
      }
      break;
    case 'C': case 'D': case 'c': case 'd':
      /* Write out comparison name.  */
      {
	enum rtx_code c = GET_CODE (x);

	if (!COMPARISON_P (x))
	  output_operand_lossage ("invalid %%C value");

	else if (code == 'D')
	  c = reverse_condition (c);
	else if (code == 'c')
	  c = swap_condition (c);
	else if (code == 'd')
	  c = swap_condition (reverse_condition (c));

	if (c == LEU)
	  fprintf (file, "ule");
	else if (c == LTU)
	  fprintf (file, "ult");
	else if (c == UNORDERED)
	  fprintf (file, "un");
	else
	  fprintf (file, "%s", GET_RTX_NAME (c));
      }
      break;

    case 'E':
      /* Write the divide or modulus operator.  */
      switch (GET_CODE (x))
	{
	case DIV:
	  fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UDIV:
	  fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case MOD:
	  fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	case UMOD:
	  fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
	  break;
	default:
	  output_operand_lossage ("invalid %%E value");
	  break;
	}
      break;

    case 'A':
      /* Write "_u" for unaligned access.  */
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
	fprintf (file, "_u");
      break;

    case 0:
      if (GET_CODE (x) == REG)
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (GET_CODE (x) == MEM)
	output_address (XEXP (x, 0));
      else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
	{
	  switch (XINT (XEXP (x, 0), 1))
	    {
	    case UNSPEC_DTPREL:
	    case UNSPEC_TPREL:
	      output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
	      break;
	    default:
	      output_operand_lossage ("unknown relocation unspec");
	      break;
	    }
	}
      else
	output_addr_const (file, x);
      break;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
void
print_operand_address (FILE *file, rtx addr)
{
  int basereg = 31;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (addr) == AND)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS
      && GET_CODE (XEXP (addr, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    {
      const char *reloc16, *reloclo;
      rtx op1 = XEXP (addr, 1);

      if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
	{
	  op1 = XEXP (op1, 0);
	  switch (XINT (op1, 1))
	    {
	    case UNSPEC_DTPREL:
	      reloc16 = NULL;
	      reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
	      break;
	    case UNSPEC_TPREL:
	      reloc16 = NULL;
	      reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
	      break;
	    default:
	      output_operand_lossage ("unknown relocation unspec");
	      return;
	    }

	  output_addr_const (file, XVECEXP (op1, 0, 0));
	}
      else
	{
	  reloc16 = "gprel";
	  reloclo = "gprellow";
	  output_addr_const (file, op1);
	}

      if (offset)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);

      addr = XEXP (addr, 0);
      switch (GET_CODE (addr))
	{
	case REG:
	  basereg = REGNO (addr);
	  break;

	case SUBREG:
	  basereg = subreg_regno (addr);
	  break;

	default:
	  gcc_unreachable ();
	}

      fprintf (file, "($%d)\t\t!%s", basereg,
	       (basereg == 29 ? reloc16 : reloclo));
      return;
    }

  switch (GET_CODE (addr))
    {
    case REG:
      basereg = REGNO (addr);
      break;

    case SUBREG:
      basereg = subreg_regno (addr);
      break;

    case CONST_INT:
      offset = INTVAL (addr);
      break;

#if TARGET_ABI_OPEN_VMS
    case SYMBOL_REF:
      fprintf (file, "%s", XSTR (addr, 0));
      return;

    case CONST:
      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
      fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
	       XSTR (XEXP (XEXP (addr, 0), 0), 0),
	       INTVAL (XEXP (XEXP (addr, 0), 1)));
      return;
#endif

    default:
      gcc_unreachable ();
    }

  fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
}
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.

   The three offset parameters are for the individual template's
   layout.  A JMPOFS < 0 indicates that the trampoline does not
   contain instructions at all.

   We assume here that a function will be called many more times than
   its address is taken (e.g., it might be passed to qsort), so we
   take the trouble to initialize the "hint" field in the JMP insn.
   Note that the hint field is PC (new) + 4 * bits 13:0.  */

void
alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
			     int fnofs, int cxtofs, int jmpofs)
{
  rtx temp, temp1, addr;
  /* VMS really uses DImode pointers in memory at this point.  */
  enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (mode, fnaddr);
  cxt = convert_memory_address (mode, cxt);
#endif

  /* Store function address and CXT.  */
  addr = memory_address (mode, plus_constant (tramp, fnofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
  addr = memory_address (mode, plus_constant (tramp, cxtofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), cxt);

  /* This has been disabled since the hint only has a 32k range, and in
     no existing OS is the stack within 32k of the text segment.  */
  if (0 && jmpofs >= 0)
    {
      /* Compute hint value.  */
      temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
      temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
			   OPTAB_WIDEN);
      temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
			   build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
      temp = expand_and (SImode, gen_lowpart (SImode, temp),
			 GEN_INT (0x3fff), 0);

      /* Merge in the hint.  */
      addr = memory_address (SImode, plus_constant (tramp, jmpofs));
      temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
      temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
      temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
			    OPTAB_WIDEN);
      emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
    }

#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (init_one_libfunc ("__enable_execute_stack"),
		     0, VOIDmode, 1, tramp, Pmode);
#endif

  if (jmpofs >= 0)
    emit_insn (gen_imb ());
}
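/* A worked example of the (disabled) hint computation above: if the
   JMP instruction lives at address J = TRAMP + JMPOFS and the callee
   at FNADDR, the 14-bit hint is ((FNADDR - (J + 4)) >> 2) & 0x3fff,
   matching "PC (new) + 4 * bits 13:0" from the comment at the top.  */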
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */

rtx
function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
	      int named ATTRIBUTE_UNUSED)
{
  int basereg;
  int num_args;

  /* Don't get confused and pass small structures in FP registers.  */
  if (type && AGGREGATE_TYPE_P (type))
    basereg = 16;
  else
    {
#ifdef ENABLE_CHECKING
      /* With alpha_split_complex_arg, we shouldn't see any raw complex
	 values here.  */
      gcc_assert (!COMPLEX_MODE_P (mode));
#endif

      /* Set up defaults for FP operands passed in FP registers, and
	 integral operands passed in integer registers.  */
      if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
	basereg = 32 + 16;
      else
	basereg = 16;
    }

  /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
     the three platforms, so we can't avoid conditional compilation.  */
#if TARGET_ABI_OPEN_VMS
    {
      if (mode == VOIDmode)
	return alpha_arg_info_reg_val (cum);

      num_args = cum.num_args;
      if (num_args >= 6
	  || targetm.calls.must_pass_in_stack (mode, type))
	return NULL_RTX;
    }
#elif TARGET_ABI_UNICOSMK
    {
      int size;

      /* If this is the last argument, generate the call info word (CIW).  */
      /* ??? We don't include the caller's line number in the CIW because
	 I don't know how to determine it if debug infos are turned off.  */
      if (mode == VOIDmode)
	{
	  int i;
	  HOST_WIDE_INT lo;
	  HOST_WIDE_INT hi;
	  rtx ciw;

	  lo = 0;

	  for (i = 0; i < cum.num_reg_words && i < 5; i++)
	    if (cum.reg_args_type[i])
	      lo |= (1 << (7 - i));

	  if (cum.num_reg_words == 6 && cum.reg_args_type[5])
	    lo |= 7;
	  else
	    lo |= cum.num_reg_words;

#if HOST_BITS_PER_WIDE_INT == 32
	  hi = (cum.num_args << 20) | cum.num_arg_words;
#else
	  lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
	    | ((HOST_WIDE_INT) cum.num_arg_words << 32);
	  hi = 0;
#endif
	  ciw = immed_double_const (lo, hi, DImode);

	  return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
				 UNSPEC_UMK_LOAD_CIW);
	}

      size = ALPHA_ARG_SIZE (mode, type, named);
      num_args = cum.num_reg_words;
      if (cum.force_stack
	  || cum.num_reg_words + size > 6
	  || targetm.calls.must_pass_in_stack (mode, type))
	return NULL_RTX;
      else if (type && TYPE_MODE (type) == BLKmode)
	{
	  rtx reg1, reg2;

	  reg1 = gen_rtx_REG (DImode, num_args + 16);
	  reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);

	  /* The argument fits in two registers. Note that we still need to
	     reserve a register for empty structures.  */
	  if (size == 0)
	    return NULL_RTX;
	  else if (size == 1)
	    return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
	  else
	    {
	      reg2 = gen_rtx_REG (DImode, num_args + 17);
	      reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
	      return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
	    }
	}
    }
#elif TARGET_ABI_OSF
    {
      if (cum >= 6)
	return NULL_RTX;
      num_args = cum;

      /* VOID is passed as a special flag for "last argument".  */
      if (type == void_type_node)
	basereg = 16;
      else if (targetm.calls.must_pass_in_stack (mode, type))
	return NULL_RTX;
    }
#else
#error Unhandled ABI
#endif

  return gen_rtx_REG (mode, num_args + basereg);
}
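/* For example, under the OSF ABI a call such as f (int a, double b,
   int c) passes A in $16 (slot 0, basereg 16), B in $f17 (slot 1,
   basereg 32 + 16 since it is MODE_FLOAT), and C in $18 (slot 2);
   a seventh argument word would go on the stack.  (An illustrative
   sketch of the convention implemented above.)  */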
static int
alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			 enum machine_mode mode ATTRIBUTE_UNUSED,
			 tree type ATTRIBUTE_UNUSED,
			 bool named ATTRIBUTE_UNUSED)
{
  int words = 0;

#if TARGET_ABI_OPEN_VMS
  if (cum->num_args < 6
      && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - cum->num_args;
#elif TARGET_ABI_UNICOSMK
  /* Never any split arguments.  */
#elif TARGET_ABI_OSF
  if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
    words = 6 - *cum;
#else
#error Unhandled ABI
#endif

  return words * UNITS_PER_WORD;
}
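/* For example, on OSF a two-word argument arriving when *CUM == 5
   satisfies 5 < 6 < 7, so one word travels in $21 and this function
   reports 1 * UNITS_PER_WORD = 8 bytes passed in registers, with the
   remainder going to the stack.  */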
/* Return true if TYPE must be returned in memory, instead of in registers.  */

static bool
alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = VOIDmode;
  int size;

  if (type)
    {
      mode = TYPE_MODE (type);

      /* All aggregates are returned in memory.  */
      if (AGGREGATE_TYPE_P (type))
	return true;
    }

  size = GET_MODE_SIZE (mode);
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_VECTOR_FLOAT:
      /* Pass all float vectors in memory, like an aggregate.  */
      return true;

    case MODE_COMPLEX_FLOAT:
      /* We judge complex floats on the size of their element,
	 not the size of the whole type.  */
      size = GET_MODE_UNIT_SIZE (mode);
      break;

    case MODE_INT:
    case MODE_FLOAT:
    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      break;

    default:
      /* ??? We get called on all sorts of random stuff from
	 aggregate_value_p.  We must return something, but it's not
	 clear what's safe to return.  Pretend it's a struct I
	 know nothing about.  */
      return true;
    }

  /* Otherwise types must fit in one register.  */
  return size > UNITS_PER_WORD;
}
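/* For instance: an int (4 bytes) or a complex double (judged by its
   8-byte element) comes back in registers, while any aggregate, any
   float vector, or a 16-byte scalar such as a TFmode long double is
   forced into memory by the logic above.  */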
/* Return true if TYPE should be passed by invisible reference.  */

static bool
alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
			 enum machine_mode mode,
			 tree type ATTRIBUTE_UNUSED,
			 bool named ATTRIBUTE_UNUSED)
{
  return mode == TFmode || mode == TCmode;
}
/* Define how to find the value returned by a function.  VALTYPE is the
   data type of the value (as a tree).  If the precise function being
   called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
   MODE is set instead of VALTYPE for libcalls.

   On Alpha the value is found in $0 for integer functions and
   $f0 for floating-point functions.  */

rtx
function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
		enum machine_mode mode)
{
  unsigned int regnum, dummy;
  enum mode_class class;

  gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));

  if (valtype)
    mode = TYPE_MODE (valtype);

  class = GET_MODE_CLASS (mode);
  switch (class)
    {
    case MODE_INT:
      PROMOTE_MODE (mode, dummy, valtype);
      /* FALLTHRU */

    case MODE_COMPLEX_INT:
    case MODE_VECTOR_INT:
      regnum = 0;
      break;

    case MODE_FLOAT:
      regnum = 32;
      break;

    case MODE_COMPLEX_FLOAT:
      {
	enum machine_mode cmode = GET_MODE_INNER (mode);

	return gen_rtx_PARALLEL
	  (VOIDmode,
	   gen_rtvec (2,
		      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
					 const0_rtx),
		      gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
					 GEN_INT (GET_MODE_SIZE (cmode)))));
      }

    default:
      gcc_unreachable ();
    }

  return gen_rtx_REG (mode, regnum);
}
/* TCmode complex values are passed by invisible reference.  We
   should not split these values.  */

static bool
alpha_split_complex_arg (tree type)
{
  return TYPE_MODE (type) != TCmode;
}
static tree
alpha_build_builtin_va_list (void)
{
  tree base, ofs, space, record, type_decl;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++? SET_IS_AGGR_TYPE (record, 1); */

  /* Dummy field to prevent alignment warnings.  */
  space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
  DECL_FIELD_CONTEXT (space) = record;
  DECL_ARTIFICIAL (space) = 1;
  DECL_IGNORED_P (space) = 1;

  ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
		    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;
  TREE_CHAIN (ofs) = space;

  base = build_decl (FIELD_DECL, get_identifier ("__base"),
		     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  TREE_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  va_list_gpr_counter_field = ofs;
  return record;
}
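/* The record constructed above corresponds roughly to this C type
   (an illustrative sketch; the real layout comes from layout_type):

     struct __va_list_tag
     {
       char *__base;	/- start of the saved-register/overflow area -/
       int __offset;	/- byte offset of the next argument -/
       int;		/- unnamed dummy field, alignment padding only -/
     };  */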
/* Helper function for alpha_stdarg_optimize_hook.  Skip over casts
   and constant additions.  */

static tree
va_list_skip_additions (tree lhs)
{
  tree rhs, stmt;

  if (TREE_CODE (lhs) != SSA_NAME)
    return lhs;

  for (;;)
    {
      stmt = SSA_NAME_DEF_STMT (lhs);

      if (TREE_CODE (stmt) == PHI_NODE)
	return stmt;

      if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
	  || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
	return lhs;

      rhs = GIMPLE_STMT_OPERAND (stmt, 1);
      if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
	rhs = TREE_OPERAND (rhs, 0);

      if ((TREE_CODE (rhs) != NOP_EXPR
	   && TREE_CODE (rhs) != CONVERT_EXPR
	   && (TREE_CODE (rhs) != PLUS_EXPR
	       || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
	       || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
	  || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
	return rhs;

      lhs = TREE_OPERAND (rhs, 0);
    }
}
/* Check if LHS = RHS statement is
   LHS = *(ap.__base + ap.__offset + cst)
   or
   LHS = *(ap.__base
	   + ((ap.__offset + cst <= 47)
	      ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
   If the former, indicate that GPR registers are needed,
   if the latter, indicate that FPR registers are needed.

   Also look for LHS = (*ptr).field, where ptr is one of the forms
   listed above.

   On alpha, cfun->va_list_gpr_size is used as size of the needed
   regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
   registers are needed and bit 1 set if FPR registers are needed.
   Return true if va_list references should not be scanned for the
   current statement.  */

static bool
alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
{
  tree base, offset, arg1, arg2;
  int offset_arg = 1;

  while (handled_component_p (rhs))
    rhs = TREE_OPERAND (rhs, 0);
  if (TREE_CODE (rhs) != INDIRECT_REF
      || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
    return false;

  lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  if (lhs == NULL_TREE
      || TREE_CODE (lhs) != PLUS_EXPR)
    return false;

  base = TREE_OPERAND (lhs, 0);
  if (TREE_CODE (base) == SSA_NAME)
    base = va_list_skip_additions (base);

  if (TREE_CODE (base) != COMPONENT_REF
      || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
    {
      base = TREE_OPERAND (lhs, 1);
      if (TREE_CODE (base) == SSA_NAME)
	base = va_list_skip_additions (base);

      if (TREE_CODE (base) != COMPONENT_REF
	  || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
	return false;

      offset_arg = 0;
    }

  base = get_base_address (base);
  if (TREE_CODE (base) != VAR_DECL
      || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
    return false;

  offset = TREE_OPERAND (lhs, offset_arg);
  if (TREE_CODE (offset) == SSA_NAME)
    offset = va_list_skip_additions (offset);

  if (TREE_CODE (offset) == PHI_NODE)
    {
      HOST_WIDE_INT sub;

      if (PHI_NUM_ARGS (offset) != 2)
	goto escapes;

      arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
      arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
      if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
	{
	  tree tem = arg1;
	  arg1 = arg2;
	  arg2 = tem;

	  if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
	    goto escapes;
	}
      if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
	goto escapes;

      sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
      if (TREE_CODE (arg2) == MINUS_EXPR)
	sub = -sub;
      if (sub < -48 || sub > -32)
	goto escapes;

      arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
      if (arg1 != arg2)
	goto escapes;

      if (TREE_CODE (arg1) == SSA_NAME)
	arg1 = va_list_skip_additions (arg1);

      if (TREE_CODE (arg1) != COMPONENT_REF
	  || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
	  || get_base_address (arg1) != base)
	goto escapes;

      /* Need floating point regs.  */
      cfun->va_list_fpr_size |= 2;
    }
  else if (TREE_CODE (offset) != COMPONENT_REF
	   || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
	   || get_base_address (offset) != base)
    goto escapes;
  else
    /* Need general regs.  */
    cfun->va_list_fpr_size |= 1;
  return false;

 escapes:
  si->va_list_escapes = true;
  return false;
}
/* Perform any needed actions for a function that is receiving a
   variable number of arguments.  */

static void
alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
			      tree type, int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS cum = *pcum;

  /* Skip the current argument.  */
  FUNCTION_ARG_ADVANCE (cum, mode, type, 1);

#if TARGET_ABI_UNICOSMK
  /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
     arguments on the stack.  Unfortunately, it doesn't always store the first
     one (i.e. the one that arrives in $16 or $f16).  This is not a problem
     with stdargs as we always have at least one named argument there.  */
  if (cum.num_reg_words < 6)
    {
      if (!no_rtl)
	{
	  emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
	  emit_insn (gen_arg_home_umk ());
	}
      *pretend_size = 0;
    }
#elif TARGET_ABI_OPEN_VMS
  /* For VMS, we allocate space for all 6 arg registers plus a count.

     However, if NO registers need to be saved, don't allocate any space.
     This is not only because we won't need the space, but because AP
     includes the current_pretend_args_size and we don't want to mess up
     any ap-relative addresses already made.  */
  if (cum.num_args < 6)
    {
      if (!no_rtl)
	{
	  emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
	  emit_insn (gen_arg_home ());
	}
      *pretend_size = 7 * UNITS_PER_WORD;
    }
#else
  /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
     only push those that are remaining.  However, if NO registers need to
     be saved, don't allocate any space.  This is not only because we won't
     need the space, but because AP includes the current_pretend_args_size
     and we don't want to mess up any ap-relative addresses already made.

     If we are not to use the floating-point registers, save the integer
     registers where we would put the floating-point registers.  This is
     not the most efficient way to implement varargs with just one register
     class, but it isn't worth doing anything more efficient in this rare
     case.  */
  if (cum >= 6)
    return;

  if (!no_rtl)
    {
      int count, set = get_varargs_alias_set ();
      rtx tmp;

      count = cfun->va_list_gpr_size / UNITS_PER_WORD;
      if (count > 6 - cum)
	count = 6 - cum;

      /* Detect whether integer registers or floating-point registers
	 are needed by the detected va_arg statements.  See above for
	 how these values are computed.  Note that the "escape" value
	 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
	 these bits set.  */
      gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);

      if (cfun->va_list_fpr_size & 1)
	{
	  tmp = gen_rtx_MEM (BLKmode,
			     plus_constant (virtual_incoming_args_rtx,
					    (cum + 6) * UNITS_PER_WORD));
	  MEM_NOTRAP_P (tmp) = 1;
	  set_mem_alias_set (tmp, set);
	  move_block_from_reg (16 + cum, tmp, count);
	}

      if (cfun->va_list_fpr_size & 2)
	{
	  tmp = gen_rtx_MEM (BLKmode,
			     plus_constant (virtual_incoming_args_rtx,
					    cum * UNITS_PER_WORD));
	  MEM_NOTRAP_P (tmp) = 1;
	  set_mem_alias_set (tmp, set);
	  move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
	}
    }
  *pretend_size = 12 * UNITS_PER_WORD;
#endif
}
static void
alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT offset;
  tree t, offset_field, base_field;

  if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
    return;

  if (TARGET_ABI_UNICOSMK)
    std_expand_builtin_va_start (valist, nextarg);

  /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
     up by 48, storing fp arg registers in the first 48 bytes, and the
     integer arg registers in the next 48 bytes.  This is only done,
     however, if any integer registers need to be stored.

     If no integer registers need be stored, then we must subtract 48
     in order to account for the integer arg registers which are counted
     in argsize above, but which are not actually stored on the stack.
     Must further be careful here about structures straddling the last
     integer argument register; that futzes with pretend_args_size,
     which changes the meaning of AP.  */

  if (NUM_ARGS < 6)
    offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  else
    offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;

  if (TARGET_ABI_OPEN_VMS)
    {
      nextarg = plus_constant (nextarg, offset);
      nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
		  make_tree (ptr_type_node, nextarg));
      TREE_SIDE_EFFECTS (t) = 1;

      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      base_field = TYPE_FIELDS (TREE_TYPE (valist));
      offset_field = TREE_CHAIN (base_field);

      base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
			   valist, base_field, NULL_TREE);
      offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
			     valist, offset_field, NULL_TREE);

      t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
      t = build2 (PLUS_EXPR, ptr_type_node, t,
		  build_int_cst (NULL_TREE, offset));
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
		  offset_field, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
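/* A worked example for OSF: in a varargs function with two named
   argument words, NUM_ARGS is 2 < 6, so OFFSET is 6 * UNITS_PER_WORD
   = 48 and the code above sets __base = AP + 48 and __offset = 16.
   Integer va_args are then fetched at __base + __offset, while FP
   va_args come from 48 bytes below that; see alpha_gimplify_va_arg_1.
   (Illustrative sketch of the layout described in the comment above.)  */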
static tree
alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
{
  tree type_size, ptr_type, addend, t, addr, internal_post;

  /* If the type could not be passed in registers, skip the block
     reserved for the registers.  */
  if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
    {
      t = build_int_cst (TREE_TYPE (offset), 6*8);
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
		  build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
      gimplify_and_add (t, pre_p);
    }

  addend = offset;
  ptr_type = build_pointer_type (type);

  if (TREE_CODE (type) == COMPLEX_TYPE)
    {
      tree real_part, imag_part, real_temp;

      real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);

      /* Copy the value into a new temporary, lest the formal temporary
	 be reused out from under us.  */
      real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);

      imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
					   offset, pre_p);

      return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      tree fpaddend, cond, fourtyeight;

      fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
      fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
			      addend, fourtyeight);
      cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
      addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
			    fpaddend, addend);
    }

  /* Build the final address and force that value into a temporary.  */
  addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
		 fold_convert (ptr_type, addend));
  internal_post = NULL;
  gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  append_to_statement_list (internal_post, pre_p);

  /* Update the offset field.  */
  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  if (type_size == NULL || TREE_OVERFLOW (type_size))
    t = size_zero_node;
  else
    {
      t = size_binop (PLUS_EXPR, type_size, size_int (7));
      t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
      t = size_binop (MULT_EXPR, t, size_int (8));
    }
  /* The computation above rounds the increment up to a multiple of 8;
     e.g. a 12-byte argument advances the offset by 16.  */

  t = fold_convert (TREE_TYPE (offset), t);
  t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
	      build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
  gimplify_and_add (t, pre_p);

  return build_va_arg_indirect_ref (addr);
}
static tree
alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
{
  tree offset_field, base_field, offset, base, t, r;
  bool indirect;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  base_field = TYPE_FIELDS (va_list_type_node);
  offset_field = TREE_CHAIN (base_field);
  base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
		       valist, base_field, NULL_TREE);
  offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
			 valist, offset_field, NULL_TREE);

  /* Pull the fields of the structure out into temporaries.  Since we never
     modify the base field, we can use a formal temporary.  Sign-extend the
     offset field so that it's the proper width for pointer arithmetic.  */
  base = get_formal_tmp_var (base_field, pre_p);

  t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
  offset = get_initialized_tmp_var (t, pre_p, NULL);

  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  /* Find the value.  Note that this will be a stable indirection, or
     a composite of stable indirections in the case of complex.  */
  r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);

  /* Stuff the offset temporary back into its field.  */
  t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
	      fold_convert (TREE_TYPE (offset_field), offset));
  gimplify_and_add (t, pre_p);

  if (indirect)
    r = build_va_arg_indirect_ref (r);

  return r;
}
/* Builtins.  */

enum alpha_builtin
{
  ALPHA_BUILTIN_CMPBGE,
  ALPHA_BUILTIN_EXTBL,
  ALPHA_BUILTIN_EXTWL,
  ALPHA_BUILTIN_EXTLL,
  ALPHA_BUILTIN_EXTQL,
  ALPHA_BUILTIN_EXTWH,
  ALPHA_BUILTIN_EXTLH,
  ALPHA_BUILTIN_EXTQH,
  ALPHA_BUILTIN_INSBL,
  ALPHA_BUILTIN_INSWL,
  ALPHA_BUILTIN_INSLL,
  ALPHA_BUILTIN_INSQL,
  ALPHA_BUILTIN_INSWH,
  ALPHA_BUILTIN_INSLH,
  ALPHA_BUILTIN_INSQH,
  ALPHA_BUILTIN_MSKBL,
  ALPHA_BUILTIN_MSKWL,
  ALPHA_BUILTIN_MSKLL,
  ALPHA_BUILTIN_MSKQL,
  ALPHA_BUILTIN_MSKWH,
  ALPHA_BUILTIN_MSKLH,
  ALPHA_BUILTIN_MSKQH,
  ALPHA_BUILTIN_UMULH,
  ALPHA_BUILTIN_ZAP,
  ALPHA_BUILTIN_ZAPNOT,
  ALPHA_BUILTIN_AMASK,
  ALPHA_BUILTIN_IMPLVER,
  ALPHA_BUILTIN_RPCC,
  ALPHA_BUILTIN_THREAD_POINTER,
  ALPHA_BUILTIN_SET_THREAD_POINTER,

  /* TARGET_MAX */
  ALPHA_BUILTIN_MINUB8,
  ALPHA_BUILTIN_MINSB8,
  ALPHA_BUILTIN_MINUW4,
  ALPHA_BUILTIN_MINSW4,
  ALPHA_BUILTIN_MAXUB8,
  ALPHA_BUILTIN_MAXSB8,
  ALPHA_BUILTIN_MAXUW4,
  ALPHA_BUILTIN_MAXSW4,
  ALPHA_BUILTIN_PERR,
  ALPHA_BUILTIN_PKLB,
  ALPHA_BUILTIN_PKWB,
  ALPHA_BUILTIN_UNPKBL,
  ALPHA_BUILTIN_UNPKBW,

  /* TARGET_CIX */
  ALPHA_BUILTIN_CTTZ,
  ALPHA_BUILTIN_CTLZ,
  ALPHA_BUILTIN_CTPOP,

  ALPHA_BUILTIN_max
};
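/* An illustrative sketch of how a few of these builtins are used from
   user code (not compiled here; it requires an Alpha target):  */
#if 0
long
alpha_builtin_demo (long a, long b)
{
  long ge = __builtin_alpha_cmpbge (a, b);    /* per-byte a >= b mask */
  long lo = __builtin_alpha_zapnot (a, 0x0f); /* keep the low 4 bytes */
  long hi = __builtin_alpha_umulh (a, b);     /* high 64 bits of a * b */
  return ge ^ lo ^ hi;
}
#endif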
static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
  CODE_FOR_builtin_cmpbge,
  CODE_FOR_builtin_extbl,
  CODE_FOR_builtin_extwl,
  CODE_FOR_builtin_extll,
  CODE_FOR_builtin_extql,
  CODE_FOR_builtin_extwh,
  CODE_FOR_builtin_extlh,
  CODE_FOR_builtin_extqh,
  CODE_FOR_builtin_insbl,
  CODE_FOR_builtin_inswl,
  CODE_FOR_builtin_insll,
  CODE_FOR_builtin_insql,
  CODE_FOR_builtin_inswh,
  CODE_FOR_builtin_inslh,
  CODE_FOR_builtin_insqh,
  CODE_FOR_builtin_mskbl,
  CODE_FOR_builtin_mskwl,
  CODE_FOR_builtin_mskll,
  CODE_FOR_builtin_mskql,
  CODE_FOR_builtin_mskwh,
  CODE_FOR_builtin_msklh,
  CODE_FOR_builtin_mskqh,
  CODE_FOR_umuldi3_highpart,
  CODE_FOR_builtin_zap,
  CODE_FOR_builtin_zapnot,
  CODE_FOR_builtin_amask,
  CODE_FOR_builtin_implver,
  CODE_FOR_builtin_rpcc,
  CODE_FOR_load_tp,
  CODE_FOR_set_tp,

  /* TARGET_MAX */
  CODE_FOR_builtin_minub8,
  CODE_FOR_builtin_minsb8,
  CODE_FOR_builtin_minuw4,
  CODE_FOR_builtin_minsw4,
  CODE_FOR_builtin_maxub8,
  CODE_FOR_builtin_maxsb8,
  CODE_FOR_builtin_maxuw4,
  CODE_FOR_builtin_maxsw4,
  CODE_FOR_builtin_perr,
  CODE_FOR_builtin_pklb,
  CODE_FOR_builtin_pkwb,
  CODE_FOR_builtin_unpkbl,
  CODE_FOR_builtin_unpkbw,

  /* TARGET_CIX */
  CODE_FOR_ctzdi2,
  CODE_FOR_clzdi2,
  CODE_FOR_popcountdi2
};
struct alpha_builtin_def
{
  const char *name;
  enum alpha_builtin code;
  unsigned int target_mask;
  bool is_const;
};

static struct alpha_builtin_def const zero_arg_builtins[] = {
  { "__builtin_alpha_implver",	ALPHA_BUILTIN_IMPLVER,	0, true },
  { "__builtin_alpha_rpcc",	ALPHA_BUILTIN_RPCC,	0, false }
};
static struct alpha_builtin_def const one_arg_builtins[] = {
  { "__builtin_alpha_amask",	ALPHA_BUILTIN_AMASK,	0, true },
  { "__builtin_alpha_pklb",	ALPHA_BUILTIN_PKLB,	MASK_MAX, true },
  { "__builtin_alpha_pkwb",	ALPHA_BUILTIN_PKWB,	MASK_MAX, true },
  { "__builtin_alpha_unpkbl",	ALPHA_BUILTIN_UNPKBL,	MASK_MAX, true },
  { "__builtin_alpha_unpkbw",	ALPHA_BUILTIN_UNPKBW,	MASK_MAX, true },
  { "__builtin_alpha_cttz",	ALPHA_BUILTIN_CTTZ,	MASK_CIX, true },
  { "__builtin_alpha_ctlz",	ALPHA_BUILTIN_CTLZ,	MASK_CIX, true },
  { "__builtin_alpha_ctpop",	ALPHA_BUILTIN_CTPOP,	MASK_CIX, true }
};
static struct alpha_builtin_def const two_arg_builtins[] = {
  { "__builtin_alpha_cmpbge",	ALPHA_BUILTIN_CMPBGE,	0, true },
  { "__builtin_alpha_extbl",	ALPHA_BUILTIN_EXTBL,	0, true },
  { "__builtin_alpha_extwl",	ALPHA_BUILTIN_EXTWL,	0, true },
  { "__builtin_alpha_extll",	ALPHA_BUILTIN_EXTLL,	0, true },
  { "__builtin_alpha_extql",	ALPHA_BUILTIN_EXTQL,	0, true },
  { "__builtin_alpha_extwh",	ALPHA_BUILTIN_EXTWH,	0, true },
  { "__builtin_alpha_extlh",	ALPHA_BUILTIN_EXTLH,	0, true },
  { "__builtin_alpha_extqh",	ALPHA_BUILTIN_EXTQH,	0, true },
  { "__builtin_alpha_insbl",	ALPHA_BUILTIN_INSBL,	0, true },
  { "__builtin_alpha_inswl",	ALPHA_BUILTIN_INSWL,	0, true },
  { "__builtin_alpha_insll",	ALPHA_BUILTIN_INSLL,	0, true },
  { "__builtin_alpha_insql",	ALPHA_BUILTIN_INSQL,	0, true },
  { "__builtin_alpha_inswh",	ALPHA_BUILTIN_INSWH,	0, true },
  { "__builtin_alpha_inslh",	ALPHA_BUILTIN_INSLH,	0, true },
  { "__builtin_alpha_insqh",	ALPHA_BUILTIN_INSQH,	0, true },
  { "__builtin_alpha_mskbl",	ALPHA_BUILTIN_MSKBL,	0, true },
  { "__builtin_alpha_mskwl",	ALPHA_BUILTIN_MSKWL,	0, true },
  { "__builtin_alpha_mskll",	ALPHA_BUILTIN_MSKLL,	0, true },
  { "__builtin_alpha_mskql",	ALPHA_BUILTIN_MSKQL,	0, true },
  { "__builtin_alpha_mskwh",	ALPHA_BUILTIN_MSKWH,	0, true },
  { "__builtin_alpha_msklh",	ALPHA_BUILTIN_MSKLH,	0, true },
  { "__builtin_alpha_mskqh",	ALPHA_BUILTIN_MSKQH,	0, true },
  { "__builtin_alpha_umulh",	ALPHA_BUILTIN_UMULH,	0, true },
  { "__builtin_alpha_zap",	ALPHA_BUILTIN_ZAP,	0, true },
  { "__builtin_alpha_zapnot",	ALPHA_BUILTIN_ZAPNOT,	0, true },
  { "__builtin_alpha_minub8",	ALPHA_BUILTIN_MINUB8,	MASK_MAX, true },
  { "__builtin_alpha_minsb8",	ALPHA_BUILTIN_MINSB8,	MASK_MAX, true },
  { "__builtin_alpha_minuw4",	ALPHA_BUILTIN_MINUW4,	MASK_MAX, true },
  { "__builtin_alpha_minsw4",	ALPHA_BUILTIN_MINSW4,	MASK_MAX, true },
  { "__builtin_alpha_maxub8",	ALPHA_BUILTIN_MAXUB8,	MASK_MAX, true },
  { "__builtin_alpha_maxsb8",	ALPHA_BUILTIN_MAXSB8,	MASK_MAX, true },
  { "__builtin_alpha_maxuw4",	ALPHA_BUILTIN_MAXUW4,	MASK_MAX, true },
  { "__builtin_alpha_maxsw4",	ALPHA_BUILTIN_MAXSW4,	MASK_MAX, true },
  { "__builtin_alpha_perr",	ALPHA_BUILTIN_PERR,	MASK_MAX, true }
};

static GTY(()) tree alpha_v8qi_u;
static GTY(()) tree alpha_v8qi_s;
static GTY(()) tree alpha_v4hi_u;
static GTY(()) tree alpha_v4hi_s;
/* Helper function of alpha_init_builtins.  Add the COUNT built-in
   functions pointed to by P, with function type FTYPE.  */

static void
alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
		    tree ftype)
{
  tree decl;
  size_t i;

  for (i = 0; i < count; ++i, ++p)
    if ((target_flags & p->target_mask) == p->target_mask)
      {
	decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
				     NULL, NULL);
	if (p->is_const)
	  TREE_READONLY (decl) = 1;
	TREE_NOTHROW (decl) = 1;
      }
}
static void
alpha_init_builtins (void)
{
  tree dimode_integer_type_node;
  tree ftype, decl;

  dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);

  ftype = build_function_type (dimode_integer_type_node, void_list_node);
  alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
		      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
				    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
		      ftype);

  ftype = build_function_type_list (dimode_integer_type_node,
				    dimode_integer_type_node,
				    dimode_integer_type_node, NULL_TREE);
  alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
		      ftype);

  ftype = build_function_type (ptr_type_node, void_list_node);
  decl = add_builtin_function ("__builtin_thread_pointer", ftype,
			       ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
			       NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
			       ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
			       NULL, NULL);
  TREE_NOTHROW (decl) = 1;

  alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
alpha_expand_builtin (tree exp, rtx target,
		      rtx subtarget ATTRIBUTE_UNUSED,
		      enum machine_mode mode ATTRIBUTE_UNUSED,
		      int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 2

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg;
  call_expr_arg_iterator iter;
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;

  if (fcode >= ALPHA_BUILTIN_max)
    internal_error ("bad builtin fcode");
  icode = code_for_builtin[fcode];
  if (icode == 0)
    internal_error ("bad builtin fcode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;

      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity > MAX_ARGS)
	return NULL_RTX;

      insn_op = &insn_data[icode].operand[arity + nonvoid];

      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);

      if (!(*insn_op->predicate) (op[arity], insn_op->mode))
	op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
      arity++;
    }

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
/* Several bits below assume HWI >= 64 bits.  This should be enforced
   by config.gcc.  */
#if HOST_BITS_PER_WIDE_INT < 64
# error "HOST_WIDE_INT too small"
#endif
/* Fold the builtin for the CMPBGE instruction.  This is a vector comparison
   with an 8-bit output vector.  OPINT contains the integer operands; bit N
   of OP_CONST is set if OPINT[N] is valid.  */

static tree
alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
{
  if (op_const == 3)
    {
      int i, val;
      for (i = 0, val = 0; i < 8; ++i)
	{
	  unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
	  unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
	  if (c0 >= c1)
	    val |= 1 << i;
	}
      return build_int_cst (long_integer_type_node, val);
    }
  else if (op_const == 2 && opint[1] == 0)
    return build_int_cst (long_integer_type_node, 0xff);
  return NULL;
}
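/* A worked example of the fold above: cmpbge (0x0102, 0x0201) compares
   byte 0 (0x02 >= 0x01, true), byte 1 (0x01 >= 0x02, false), and
   bytes 2-7 (0x00 >= 0x00, true), giving 0xfd.  */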
/* Fold the builtin for the ZAPNOT instruction.  This is essentially a
   specialized form of an AND operation.  Other byte manipulation instructions
   are defined in terms of this instruction, so this is also used as a
   subroutine for other builtins.

   OP contains the tree operands; OPINT contains the extracted integer values.
   Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
   OPINT may be considered.  */

static tree
alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
			   long op_const)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT mask = 0;
      int i;

      for (i = 0; i < 8; ++i)
	if ((opint[1] >> i) & 1)
	  mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);

      if (op_const & 1)
	return build_int_cst (long_integer_type_node, opint[0] & mask);

      if (op)
	return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
			    build_int_cst (long_integer_type_node, mask));
    }
  else if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);
  return NULL;
}
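/* For example, a constant byte selector of 0x0f expands above to the
   bit mask 0x00000000ffffffff, so zapnot (x, 0x0f) folds to
   x & 0xffffffff.  */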
/* Fold the builtins for the EXT family of instructions.  */

static tree
alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  long zap_const = 2;
  tree *zap_op = NULL;

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
	loc ^= 7;
      loc *= 8;

      if (loc != 0)
	{
	  if (op_const & 1)
	    {
	      unsigned HOST_WIDE_INT temp = opint[0];
	      if (is_high)
		temp <<= loc;
	      else
		temp >>= loc;
	      opint[0] = temp;
	      zap_const = 3;
	    }
	}
      else
	zap_op = op;
    }

  opint[1] = bytemask;
  return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
}
/* Fold the builtins for the INS family of instructions.  */

static tree
alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  if ((op_const & 1) && opint[0] == 0)
    return build_int_cst (long_integer_type_node, 0);

  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT temp, loc, byteloc;
      tree *zap_op = NULL;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
	loc ^= 7;
      bytemask <<= loc;

      temp = opint[0];
      if (is_high)
	{
	  byteloc = (64 - (loc * 8)) & 0x3f;
	  if (byteloc == 0)
	    zap_op = op;
	  else
	    temp >>= byteloc;
	  bytemask >>= 8;
	}
      else
	{
	  byteloc = loc * 8;
	  if (byteloc == 0)
	    zap_op = op;
	  else
	    temp <<= byteloc;
	}

      opint[0] = temp;
      opint[1] = bytemask;
      return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
    }

  return NULL;
}
static tree
alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
			  long op_const, unsigned HOST_WIDE_INT bytemask,
			  bool is_high)
{
  if (op_const & 2)
    {
      unsigned HOST_WIDE_INT loc;

      loc = opint[1] & 7;
      if (BYTES_BIG_ENDIAN)
	loc ^= 7;
      bytemask <<= loc;

      if (is_high)
	bytemask >>= 8;

      opint[1] = bytemask ^ 0xff;
    }

  return alpha_fold_builtin_zapnot (op, opint, op_const);
}
static tree
alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
{
  switch (op_const)
    {
    case 3:
      {
	unsigned HOST_WIDE_INT l;
	HOST_WIDE_INT h;

	mul_double (opint[0], 0, opint[1], 0, &l, &h);

#if HOST_BITS_PER_WIDE_INT > 64
# error fixme
#endif

	return build_int_cst (long_integer_type_node, h);
      }

    case 1:
      opint[1] = opint[0];
      /* FALLTHRU */

    case 2:
      /* Note that (X*1) >> 64 == 0.  */
      if (opint[1] == 0 || opint[1] == 1)
	return build_int_cst (long_integer_type_node, 0);
      break;
    }
  return NULL;
}
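/* For example, umulh ((long) 1 << 32, (long) 1 << 32) folds to 1,
   since the 128-bit product is exactly 1 << 64.  */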
static tree
alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
{
  tree op0 = fold_convert (vtype, op[0]);
  tree op1 = fold_convert (vtype, op[1]);
  tree val = fold_build2 (code, vtype, op0, op1);
  return fold_convert (long_integer_type_node, val);
}
static tree
alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp = 0;
  int i;

  if (op_const != 3)
    return NULL;

  for (i = 0; i < 8; ++i)
    {
      unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
      unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
      if (a >= b)
	temp += a - b;
      else
	temp += b - a;
    }

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 24) & 0xff00;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] >> 8) & 0xff00;
  temp |= (opint[0] >> 16) & 0xff0000;
  temp |= (opint[0] >> 24) & 0xff000000;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0xff00) << 24;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  temp = opint[0] & 0xff;
  temp |= (opint[0] & 0x0000ff00) << 8;
  temp |= (opint[0] & 0x00ff0000) << 16;
  temp |= (opint[0] & 0xff000000) << 24;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = exact_log2 (opint[0] & -opint[0]);

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp;

  if (op_const == 0)
    return NULL;

  if (opint[0] == 0)
    temp = 64;
  else
    temp = 64 - floor_log2 (opint[0]) - 1;

  return build_int_cst (long_integer_type_node, temp);
}
static tree
alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
{
  unsigned HOST_WIDE_INT temp, op;

  if (op_const == 0)
    return NULL;

  op = opint[0];
  temp = 0;
  while (op)
    temp++, op &= op - 1;

  return build_int_cst (long_integer_type_node, temp);
}
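/* The loop above is the classic Kernighan trick: OP &= OP - 1 clears
   the lowest set bit, so the loop body runs once per set bit.  For
   example, ctpop (0xf0) executes four iterations and folds to 4.  */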
/* Fold one of our builtin functions.  */

static tree
alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
{
  tree op[MAX_ARGS], t;
  unsigned HOST_WIDE_INT opint[MAX_ARGS];
  long op_const = 0, arity = 0;

  for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
    {
      tree arg = TREE_VALUE (t);
      if (arg == error_mark_node)
	return NULL;
      if (arity >= MAX_ARGS)
	return NULL;

      op[arity] = arg;
      opint[arity] = 0;
      if (TREE_CODE (arg) == INTEGER_CST)
	{
	  op_const |= 1L << arity;
	  opint[arity] = int_cst_value (arg);
	}
    }

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case ALPHA_BUILTIN_CMPBGE:
      return alpha_fold_builtin_cmpbge (opint, op_const);

    case ALPHA_BUILTIN_EXTBL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_EXTWL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_EXTLL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_EXTQL:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_EXTWH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_EXTLH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_EXTQH:
      return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_INSBL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_INSWL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_INSLL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_INSQL:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_INSWH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_INSLH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_INSQH:
      return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_MSKBL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
    case ALPHA_BUILTIN_MSKWL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
    case ALPHA_BUILTIN_MSKLL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
    case ALPHA_BUILTIN_MSKQL:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
    case ALPHA_BUILTIN_MSKWH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
    case ALPHA_BUILTIN_MSKLH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
    case ALPHA_BUILTIN_MSKQH:
      return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);

    case ALPHA_BUILTIN_UMULH:
      return alpha_fold_builtin_umulh (opint, op_const);

    case ALPHA_BUILTIN_ZAP:
      opint[1] ^= 0xff;
      /* FALLTHRU */
    case ALPHA_BUILTIN_ZAPNOT:
      return alpha_fold_builtin_zapnot (op, opint, op_const);

    case ALPHA_BUILTIN_MINUB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MINSB8:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MINUW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MINSW4:
      return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
    case ALPHA_BUILTIN_MAXUB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
    case ALPHA_BUILTIN_MAXSB8:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
    case ALPHA_BUILTIN_MAXUW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
    case ALPHA_BUILTIN_MAXSW4:
      return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);

    case ALPHA_BUILTIN_PERR:
      return alpha_fold_builtin_perr (opint, op_const);
    case ALPHA_BUILTIN_PKLB:
      return alpha_fold_builtin_pklb (opint, op_const);
    case ALPHA_BUILTIN_PKWB:
      return alpha_fold_builtin_pkwb (opint, op_const);
    case ALPHA_BUILTIN_UNPKBL:
      return alpha_fold_builtin_unpkbl (opint, op_const);
    case ALPHA_BUILTIN_UNPKBW:
      return alpha_fold_builtin_unpkbw (opint, op_const);

    case ALPHA_BUILTIN_CTTZ:
      return alpha_fold_builtin_cttz (opint, op_const);
    case ALPHA_BUILTIN_CTLZ:
      return alpha_fold_builtin_ctlz (opint, op_const);
    case ALPHA_BUILTIN_CTPOP:
      return alpha_fold_builtin_ctpop (opint, op_const);

    case ALPHA_BUILTIN_AMASK:
    case ALPHA_BUILTIN_IMPLVER:
    case ALPHA_BUILTIN_RPCC:
    case ALPHA_BUILTIN_THREAD_POINTER:
    case ALPHA_BUILTIN_SET_THREAD_POINTER:
      /* None of these are foldable at compile-time.  */
    default:
      return NULL;
    }
}
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
7076 /* Compute register masks for saved registers. */
7079 alpha_sa_mask (unsigned long *imaskP
, unsigned long *fmaskP
)
7081 unsigned long imask
= 0;
7082 unsigned long fmask
= 0;
7085 /* When outputting a thunk, we don't have valid register life info,
7086 but assemble_start_function wants to output .frame and .mask
7088 if (current_function_is_thunk
)
7095 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
7096 imask
|= (1UL << HARD_FRAME_POINTER_REGNUM
);
7098 /* One for every register we have to save. */
7099 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
7100 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
7101 && regs_ever_live
[i
] && i
!= REG_RA
7102 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
7105 imask
|= (1UL << i
);
7107 fmask
|= (1UL << (i
- 32));
7110 /* We need to restore these for the handler. */
7111 if (current_function_calls_eh_return
)
7115 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
7116 if (regno
== INVALID_REGNUM
)
7118 imask
|= 1UL << regno
;
7122 /* If any register spilled, then spill the return address also. */
7123 /* ??? This is required by the Digital stack unwind specification
7124 and isn't needed if we're doing Dwarf2 unwinding. */
7125 if (imask
|| fmask
|| alpha_ra_ever_killed ())
7126 imask
|= (1UL << REG_RA
);
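/* Illustrative example (not from the original source): a function that
   clobbers the call-saved registers $9 and $f2 and must spill them gets
   imask with bit 9 set and fmask with bit 2 set ($f2 is hard register
   34, recorded as bit 34-32); because a register was spilled, bit 26
   (REG_RA) is then also set in imask by the clause above.  */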
int
alpha_sa_size (void)
{
  unsigned long mask[2];
  int sa_size = 0;
  int i, j;

  alpha_sa_mask (&mask[0], &mask[1]);

  if (TARGET_ABI_UNICOSMK)
    {
      if (mask[0] || mask[1])
	sa_size = 14;
    }
  else
    {
      for (j = 0; j < 2; ++j)
	for (i = 0; i < 32; ++i)
	  if ((mask[j] >> i) & 1)
	    sa_size++;
    }

  if (TARGET_ABI_UNICOSMK)
    {
      /* We might not need to generate a frame if we don't make any calls
	 (including calls to __T3E_MISMATCH if this is a vararg function),
	 don't have any local variables which require stack slots, don't
	 use alloca and have not determined that we need a frame for other
	 reasons.  */

      alpha_procedure_type
	= (sa_size || get_frame_size() != 0
	   || current_function_outgoing_args_size
	   || current_function_stdarg || current_function_calls_alloca
	   || frame_pointer_needed)
	  ? PT_STACK : PT_REGISTER;

      /* Always reserve space for saving callee-saved registers if we
	 need a frame as required by the calling convention.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size = 14;
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Start by assuming we can use a register procedure if we don't
	 make any calls (REG_RA not used) or need to save any
	 registers and a stack procedure if we do.  */
      if ((mask[0] >> REG_RA) & 1)
	alpha_procedure_type = PT_STACK;
      else if (get_frame_size() != 0)
	alpha_procedure_type = PT_REGISTER;
      else
	alpha_procedure_type = PT_NULL;

      /* Don't reserve space for saving FP & RA yet.  Do that later after we've
	 made the final decision on stack procedure vs register procedure.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size -= 2;

      /* Decide whether to refer to objects off our PV via FP or PV.
	 If we need FP for something else or if we receive a nonlocal
	 goto (which expects PV to contain the value), we must use PV.
	 Otherwise, start by assuming we can use FP.  */

      vms_base_regno
	= (frame_pointer_needed
	   || current_function_has_nonlocal_label
	   || alpha_procedure_type == PT_STACK
	   || current_function_outgoing_args_size)
	  ? REG_PV : HARD_FRAME_POINTER_REGNUM;

      /* If we want to copy PV into FP, we need to find some register
	 in which to save FP.  */

      vms_save_fp_regno = -1;
      if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
	for (i = 0; i < 32; i++)
	  if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
	    vms_save_fp_regno = i;

      if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
	vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
      else if (alpha_procedure_type == PT_NULL)
	vms_base_regno = REG_PV;

      /* Stack unwinding should be done via FP unless we use it for PV.  */
      vms_unwind_regno = (vms_base_regno == REG_PV
			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

      /* If this is a stack procedure, allow space for saving FP and RA.  */
      if (alpha_procedure_type == PT_STACK)
	sa_size += 2;
    }
  else
    {
      /* Our size must be even (multiple of 16 bytes).  */
      if (sa_size & 1)
	sa_size++;
    }

  return sa_size * 8;
}
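/* Illustrative arithmetic (added; not in the original): on OSF, three
   saved registers give sa_size == 3, which the final clause rounds up
   to 4 slots; multiplied by 8 that is a 32-byte save area, keeping the
   frame 16-byte aligned.  */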
/* Define the offset between two registers, one to be eliminated,
   and the other its replacement, at the start of a routine.  */

HOST_WIDE_INT
alpha_initial_elimination_offset (unsigned int from,
				  unsigned int to ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT ret;

  ret = alpha_sa_size ();
  ret += ALPHA_ROUND (current_function_outgoing_args_size);

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      break;

    case ARG_POINTER_REGNUM:
      ret += (ALPHA_ROUND (get_frame_size ()
			   + current_function_pretend_args_size)
	      - current_function_pretend_args_size);
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

int
alpha_pv_save_size (void)
{
  alpha_sa_size ();
  return alpha_procedure_type == PT_STACK ? 8 : 0;
}

int
alpha_using_fp (void)
{
  alpha_sa_size ();
  return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
}
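/* Worked example (illustrative only): with a 32-byte save area, 24
   bytes of outgoing arguments (ALPHA_ROUND gives 32) and a 40-byte
   frame with no pretend args, the FRAME_POINTER elimination offset is
   32 + 32 = 64, and the ARG_POINTER offset adds ALPHA_ROUND (40) = 48
   on top of that, giving 112.  */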
#if TARGET_ABI_OPEN_VMS

const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};

#endif
static int
find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
}

int
alpha_find_lo_sum_using_gp (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
}

static int
alpha_does_function_need_gp (void)
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  /* We need the gp to load the address of __mcount.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    return 1;

  /* The code emitted by alpha_output_mi_thunk_osf uses the gp.  */
  if (current_function_is_thunk)
    return 1;

  /* The nonlocal receiver pattern assumes that the gp is valid for
     the nested function.  Reasonable because it's almost always set
     correctly already.  For the cases where that's wrong, make sure
     the nested function loads its gp on entry.  */
  if (current_function_has_nonlocal_goto)
    return 1;

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& ! JUMP_TABLE_DATA_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER
	&& get_attr_usegp (insn))
      return 1;

  return 0;
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p (void)
{
  rtx seq = get_insns ();
  rtx insn;

  end_sequence ();

  if (!seq)
    return NULL_RTX;

  if (INSN_P (seq))
    {
      insn = seq;
      while (insn != NULL_RTX)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = NEXT_INSN (insn);
	}
      seq = emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
    }
  return seq;
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
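/* Usage sketch (illustrative): FRP wraps an emission in a
   start_sequence/set_frame_related_p pair, so

       FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

   emits the move with RTX_FRAME_RELATED_P set on every insn produced,
   which is what the dwarf2 unwind machinery keys off of.  */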
/* Generates a store with the proper unwind info attached.  VALUE is
   stored at BASE_REG+BASE_OFS.  If FRAME_BIAS is nonzero, then BASE_REG
   contains SP+FRAME_BIAS, and that is the unwind info that should be
   generated.  If FRAME_REG != VALUE, then VALUE is being stored on
   behalf of FRAME_REG, and FRAME_REG should be present in the unwind.  */

static void
emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
		    HOST_WIDE_INT base_ofs, rtx frame_reg)
{
  rtx addr, mem, insn;

  addr = plus_constant (base_reg, base_ofs);
  mem = gen_rtx_MEM (DImode, addr);
  set_mem_alias_set (mem, alpha_sr_alias_set);

  insn = emit_move_insn (mem, value);
  RTX_FRAME_RELATED_P (insn) = 1;

  if (frame_bias || value != frame_reg)
    {
      if (frame_bias)
	{
	  addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
	  mem = gen_rtx_MEM (DImode, addr);
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, mem, frame_reg),
			     REG_NOTES (insn));
    }
}

static void
emit_frame_store (unsigned int regno, rtx base_reg,
		  HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
{
  rtx reg = gen_rtx_REG (DImode, regno);
  emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
}
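/* Usage sketch (illustrative): the prologue below calls, for example,

       emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);

   to store $26 at sa_reg+reg_offset while describing the slot to the
   unwinder as SP + sa_bias + reg_offset.  */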
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
	these are 'normal' functions with local vars and which are
	calling other functions
   - register frame (PROC_REGISTER)
	keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor)
   This is done with the '.pdesc' command.

   On not-vms, we don't really differentiate between the two, as we can
   simply allocate stack without saving registers.  */

void
alpha_expand_prologue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  rtx sa_reg;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    /* We have to allocate space for the DSIB if we generate a frame.  */
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */

  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
			     GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}

      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it from sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */

	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;

	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	{
	  seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				       GEN_INT (-leftover)));
	}

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note it looks at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			       gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					     GEN_INT (TARGET_ABI_UNICOSMK
						      ? -frame_size + 64
						      : -frame_size))),
			     REG_NOTES (seq));
    }

  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}

      /* Save regs in stack order.  Beginning with VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
	emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it.  We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ?? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (current_function_outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (current_function_outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed. Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       current_function_outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	      FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					   stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}
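/* Worked example for the probe loop above (illustrative; numbers not
   from the original): for frame_size == 50000 we get
   blocks == (50000 + 4096) / 8192 == 6 and
   leftover == 54096 - 6*8192 == 4944.  The loop probes six 8192-byte
   blocks; since leftover > 4096, one extra volatile store probes the
   final page when no registers are being saved, and SP ends up exactly
   50000 bytes lower.  */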
/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* The maximum debuggable frame size (512 Kbytes using Tru64 as).  */
  unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
					  ? 524288
					  : 1UL << 31;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one line functions as they
     would otherwise get no line number at all. Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! current_function_is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + current_function_pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= max_frame_size ? 0 : frame_size,
	     current_function_pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= max_frame_size ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed cause link_section are only available then.  */
  switch_to_section (readonly_data_section);
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  switch_to_section (text_section);
#endif
}
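/* Illustrative OSF output (for a hypothetical function "foo" that saves
   $9 and $26 in a 48-byte frame; not produced by the original authors):

	.ent foo
   foo:
	.frame $30,48,$26,0
	.mask 0x4000200,-48

   0x4000200 has bits 9 and 26 set; the matching .prologue directive is
   emitted later by alpha_output_function_end_prologue.  */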
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || current_function_is_thunk);
}
/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (current_function_calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
						     reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}
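/* Worked example (illustrative): for frame_size == 0x12345678 the
   decomposition above gives low == 0x5678 and a high part of
   0x12340000, so the SP restore becomes roughly

       ldah $23,0x1234($30)	# sp_adj1 = sp + 0x12340000
       lda $30,0x5678($23)	# sp = sp_adj1 + 0x5678

   When the low 16 bits are >= 0x8000, low goes negative and the high
   part compensates, matching lda's sign-extended displacement.  */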
/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}
#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this, insn, funexp;

  reset_block_changes ();

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 17);
  else
    this = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */
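/* Worked example (illustrative): DELTA == 0x18008 has its low 16 bits
   >= 0x8000, so lo == -0x7ff8 and hi == 0x20000; hi + lo == DELTA, and
   the adjustment is emitted as

       ldah $16,2($16)		# this += 0x20000
       lda $16,-32760($16)	# this += -0x7ff8

   instead of loading the full constant into a scratch register.  */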
/* Debugging support.  */

/* Count the number of sdb related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    return;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i  |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}
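/* Illustrative example (not from the original source): summarizing
   (set (reg:DI 3) (plus:DI (reg:DI 1) (reg:DI 2))) records bits 1 and
   2 in sum->used.i (the operands) and bit 3 in sum->defd.i (the
   result), which is exactly the information the trap-shadow checks
   below consume.  */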
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_LINE_NUMBER (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn: */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b))  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
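/* Illustrative scenario (added; not in the original): under
   -mtrap-precision=i, an FP add such as addt $f1,$f2,$f3 opens a trap
   shadow with $f3 recorded as defined.  A later insn inside the shadow
   that also writes $f3 would violate rule (c), so close_shadow emits a
   trapb before it.  */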
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:  case TYPE_LDSYM:  case TYPE_FLD:  case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:  case TYPE_ILOG:  case TYPE_ICMOV:  case TYPE_ICMP:
    case TYPE_FST:   case TYPE_SHIFT: case TYPE_IMUL:   case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:   case TYPE_MISC:  case TYPE_IBR:   case TYPE_JSR:
    case TYPE_CALLPAL: case TYPE_FCPYS: case TYPE_FCMOV: case TYPE_FADD:
    case TYPE_FDIV:  case TYPE_FMUL:  case TYPE_ST_C:  case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:  case TYPE_FLD:   case TYPE_LDSYM:  case TYPE_IADD:
    case TYPE_ILOG: case TYPE_ICMOV: case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:  case TYPE_FST:   case TYPE_SHIFT:  case TYPE_IMUL:
    case TYPE_MISC: case TYPE_MVI:   case TYPE_LD_L:   case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:  case TYPE_JSR:   case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:  case TYPE_FCMOV: case TYPE_FADD:   case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
	{
	case EV5_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	/* ??? Most of the places below, we would like to assert never
	   happen, as it would indicate an error either in Haifa, or
	   in the scheduling description.  Unfortunately, Haifa never
	   schedules the last instruction of the BB, so we don't have
	   an accurate TI bit to go off.  */
	case EV5_E01:
	  if (in_use & EV5_E0)
	    {
	      if (in_use & EV5_E1)
		goto done;
	      in_use |= EV5_E1;
	    }
	  else
	    in_use |= EV5_E0 | EV5_E01;
	  break;

	case EV5_E0:
	  if (in_use & EV5_E0)
	    {
	      if (!(in_use & EV5_E01) || (in_use & EV5_E1))
		goto done;
	      in_use |= EV5_E1;
	    }
	  in_use |= EV5_E0;
	  break;

	case EV5_E1:
	  if (in_use & EV5_E1)
	    goto done;
	  in_use |= EV5_E1;
	  break;

	case EV5_FAM:
	  if (in_use & EV5_FA)
	    {
	      if (in_use & EV5_FM)
		goto done;
	      in_use |= EV5_FM;
	    }
	  else
	    in_use |= EV5_FA | EV5_FAM;
	  break;

	case EV5_FA:
	  if (in_use & EV5_FA)
	    goto done;
	  in_use |= EV5_FA;
	  break;

	case EV5_FM:
	  if (in_use & EV5_FM)
	    goto done;
	  in_use |= EV5_FM;
	  break;

	case EV5_NONE:
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
	 that no more IBR, FBR, or JSR insns may be slotted.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}
static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}
/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten branches care for assigning alignments to code labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (GET_CODE (i) == NOTE)
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (GET_CODE (i) == CODE_LABEL)
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || GET_CODE (where) != CODE_LABEL)
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && GET_CODE (prev) == CALL_INSN))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (GET_CODE (where) == CODE_LABEL)
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && GET_CODE (where2) == JUMP_INSN)
		    where = where2;
		}
	      else if (GET_CODE (where) == INSN)
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}
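/* Worked example (illustrative): with align == 16 (EV5) and the
   previous groups ending at ofs == 8, a 12-byte group would straddle
   the 16-byte boundary, so nop_count == (16 - 8) / 4 == 2 and two nops
   chosen by the next_nop callback pad out the previous group.  */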
/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}
#if !TARGET_ABI_UNICOSMK

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif
#ifdef OBJECT_FORMAT_ELF
/* Since we don't have a .dynbss section, we should not allow global
   relocations in the .rodata section.  */

static int
alpha_elf_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 2;
}

/* Return a section for X.  The only special thing we do here is to
   honor small data.  */

static section *
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    return sdata_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}

#endif /* OBJECT_FORMAT_ELF */
/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct alpha_links GTY(())
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct alpha_funcs GTY(())
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
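/* Layout sketch (illustrative): bits 0-7 of the AI register hold the
   argument count and each argument gets a 3-bit type field, so for two
   arguments of VMS types FS and I64 the value is
   2 | (FS << 8) | (I64 << 11).  */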
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return a SYMBOL_REF rtx for the linkage.  */

rtx
alpha_need_linkage (const char *name, int is_local)
{
  splay_tree_node node;
  struct alpha_links *al;

  if (name[0] == '*')
    name++;

  if (is_local)
    {
      struct alpha_funcs *cfaf;

      if (!alpha_funcs_tree)
        alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
                                               splay_tree_compare_pointers);

      cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));

      cfaf->links = 0;
      cfaf->num = ++alpha_funcs_num;

      splay_tree_insert (alpha_funcs_tree,
                         (splay_tree_key) current_function_decl,
                         (splay_tree_value) cfaf);
    }

  if (alpha_links_tree)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          al = (struct alpha_links *) node->value;
          if (is_local)
            {
              /* Defined here but external assumed.  */
              if (al->lkind == KIND_EXTERN)
                al->lkind = KIND_LOCAL;
            }
          else
            {
              /* Used here but unused assumed.  */
              if (al->lkind == KIND_UNUSED)
                al->lkind = KIND_LOCAL;
            }
          return al->linkage;
        }
    }
  else
    alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
  name = ggc_strdup (name);

  /* Assume external if no definition.  */
  al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = alloca (name_len + 6);
    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
                                      ggc_alloc_string (linksym, name_len + 5));
  }

  splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
                     (splay_tree_value) al);

  return al->linkage;
}
rtx
alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
{
  splay_tree_node cfunnode;
  struct alpha_funcs *cfaf;
  struct alpha_links *al;
  const char *name = XSTR (linkage, 0);

  cfaf = (struct alpha_funcs *) 0;
  al = (struct alpha_links *) 0;

  cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
  cfaf = (struct alpha_funcs *) cfunnode->value;

  if (cfaf->links)
    {
      splay_tree_node lnode;

      /* Is this name already defined?  */

      lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
      if (lnode)
        al = (struct alpha_links *) lnode->value;
    }
  else
    cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);

  if (!al)
    {
      size_t name_len;
      size_t buflen;
      char buf [512];
      char *linksym;
      splay_tree_node node = 0;
      struct alpha_links *anl;

      if (name[0] == '*')
        name++;

      name_len = strlen (name);

      al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
      al->num = cfaf->num;

      node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
      if (node)
        {
          anl = (struct alpha_links *) node->value;
          al->lkind = anl->lkind;
        }

      sprintf (buf, "$%d..%s..lk", cfaf->num, name);
      buflen = strlen (buf);
      linksym = alloca (buflen + 1);
      memcpy (linksym, buf, buflen + 1);

      al->linkage = gen_rtx_SYMBOL_REF
        (Pmode, ggc_alloc_string (linksym, buflen + 1));

      splay_tree_insert (cfaf->links, (splay_tree_key) name,
                         (splay_tree_value) al);
    }

  if (rflag)
    al->rkind = KIND_CODEADDR;
  else
    al->rkind = KIND_LINKAGE;

  if (lflag)
    return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
  else
    return al->linkage;
}
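/* Illustrative example (names assumed): in the function numbered 1, a
   reference to "bar" produces the per-function symbol "$1..bar..lk".
   When LFLAG is set the caller is handed a MEM at that symbol plus 8,
   i.e. the second quadword of the linkage pair.  */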
static int
alpha_write_one_linkage (splay_tree_node node, void *data)
{
  const char *const name = (const char *) node->key;
  struct alpha_links *link = (struct alpha_links *) node->value;
  FILE *stream = (FILE *) data;

  fprintf (stream, "$%d..%s..lk:\n", link->num, name);
  if (link->rkind == KIND_CODEADDR)
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used */
          fprintf (stream, "\t.quad %s..en\n", name);
        }
      else
        {
          /* External and used, request code address.  */
          fprintf (stream, "\t.code_address %s\n", name);
        }
    }
  else
    {
      if (link->lkind == KIND_LOCAL)
        {
          /* Local and used, build linkage pair.  */
          fprintf (stream, "\t.quad %s..en\n", name);
          fprintf (stream, "\t.quad %s\n", name);
        }
      else
        {
          /* External and used, request linkage pair.  */
          fprintf (stream, "\t.linkage %s\n", name);
        }
    }

  return 0;
}
static void
alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
{
  splay_tree_node node;
  struct alpha_funcs *func;

  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

  node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
  func = (struct alpha_funcs *) node->value;

  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
           alpha_procedure_type == PT_STACK ? "stack"
           : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (func->links)
    {
      splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
      /* splay_tree_delete (func->links); */
    }
}
/* Given a decl, a section name, and whether the decl initializer
   has relocs, choose attributes for the section.  */

#define SECTION_VMS_OVERLAY SECTION_FORGET
#define SECTION_VMS_GLOBAL SECTION_MACH_DEP
#define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)

static unsigned int
vms_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_OVERLAY;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_GLOBAL;
  if (decl && DECL_ATTRIBUTES (decl)
      && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
    flags |= SECTION_VMS_INITIALIZE;

  return flags;
}
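/* Illustrative usage (hypothetical declaration, not from the original
   sources): a VMS-specific attribute such as

       int shared_cell __attribute__ ((global));

   makes the lookup above add SECTION_VMS_GLOBAL to the section flags,
   which vms_asm_named_section below renders as a ",GBL" qualifier.  */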
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
                       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_VMS_OVERLAY)
    fprintf (asm_out_file, ",OVR");
  if (flags & SECTION_VMS_GLOBAL)
    fprintf (asm_out_file, ",GBL");
  if (flags & SECTION_VMS_INITIALIZE)
    fprintf (asm_out_file, ",NOMOD");
  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else

rtx
alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
                    int is_local ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

rtx
alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
                   tree cfundecl ATTRIBUTE_UNUSED,
                   int lflag ATTRIBUTE_UNUSED,
                   int rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
#if TARGET_ABI_UNICOSMK

/* This evaluates to true if we do not know how to pass TYPE solely in
   registers.  This is the case for all arguments that do not fit in two
   registers.  */

static bool
unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (type == NULL)
    return false;

  if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    return true;
  if (TREE_ADDRESSABLE (type))
    return true;

  return ALPHA_ARG_SIZE (mode, type, 0) > 2;
}
/* Define the offset between two registers, one to be eliminated, and the
   other its replacement, at the start of a routine.  */

int
unicosmk_initial_elimination_offset (int from, int to)
{
  int fixed_size;

  fixed_size = alpha_sa_size();
  if (fixed_size != 0)
    fixed_size += 48;

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return -fixed_size;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (current_function_outgoing_args_size)
            + ALPHA_ROUND (get_frame_size()));
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return (ALPHA_ROUND (fixed_size)
            + ALPHA_ROUND (get_frame_size()
                           + current_function_outgoing_args_size));
  else
    gcc_unreachable ();
}
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */

static void
unicosmk_output_module_name (FILE *file)
{
  const char *name = lbasename (main_input_filename);
  unsigned len = strlen (name);
  char *clean_name = alloca (len + 2);
  char *ptr = clean_name;

  /* CAM only accepts module names that start with a letter or '$'.  We
     prefix the module name with a '$' if necessary.  */

  if (!ISALPHA (*name))
    *ptr++ = '$';
  memcpy (ptr, name, len + 1);
  clean_symbol_name (clean_name);
  fputs (clean_name, file);
}
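/* Illustrative example (hypothetical file name): a main input file named
   "9lives.c" does not start with a letter, so the module name becomes
   "$9lives.c" before clean_symbol_name rewrites any remaining characters
   CAM cannot accept.  */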
/* Output the definition of a common variable.  */

void
unicosmk_output_common (FILE *file, const char *name, int size, int align)
{
  tree name_tree;
  printf ("T3E__: common %s\n", name);

  in_section = NULL;
  fputs("\t.endp\n\n\t.psect ", file);
  assemble_name(file, name);
  fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
  fprintf(file, "\t.byte\t0:%d\n", size);

  /* Mark the symbol as defined in this module.  */
  name_tree = get_identifier (name);
  TREE_ASM_WRITTEN (name_tree) = 1;
}
#define SECTION_PUBLIC SECTION_MACH_DEP
#define SECTION_MAIN (SECTION_PUBLIC << 1)
static int current_section_align;

/* A get_unnamed_section callback for switching to the text section.  */

static void
unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 0;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
}

/* A get_unnamed_section callback for switching to the data section.  */

static void
unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  static int count = 1;
  fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
}

/* Implement TARGET_ASM_INIT_SECTIONS.

   The Cray assembler is really weird with respect to sections.  It has only
   named sections and you can't reopen a section once it has been closed.
   This means that we have to generate unique names whenever we want to
   reenter the text or the data section.  */

static void
unicosmk_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
                                      unicosmk_output_text_section_asm_op,
                                      NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
                                      unicosmk_output_data_section_asm_op,
                                      NULL);
  readonly_data_section = data_section;
}
static unsigned int
unicosmk_section_type_flags (tree decl, const char *name,
                             int reloc ATTRIBUTE_UNUSED)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (!decl)
    return flags;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      if (align_functions_log > current_section_align)
        current_section_align = align_functions_log;

      if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
        flags |= SECTION_MAIN;
    }
  else
    current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);

  if (TREE_PUBLIC (decl))
    flags |= SECTION_PUBLIC;

  return flags;
}
/* Generate a section name for decl and associate it with the
   declaration.  */

static void
unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;
  int len;

  gcc_assert (decl);

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = default_strip_name_encoding (name);
  len = strlen (name);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      char *string;

      /* It is essential that we prefix the section name here because
         otherwise the section names generated for constructors and
         destructors confuse collect2.  */

      string = alloca (len + 6);
      sprintf (string, "code@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
  else if (TREE_PUBLIC (decl))
    DECL_SECTION_NAME (decl) = build_string (len, name);
  else
    {
      char *string;

      string = alloca (len + 6);
      sprintf (string, "data@%s", name);
      DECL_SECTION_NAME (decl) = build_string (len + 5, string);
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
unicosmk_asm_named_section (const char *name, unsigned int flags,
                            tree decl ATTRIBUTE_UNUSED)
{
  const char *kind;

  /* Close the previous section.  */

  fputs ("\t.endp\n\n", asm_out_file);

  /* Find out what kind of section we are opening.  */

  if (flags & SECTION_MAIN)
    fputs ("\t.start\tmain\n", asm_out_file);

  if (flags & SECTION_CODE)
    kind = "code";
  else if (flags & SECTION_PUBLIC)
    kind = "common";
  else
    kind = "data";

  if (current_section_align != 0)
    fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
             current_section_align, kind);
  else
    fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
}

static void
unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
{
  if (DECL_P (decl)
      && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
    unicosmk_unique_section (decl, 0);
}
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */

void
unicosmk_output_align (FILE *file, int align)
{
  if (inside_function)
    fprintf (file, "\tgcc@code@align\t%d\n", align);
  else
    fprintf (file, "\t.align\t%d\n", align);
}
/* Add a case vector to the current function's list of deferred case
   vectors.  Case vectors have to be put into a separate section because CAM
   does not allow data definitions in code sections.  */

void
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
  struct machine_function *machine = cfun->machine;

  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
                                          machine->addr_list);
}
/* Output a case vector.  */

static void
unicosmk_output_addr_vec (FILE *file, rtx vec)
{
  rtx lab = XEXP (vec, 0);
  rtx body = XEXP (vec, 1);
  int vlen = XVECLEN (body, 0);
  int idx;

  (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));

  for (idx = 0; idx < vlen; idx++)
    ASM_OUTPUT_ADDR_VEC_ELT
      (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
}
10012 unicosmk_output_deferred_case_vectors (FILE *file
)
10014 struct machine_function
*machine
= cfun
->machine
;
10017 if (machine
->addr_list
== NULL_RTX
)
10020 switch_to_section (data_section
);
10021 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
10022 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
/* Generate the name of the SSIB section for the current function.  */

#define SSIB_PREFIX "__SSIB_"
#define SSIB_PREFIX_LEN 7

static const char *
unicosmk_ssib_name (void)
{
  /* This is ok since CAM won't be able to deal with names longer than that
     anyway.  */

  static char name [256];

  rtx x;
  const char *fnname;
  int len;

  x = DECL_RTL (cfun->decl);
  gcc_assert (GET_CODE (x) == MEM);
  x = XEXP (x, 0);
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
  fnname = XSTR (x, 0);

  len = strlen (fnname);
  if (len + SSIB_PREFIX_LEN > 255)
    len = 255 - SSIB_PREFIX_LEN;

  strcpy (name, SSIB_PREFIX);
  strncpy (name + SSIB_PREFIX_LEN, fnname, len);
  name [len + SSIB_PREFIX_LEN] = 0;

  return name;
}
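/* Illustrative example (derived from the definitions above): for a
   function "main" the SSIB section is named "__SSIB_main"; a name long
   enough to overflow the 256-byte buffer is truncated to fit.  */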
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */

void
unicosmk_gen_dsib (unsigned long *imaskP)
{
  if (alpha_procedure_type == PT_STACK)
    {
      const char *ssib_name;
      rtx mem;

      /* Allocate 64 bytes for the DSIB.  */

      FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                  GEN_INT (-64))));
      emit_insn (gen_blockage ());

      /* Save the return address.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
      (*imaskP) &= ~(1UL << REG_RA);

      /* Save the old frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
      (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);

      emit_insn (gen_blockage ());

      /* Store the SSIB pointer.  */

      ssib_name = ggc_strdup (unicosmk_ssib_name ());
      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
      set_mem_alias_set (mem, alpha_sr_alias_set);

      FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
                           gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));

      /* Save the CIW index.  */

      mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));

      emit_insn (gen_blockage ());

      /* Set the new frame pointer.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (64))));
    }
  else
    {
      /* Increment the frame pointer register to indicate that we do not
         have a frame.  */

      FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
                                  hard_frame_pointer_rtx, const1_rtx)));
    }
}
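/* Sketch of the resulting DSIB layout (reconstructed from the stores
   above; offsets are from the stack pointer after the 64-byte
   allocation):

       sp+56  return address ($26)
       sp+48  previous frame pointer
       sp+32  pointer to the SSIB
       sp+24  CIW index (register $25)

   The new frame pointer $15 is then set to sp+64.  */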
/* Output the static subroutine information block for the current
   function.  */

static void
unicosmk_output_ssib (FILE *file, const char *fnname)
{
  int len;
  int i;
  rtx x;
  rtx ciw;
  struct machine_function *machine = cfun->machine;

  in_section = NULL;
  fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
           unicosmk_ssib_name ());

  /* Some required stuff and the function name length.  */

  len = strlen (fnname);
  fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);

  /* Saved registers
     ??? We don't do that yet.  */

  fputs ("\t.quad\t0\n", file);

  /* Function address.  */

  fputs ("\t.quad\t", file);
  assemble_name (file, fnname);
  putc ('\n', file);

  fputs ("\t.quad\t0\n", file);
  fputs ("\t.quad\t0\n", file);

  /* Function name.
     ??? We do it the same way Cray CC does it but this could be
     simplified.  */

  for (i = 0; i < len; i++)
    fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
  if ((len % 8) == 0)
    fputs ("\t.quad\t0\n", file);
  else
    fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);

  /* All call information words used in the function.  */

  for (x = machine->first_ciw; x; x = XEXP (x, 1))
    {
      ciw = XEXP (x, 0);
#if HOST_BITS_PER_WIDE_INT == 32
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
               CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
#else
      fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
#endif
    }
}
/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */

rtx
unicosmk_add_call_info_word (rtx x)
{
  rtx node;
  struct machine_function *machine = cfun->machine;

  node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
  if (machine->first_ciw == NULL_RTX)
    machine->first_ciw = node;
  else
    XEXP (machine->last_ciw, 1) = node;

  machine->last_ciw = node;
  ++machine->ciw_count;

  return GEN_INT (machine->ciw_count
                  + strlen (current_function_name ())/8 + 5);
}
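/* Worked example of the index computation above (function name assumed):
   in a function named "example_fn" (10 characters), the first CIW added
   gets index 1 + 10/8 + 5 = 7.  The offset presumably skips the fixed
   header quadwords and the quadword-padded name that unicosmk_output_ssib
   emits ahead of the CIWs.  */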
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of file.  */

/* List of identifiers for which an extern declaration might have to be
   emitted.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;

/* Output extern declarations which are required for every asm file.  */

static void
unicosmk_output_default_externs (FILE *file)
{
  static const char *const externs[] =
    { "__T3E_MISMATCH" };

  int i;
  int n;

  n = ARRAY_SIZE (externs);

  for (i = 0; i < n; i++)
    fprintf (file, "\t.extern\t%s\n", externs[i]);
}
/* Output extern declarations for global symbols which have been
   referenced but not defined.  */

static void
unicosmk_output_externs (FILE *file)
{
  struct unicosmk_extern_list *p;
  const char *real_name;
  int len;
  tree name_tree;

  len = strlen (user_label_prefix);
  for (p = unicosmk_extern_head; p != 0; p = p->next)
    {
      /* We have to strip the encoding and possibly remove user_label_prefix
         from the identifier in order to handle -fleading-underscore and
         explicit asm names correctly (cf. gcc.dg/asm-names-1.c).  */
      real_name = default_strip_name_encoding (p->name);
      if (len && p->name[0] == '*'
          && !memcmp (real_name, user_label_prefix, len))
        real_name += len;

      name_tree = get_identifier (real_name);
      if (! TREE_ASM_WRITTEN (name_tree))
        {
          TREE_ASM_WRITTEN (name_tree) = 1;
          fputs ("\t.extern\t", file);
          assemble_name (file, p->name);
          putc ('\n', file);
        }
    }
}

/* Record an extern.  */

void
unicosmk_add_extern (const char *name)
{
  struct unicosmk_extern_list *p;

  p = (struct unicosmk_extern_list *)
       xmalloc (sizeof (struct unicosmk_extern_list));
  p->next = unicosmk_extern_head;
  p->name = name;
  unicosmk_extern_head = p;
}
/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */

/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */
/* FIXME: needs to use GC, so it can be saved and restored for PCH.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */

static bool
unicosmk_special_name (const char *name)
{
  if (name[0] == '*')
    ++name;

  if (name[0] == '$')
    ++name;

  if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
    return false;

  switch (name[1])
    {
    case '1':  case '2':
      return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));

    case '3':
      return (name[2] == '\0'
              || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));

    default:
      return (ISDIGIT (name[1]) && name[2] == '\0');
    }
}
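/* Illustrative cases (checked against the tests above): "r5", "f31",
   "R2" and "$r17" all collide with CAM register names and need DEX
   replacement, while "r32", "f100" and an ordinary identifier such as
   "foo" do not.  */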
/* Return the DEX number if X must be replaced by a DEX expression and 0
   otherwise.  */

static int
unicosmk_need_dex (rtx x)
{
  struct unicosmk_dex *dex;
  const char *name;
  int i;

  if (GET_CODE (x) != SYMBOL_REF)
    return 0;

  name = XSTR (x, 0);
  if (! unicosmk_special_name (name))
    return 0;

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      if (! strcmp (name, dex->name))
        return i;
      --i;
    }

  dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
  dex->name = name;
  dex->next = unicosmk_dex_list;
  unicosmk_dex_list = dex;

  ++unicosmk_dex_count;
  return unicosmk_dex_count;
}
/* Output the DEX definitions for this file.  */

static void
unicosmk_output_dex (FILE *file)
{
  struct unicosmk_dex *dex;
  int i;

  if (unicosmk_dex_list == NULL)
    return;

  fprintf (file, "\t.dexstart\n");

  i = unicosmk_dex_count;
  for (dex = unicosmk_dex_list; dex; dex = dex->next)
    {
      fprintf (file, "\tDEX (%d) = ", i);
      assemble_name (file, dex->name);
      putc ('\n', file);
      --i;
    }

  fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file.  */

static void
unicosmk_file_start (void)
{
  int i;

  fputs ("\t.ident\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  fputs ("\n\n", asm_out_file);

  /* The Unicos/Mk assembler uses different register names.  Instead of trying
     to support them, we simply use micro definitions.  */

  /* CAM has different register names: rN for the integer register N and fN
     for the floating-point register N.  Instead of trying to use these in
     alpha.md, we define the symbols $N and $fN to refer to the appropriate
     register.  */

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$%d <- r%d\n", i, i);

  for (i = 0; i < 32; ++i)
    fprintf (asm_out_file, "$f%d <- f%d\n", i, i);

  putc ('\n', asm_out_file);

  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */

  fputs ("\t.macro gcc@code@align n\n", asm_out_file);
  fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
  fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
  fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
  fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
  fputs ("\tbis r31,r31,r31\n", asm_out_file);
  fputs ("\t.endr\n", asm_out_file);
  fputs ("\t.endif\n", asm_out_file);
  fputs ("\t.endm gcc@code@align\n\n", asm_out_file);

  /* Output extern declarations which should always be visible.  */
  unicosmk_output_default_externs (asm_out_file);

  /* Open a dummy section.  We always need to be inside a section for the
     section-switching code to work correctly.
     ??? This should be a module id or something like that.  I still have to
     figure out what the rules for those are.  */
  fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
}
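/* Illustrative expansion (reconstructed from the macro text above):
   "gcc@code@align 3" pads the current location to an 8-byte boundary by
   repeating the canonical Alpha nop "bis r31,r31,r31" once per remaining
   4-byte instruction slot, instead of the zero fill produced by .align.  */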
/* Output text to appear at the end of an assembler file.  This includes all
   pending extern declarations and DEX expressions.  */

static void
unicosmk_file_end (void)
{
  fputs ("\t.endp\n\n", asm_out_file);

  /* Output all pending externs.  */

  unicosmk_output_externs (asm_out_file);

  /* Output dex definitions used for functions whose names conflict with
     register names.  */

  unicosmk_output_dex (asm_out_file);

  fputs ("\t.end\t", asm_out_file);
  unicosmk_output_module_name (asm_out_file);
  putc ('\n', asm_out_file);
}

#else

static void
unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
{}

static void
unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
{}

static void
unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
                      const char * fnname ATTRIBUTE_UNUSED)
{}

rtx
unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static int
unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
{
  return 0;
}

#endif /* TARGET_ABI_UNICOSMK */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_UNICOSMK)
    {
      /* Prevent gcc from generating calls to __divsi3.  */
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);

      /* Use the functions provided by the system library
         for DImode integer division.  */
      set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
      set_optab_libfunc (udiv_optab, DImode, "$uldiv");
    }
  else if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
         remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
    }
}
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#if TARGET_ABI_UNICOSMK
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
# undef TARGET_ASM_UNIQUE_SECTION
# define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
# undef TARGET_ASM_GLOBALIZE_LABEL
# define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
# undef TARGET_MUST_PASS_IN_STACK
# define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
#endif

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#if TARGET_ABI_UNICOSMK
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START unicosmk_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END unicosmk_file_end
#else
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#endif
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION alpha_handle_option

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"