2d613ae22920bb30b39433af029bc067f234f4bf
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54
55 /* Specify which cpu to schedule for. */
56
57 enum processor_type alpha_cpu;
58 static const char * const alpha_cpu_name[] =
59 {
60 "ev4", "ev5", "ev6"
61 };
62
63 /* Specify how accurate floating-point traps need to be. */
64
65 enum alpha_trap_precision alpha_tp;
66
67 /* Specify the floating-point rounding mode. */
68
69 enum alpha_fp_rounding_mode alpha_fprm;
70
71 /* Specify which things cause traps. */
72
73 enum alpha_fp_trap_mode alpha_fptm;
74
75 /* Specify bit size of immediate TLS offsets. */
76
77 int alpha_tls_size = 32;
78
79 /* Strings decoded into the above options. */
80
81 const char *alpha_cpu_string; /* -mcpu= */
82 const char *alpha_tune_string; /* -mtune= */
83 const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
84 const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
85 const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
86 const char *alpha_mlat_string; /* -mmemory-latency= */
87 const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */
88
89 /* Save information from a "cmpxx" operation until the branch or scc is
90 emitted. */
91
92 struct alpha_compare alpha_compare;
93
94 /* Nonzero if inside of a function, because the Alpha asm can't
95 handle .files inside of functions. */
96
97 static int inside_function = FALSE;
98
99 /* The number of cycles of latency we should assume on memory reads. */
100
101 int alpha_memory_latency = 3;
102
103 /* Whether the function needs the GP. */
104
105 static int alpha_function_needs_gp;
106
107 /* The alias set for prologue/epilogue register save/restore. */
108
109 static GTY(()) int alpha_sr_alias_set;
110
111 /* The assembler name of the current function. */
112
113 static const char *alpha_fnname;
114
115 /* The next explicit relocation sequence number. */
116 extern GTY(()) int alpha_next_sequence_number;
117 int alpha_next_sequence_number = 1;
118
119 /* The literal and gpdisp sequence numbers for this insn, as printed
120 by %# and %* respectively. */
121 extern GTY(()) int alpha_this_literal_sequence_number;
122 extern GTY(()) int alpha_this_gpdisp_sequence_number;
123 int alpha_this_literal_sequence_number;
124 int alpha_this_gpdisp_sequence_number;
125
126 /* Costs of various operations on the different architectures. */
127
128 struct alpha_rtx_cost_data
129 {
130 unsigned char fp_add;
131 unsigned char fp_mult;
132 unsigned char fp_div_sf;
133 unsigned char fp_div_df;
134 unsigned char int_mult_si;
135 unsigned char int_mult_di;
136 unsigned char int_shift;
137 unsigned char int_cmov;
138 };
139
140 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
141 {
142 { /* EV4 */
143 COSTS_N_INSNS (6), /* fp_add */
144 COSTS_N_INSNS (6), /* fp_mult */
145 COSTS_N_INSNS (34), /* fp_div_sf */
146 COSTS_N_INSNS (63), /* fp_div_df */
147 COSTS_N_INSNS (23), /* int_mult_si */
148 COSTS_N_INSNS (23), /* int_mult_di */
149 COSTS_N_INSNS (2), /* int_shift */
150 COSTS_N_INSNS (2), /* int_cmov */
151 },
152 { /* EV5 */
153 COSTS_N_INSNS (4), /* fp_add */
154 COSTS_N_INSNS (4), /* fp_mult */
155 COSTS_N_INSNS (15), /* fp_div_sf */
156 COSTS_N_INSNS (22), /* fp_div_df */
157 COSTS_N_INSNS (8), /* int_mult_si */
158 COSTS_N_INSNS (12), /* int_mult_di */
159 COSTS_N_INSNS (1) + 1, /* int_shift */
160 COSTS_N_INSNS (1), /* int_cmov */
161 },
162 { /* EV6 */
163 COSTS_N_INSNS (4), /* fp_add */
164 COSTS_N_INSNS (4), /* fp_mult */
165 COSTS_N_INSNS (12), /* fp_div_sf */
166 COSTS_N_INSNS (15), /* fp_div_df */
167 COSTS_N_INSNS (7), /* int_mult_si */
168 COSTS_N_INSNS (7), /* int_mult_di */
169 COSTS_N_INSNS (1), /* int_shift */
170 COSTS_N_INSNS (2), /* int_cmov */
171 },
172 };
173
174 /* Declarations of static functions. */
175 static bool alpha_function_ok_for_sibcall
176 PARAMS ((tree, tree));
177 static int tls_symbolic_operand_1
178 PARAMS ((rtx, enum machine_mode, int, int));
179 static enum tls_model tls_symbolic_operand_type
180 PARAMS ((rtx));
181 static bool decl_has_samegp
182 PARAMS ((tree));
183 static bool alpha_in_small_data_p
184 PARAMS ((tree));
185 static rtx get_tls_get_addr
186 PARAMS ((void));
187 static int some_small_symbolic_operand_1
188 PARAMS ((rtx *, void *));
189 static int split_small_symbolic_operand_1
190 PARAMS ((rtx *, void *));
191 static bool alpha_cannot_copy_insn_p
192 PARAMS ((rtx));
193 static bool alpha_rtx_costs
194 PARAMS ((rtx, int, int, int *));
195 static void alpha_set_memflags_1
196 PARAMS ((rtx, int, int, int));
197 static rtx alpha_emit_set_const_1
198 PARAMS ((rtx, enum machine_mode, HOST_WIDE_INT, int));
199 static void alpha_expand_unaligned_load_words
200 PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
201 static void alpha_expand_unaligned_store_words
202 PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
203 static void alpha_init_builtins
204 PARAMS ((void));
205 static rtx alpha_expand_builtin
206 PARAMS ((tree, rtx, rtx, enum machine_mode, int));
207 static void alpha_sa_mask
208 PARAMS ((unsigned long *imaskP, unsigned long *fmaskP));
209 static int find_lo_sum_using_gp
210 PARAMS ((rtx *, void *));
211 static int alpha_does_function_need_gp
212 PARAMS ((void));
213 static int alpha_ra_ever_killed
214 PARAMS ((void));
215 static const char *get_trap_mode_suffix
216 PARAMS ((void));
217 static const char *get_round_mode_suffix
218 PARAMS ((void));
219 static const char *get_some_local_dynamic_name
220 PARAMS ((void));
221 static int get_some_local_dynamic_name_1
222 PARAMS ((rtx *, void *));
223 static rtx set_frame_related_p
224 PARAMS ((void));
225 static const char *alpha_lookup_xfloating_lib_func
226 PARAMS ((enum rtx_code));
227 static int alpha_compute_xfloating_mode_arg
228 PARAMS ((enum rtx_code, enum alpha_fp_rounding_mode));
229 static void alpha_emit_xfloating_libcall
230 PARAMS ((const char *, rtx, rtx[], int, rtx));
231 static rtx alpha_emit_xfloating_compare
232 PARAMS ((enum rtx_code, rtx, rtx));
233 static void alpha_output_function_end_prologue
234 PARAMS ((FILE *));
235 static int alpha_adjust_cost
236 PARAMS ((rtx, rtx, rtx, int));
237 static int alpha_issue_rate
238 PARAMS ((void));
239 static int alpha_use_dfa_pipeline_interface
240 PARAMS ((void));
241 static int alpha_multipass_dfa_lookahead
242 PARAMS ((void));
243 static void alpha_reorg
244 PARAMS ((void));
245
246 #ifdef OBJECT_FORMAT_ELF
247 static void alpha_elf_select_rtx_section
248 PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT));
249 #endif
250
251 #if TARGET_ABI_OPEN_VMS
252 static bool alpha_linkage_symbol_p
253 PARAMS ((const char *symname));
254 static int alpha_write_one_linkage
255 PARAMS ((splay_tree_node, void *));
256 static void alpha_write_linkage
257 PARAMS ((FILE *, const char *, tree));
258 #endif
259
260 #if TARGET_ABI_OSF
261 static void alpha_output_mi_thunk_osf
262 PARAMS ((FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree));
263 #endif
264
265 static struct machine_function * alpha_init_machine_status
266 PARAMS ((void));
267
268 static void unicosmk_output_deferred_case_vectors PARAMS ((FILE *));
269 static void unicosmk_gen_dsib PARAMS ((unsigned long *imaskP));
270 static void unicosmk_output_ssib PARAMS ((FILE *, const char *));
271 static int unicosmk_need_dex PARAMS ((rtx));
272 static void unicosmk_file_end PARAMS ((void));
273
274 /* Get the number of args of a function in one of two ways. */
275 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
276 #define NUM_ARGS current_function_args_info.num_args
277 #else
278 #define NUM_ARGS current_function_args_info
279 #endif
280
281 #define REG_PV 27
282 #define REG_RA 26
283 \f
284 /* Initialize the GCC target structure. */
285 #if TARGET_ABI_OPEN_VMS
286 const struct attribute_spec vms_attribute_table[];
287 static unsigned int vms_section_type_flags PARAMS ((tree, const char *, int));
288 static void vms_asm_named_section PARAMS ((const char *, unsigned int));
289 static void vms_asm_out_constructor PARAMS ((rtx, int));
290 static void vms_asm_out_destructor PARAMS ((rtx, int));
291 # undef TARGET_ATTRIBUTE_TABLE
292 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
293 # undef TARGET_SECTION_TYPE_FLAGS
294 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
295 #endif
296
297 #undef TARGET_IN_SMALL_DATA_P
298 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
299
300 #if TARGET_ABI_UNICOSMK
301 static void unicosmk_asm_named_section PARAMS ((const char *, unsigned int));
302 static void unicosmk_insert_attributes PARAMS ((tree, tree *));
303 static unsigned int unicosmk_section_type_flags PARAMS ((tree, const char *,
304 int));
305 static void unicosmk_unique_section PARAMS ((tree, int));
306 # undef TARGET_INSERT_ATTRIBUTES
307 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
308 # undef TARGET_SECTION_TYPE_FLAGS
309 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
310 # undef TARGET_ASM_UNIQUE_SECTION
311 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
312 # undef TARGET_ASM_GLOBALIZE_LABEL
313 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
314 #endif
315
316 #undef TARGET_ASM_ALIGNED_HI_OP
317 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
318 #undef TARGET_ASM_ALIGNED_DI_OP
319 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
320
321 /* Default unaligned ops are provided for ELF systems. To get unaligned
322 data for non-ELF systems, we have to turn off auto alignment. */
323 #ifndef OBJECT_FORMAT_ELF
324 #undef TARGET_ASM_UNALIGNED_HI_OP
325 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
326 #undef TARGET_ASM_UNALIGNED_SI_OP
327 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
328 #undef TARGET_ASM_UNALIGNED_DI_OP
329 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
330 #endif
331
332 #ifdef OBJECT_FORMAT_ELF
333 #undef TARGET_ASM_SELECT_RTX_SECTION
334 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
335 #endif
336
337 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
338 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
339
340 #undef TARGET_SCHED_ADJUST_COST
341 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
342 #undef TARGET_SCHED_ISSUE_RATE
343 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
344 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
345 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
346 alpha_use_dfa_pipeline_interface
347 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
348 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
349 alpha_multipass_dfa_lookahead
350
351 #undef TARGET_HAVE_TLS
352 #define TARGET_HAVE_TLS HAVE_AS_TLS
353
354 #undef TARGET_INIT_BUILTINS
355 #define TARGET_INIT_BUILTINS alpha_init_builtins
356 #undef TARGET_EXPAND_BUILTIN
357 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
358
359 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
360 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
361 #undef TARGET_CANNOT_COPY_INSN_P
362 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
363
364 #if TARGET_ABI_OSF
365 #undef TARGET_ASM_OUTPUT_MI_THUNK
366 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
367 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
368 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
369 #endif
370
371 #undef TARGET_RTX_COSTS
372 #define TARGET_RTX_COSTS alpha_rtx_costs
373 #undef TARGET_ADDRESS_COST
374 #define TARGET_ADDRESS_COST hook_int_rtx_0
375
376 #undef TARGET_MACHINE_DEPENDENT_REORG
377 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
378
379 struct gcc_target targetm = TARGET_INITIALIZER;
380 \f
381 /* Parse target option strings. */
382
383 void
384 override_options ()
385 {
386 int i;
387 static const struct cpu_table {
388 const char *const name;
389 const enum processor_type processor;
390 const int flags;
391 } cpu_table[] = {
392 #define EV5_MASK (MASK_CPU_EV5)
393 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
394 { "ev4", PROCESSOR_EV4, 0 },
395 { "ev45", PROCESSOR_EV4, 0 },
396 { "21064", PROCESSOR_EV4, 0 },
397 { "ev5", PROCESSOR_EV5, EV5_MASK },
398 { "21164", PROCESSOR_EV5, EV5_MASK },
399 { "ev56", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
400 { "21164a", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
401 { "pca56", PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
402 { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
403 { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
404 { "ev6", PROCESSOR_EV6, EV6_MASK },
405 { "21264", PROCESSOR_EV6, EV6_MASK },
406 { "ev67", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
407 { "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
408 { 0, 0, 0 }
409 };
410
411 /* Unicos/Mk doesn't have shared libraries. */
412 if (TARGET_ABI_UNICOSMK && flag_pic)
413 {
414 warning ("-f%s ignored for Unicos/Mk (not supported)",
415 (flag_pic > 1) ? "PIC" : "pic");
416 flag_pic = 0;
417 }
418
419 /* On Unicos/Mk, the native compiler consistenly generates /d suffices for
420 floating-point instructions. Make that the default for this target. */
421 if (TARGET_ABI_UNICOSMK)
422 alpha_fprm = ALPHA_FPRM_DYN;
423 else
424 alpha_fprm = ALPHA_FPRM_NORM;
425
426 alpha_tp = ALPHA_TP_PROG;
427 alpha_fptm = ALPHA_FPTM_N;
428
429 /* We cannot use su and sui qualifiers for conversion instructions on
430 Unicos/Mk. I'm not sure if this is due to assembler or hardware
431 limitations. Right now, we issue a warning if -mieee is specified
432 and then ignore it; eventually, we should either get it right or
433 disable the option altogether. */
434
435 if (TARGET_IEEE)
436 {
437 if (TARGET_ABI_UNICOSMK)
438 warning ("-mieee not supported on Unicos/Mk");
439 else
440 {
441 alpha_tp = ALPHA_TP_INSN;
442 alpha_fptm = ALPHA_FPTM_SU;
443 }
444 }
445
446 if (TARGET_IEEE_WITH_INEXACT)
447 {
448 if (TARGET_ABI_UNICOSMK)
449 warning ("-mieee-with-inexact not supported on Unicos/Mk");
450 else
451 {
452 alpha_tp = ALPHA_TP_INSN;
453 alpha_fptm = ALPHA_FPTM_SUI;
454 }
455 }
456
457 if (alpha_tp_string)
458 {
459 if (! strcmp (alpha_tp_string, "p"))
460 alpha_tp = ALPHA_TP_PROG;
461 else if (! strcmp (alpha_tp_string, "f"))
462 alpha_tp = ALPHA_TP_FUNC;
463 else if (! strcmp (alpha_tp_string, "i"))
464 alpha_tp = ALPHA_TP_INSN;
465 else
466 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
467 }
468
469 if (alpha_fprm_string)
470 {
471 if (! strcmp (alpha_fprm_string, "n"))
472 alpha_fprm = ALPHA_FPRM_NORM;
473 else if (! strcmp (alpha_fprm_string, "m"))
474 alpha_fprm = ALPHA_FPRM_MINF;
475 else if (! strcmp (alpha_fprm_string, "c"))
476 alpha_fprm = ALPHA_FPRM_CHOP;
477 else if (! strcmp (alpha_fprm_string,"d"))
478 alpha_fprm = ALPHA_FPRM_DYN;
479 else
480 error ("bad value `%s' for -mfp-rounding-mode switch",
481 alpha_fprm_string);
482 }
483
484 if (alpha_fptm_string)
485 {
486 if (strcmp (alpha_fptm_string, "n") == 0)
487 alpha_fptm = ALPHA_FPTM_N;
488 else if (strcmp (alpha_fptm_string, "u") == 0)
489 alpha_fptm = ALPHA_FPTM_U;
490 else if (strcmp (alpha_fptm_string, "su") == 0)
491 alpha_fptm = ALPHA_FPTM_SU;
492 else if (strcmp (alpha_fptm_string, "sui") == 0)
493 alpha_fptm = ALPHA_FPTM_SUI;
494 else
495 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
496 }
497
498 if (alpha_tls_size_string)
499 {
500 if (strcmp (alpha_tls_size_string, "16") == 0)
501 alpha_tls_size = 16;
502 else if (strcmp (alpha_tls_size_string, "32") == 0)
503 alpha_tls_size = 32;
504 else if (strcmp (alpha_tls_size_string, "64") == 0)
505 alpha_tls_size = 64;
506 else
507 error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string);
508 }
509
510 alpha_cpu
511 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
512 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
513
514 if (alpha_cpu_string)
515 {
516 for (i = 0; cpu_table [i].name; i++)
517 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
518 {
519 alpha_cpu = cpu_table [i].processor;
520 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
521 | MASK_CPU_EV5 | MASK_CPU_EV6);
522 target_flags |= cpu_table [i].flags;
523 break;
524 }
525 if (! cpu_table [i].name)
526 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
527 }
528
529 if (alpha_tune_string)
530 {
531 for (i = 0; cpu_table [i].name; i++)
532 if (! strcmp (alpha_tune_string, cpu_table [i].name))
533 {
534 alpha_cpu = cpu_table [i].processor;
535 break;
536 }
537 if (! cpu_table [i].name)
538 error ("bad value `%s' for -mcpu switch", alpha_tune_string);
539 }
540
541 /* Do some sanity checks on the above options. */
542
543 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
544 {
545 warning ("trap mode not supported on Unicos/Mk");
546 alpha_fptm = ALPHA_FPTM_N;
547 }
548
549 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
550 && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
551 {
552 warning ("fp software completion requires -mtrap-precision=i");
553 alpha_tp = ALPHA_TP_INSN;
554 }
555
556 if (TARGET_CPU_EV6)
557 {
558 /* Except for EV6 pass 1 (not released), we always have precise
559 arithmetic traps. Which means we can do software completion
560 without minding trap shadows. */
561 alpha_tp = ALPHA_TP_PROG;
562 }
563
564 if (TARGET_FLOAT_VAX)
565 {
566 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
567 {
568 warning ("rounding mode not supported for VAX floats");
569 alpha_fprm = ALPHA_FPRM_NORM;
570 }
571 if (alpha_fptm == ALPHA_FPTM_SUI)
572 {
573 warning ("trap mode not supported for VAX floats");
574 alpha_fptm = ALPHA_FPTM_SU;
575 }
576 }
577
578 {
579 char *end;
580 int lat;
581
582 if (!alpha_mlat_string)
583 alpha_mlat_string = "L1";
584
585 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
586 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
587 ;
588 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
589 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
590 && alpha_mlat_string[2] == '\0')
591 {
592 static int const cache_latency[][4] =
593 {
594 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
595 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
596 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
597 };
598
599 lat = alpha_mlat_string[1] - '0';
600 if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
601 {
602 warning ("L%d cache latency unknown for %s",
603 lat, alpha_cpu_name[alpha_cpu]);
604 lat = 3;
605 }
606 else
607 lat = cache_latency[alpha_cpu][lat-1];
608 }
609 else if (! strcmp (alpha_mlat_string, "main"))
610 {
611 /* Most current memories have about 370ns latency. This is
612 a reasonable guess for a fast cpu. */
613 lat = 150;
614 }
615 else
616 {
617 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
618 lat = 3;
619 }
620
621 alpha_memory_latency = lat;
622 }
623
624 /* Default the definition of "small data" to 8 bytes. */
625 if (!g_switch_set)
626 g_switch_value = 8;
627
628 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
629 if (flag_pic == 1)
630 target_flags |= MASK_SMALL_DATA;
631 else if (flag_pic == 2)
632 target_flags &= ~MASK_SMALL_DATA;
633
634 /* Align labels and loops for optimal branching. */
635 /* ??? Kludge these by not doing anything if we don't optimize and also if
636 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
637 if (optimize > 0 && write_symbols != SDB_DEBUG)
638 {
639 if (align_loops <= 0)
640 align_loops = 16;
641 if (align_jumps <= 0)
642 align_jumps = 16;
643 }
644 if (align_functions <= 0)
645 align_functions = 16;
646
647 /* Acquire a unique set number for our register saves and restores. */
648 alpha_sr_alias_set = new_alias_set ();
649
650 /* Register variables and functions with the garbage collector. */
651
652 /* Set up function hooks. */
653 init_machine_status = alpha_init_machine_status;
654
655 /* Tell the compiler when we're using VAX floating point. */
656 if (TARGET_FLOAT_VAX)
657 {
658 real_format_for_mode[SFmode - QFmode] = &vax_f_format;
659 real_format_for_mode[DFmode - QFmode] = &vax_g_format;
660 real_format_for_mode[TFmode - QFmode] = NULL;
661 }
662 }
663 \f
664 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
665
666 int
667 zap_mask (value)
668 HOST_WIDE_INT value;
669 {
670 int i;
671
672 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
673 i++, value >>= 8)
674 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
675 return 0;
676
677 return 1;
678 }
679
680 /* Returns 1 if OP is either the constant zero or a register. If a
681 register, it must be in the proper mode unless MODE is VOIDmode. */
682
683 int
684 reg_or_0_operand (op, mode)
685 register rtx op;
686 enum machine_mode mode;
687 {
688 return op == CONST0_RTX (mode) || register_operand (op, mode);
689 }
690
691 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
692 any register. */
693
694 int
695 reg_or_6bit_operand (op, mode)
696 register rtx op;
697 enum machine_mode mode;
698 {
699 return ((GET_CODE (op) == CONST_INT
700 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
701 || register_operand (op, mode));
702 }
703
704
705 /* Return 1 if OP is an 8-bit constant or any register. */
706
707 int
708 reg_or_8bit_operand (op, mode)
709 register rtx op;
710 enum machine_mode mode;
711 {
712 return ((GET_CODE (op) == CONST_INT
713 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
714 || register_operand (op, mode));
715 }
716
717 /* Return 1 if OP is a constant or any register. */
718
719 int
720 reg_or_const_int_operand (op, mode)
721 register rtx op;
722 enum machine_mode mode;
723 {
724 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
725 }
726
727 /* Return 1 if OP is an 8-bit constant. */
728
729 int
730 cint8_operand (op, mode)
731 register rtx op;
732 enum machine_mode mode ATTRIBUTE_UNUSED;
733 {
734 return ((GET_CODE (op) == CONST_INT
735 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
736 }
737
738 /* Return 1 if the operand is a valid second operand to an add insn. */
739
740 int
741 add_operand (op, mode)
742 register rtx op;
743 enum machine_mode mode;
744 {
745 if (GET_CODE (op) == CONST_INT)
746 /* Constraints I, J, O and P are covered by K. */
747 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
748 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
749
750 return register_operand (op, mode);
751 }
752
753 /* Return 1 if the operand is a valid second operand to a sign-extending
754 add insn. */
755
756 int
757 sext_add_operand (op, mode)
758 register rtx op;
759 enum machine_mode mode;
760 {
761 if (GET_CODE (op) == CONST_INT)
762 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
763 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
764
765 return reg_not_elim_operand (op, mode);
766 }
767
768 /* Return 1 if OP is the constant 4 or 8. */
769
770 int
771 const48_operand (op, mode)
772 register rtx op;
773 enum machine_mode mode ATTRIBUTE_UNUSED;
774 {
775 return (GET_CODE (op) == CONST_INT
776 && (INTVAL (op) == 4 || INTVAL (op) == 8));
777 }
778
779 /* Return 1 if OP is a valid first operand to an AND insn. */
780
781 int
782 and_operand (op, mode)
783 register rtx op;
784 enum machine_mode mode;
785 {
786 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
787 return (zap_mask (CONST_DOUBLE_LOW (op))
788 && zap_mask (CONST_DOUBLE_HIGH (op)));
789
790 if (GET_CODE (op) == CONST_INT)
791 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
792 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
793 || zap_mask (INTVAL (op)));
794
795 return register_operand (op, mode);
796 }
797
798 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
799
800 int
801 or_operand (op, mode)
802 register rtx op;
803 enum machine_mode mode;
804 {
805 if (GET_CODE (op) == CONST_INT)
806 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
807 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
808
809 return register_operand (op, mode);
810 }
811
812 /* Return 1 if OP is a constant that is the width, in bits, of an integral
813 mode smaller than DImode. */
814
815 int
816 mode_width_operand (op, mode)
817 register rtx op;
818 enum machine_mode mode ATTRIBUTE_UNUSED;
819 {
820 return (GET_CODE (op) == CONST_INT
821 && (INTVAL (op) == 8 || INTVAL (op) == 16
822 || INTVAL (op) == 32 || INTVAL (op) == 64));
823 }
824
825 /* Return 1 if OP is a constant that is the width of an integral machine mode
826 smaller than an integer. */
827
828 int
829 mode_mask_operand (op, mode)
830 register rtx op;
831 enum machine_mode mode ATTRIBUTE_UNUSED;
832 {
833 if (GET_CODE (op) == CONST_INT)
834 {
835 HOST_WIDE_INT value = INTVAL (op);
836
837 if (value == 0xff)
838 return 1;
839 if (value == 0xffff)
840 return 1;
841 if (value == 0xffffffff)
842 return 1;
843 if (value == -1)
844 return 1;
845 }
846 else if (HOST_BITS_PER_WIDE_INT == 32 && GET_CODE (op) == CONST_DOUBLE)
847 {
848 if (CONST_DOUBLE_LOW (op) == 0xffffffff && CONST_DOUBLE_HIGH (op) == 0)
849 return 1;
850 }
851
852 return 0;
853 }
854
855 /* Return 1 if OP is a multiple of 8 less than 64. */
856
857 int
858 mul8_operand (op, mode)
859 register rtx op;
860 enum machine_mode mode ATTRIBUTE_UNUSED;
861 {
862 return (GET_CODE (op) == CONST_INT
863 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
864 && (INTVAL (op) & 7) == 0);
865 }
866
867 /* Return 1 if OP is the zero constant for MODE. */
868
869 int
870 const0_operand (op, mode)
871 register rtx op;
872 enum machine_mode mode;
873 {
874 return op == CONST0_RTX (mode);
875 }
876
877 /* Return 1 if OP is a hard floating-point register. */
878
879 int
880 hard_fp_register_operand (op, mode)
881 register rtx op;
882 enum machine_mode mode;
883 {
884 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
885 return 0;
886
887 if (GET_CODE (op) == SUBREG)
888 op = SUBREG_REG (op);
889 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;
890 }
891
892 /* Return 1 if OP is a hard general register. */
893
894 int
895 hard_int_register_operand (op, mode)
896 register rtx op;
897 enum machine_mode mode;
898 {
899 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
900 return 0;
901
902 if (GET_CODE (op) == SUBREG)
903 op = SUBREG_REG (op);
904 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
905 }
906
907 /* Return 1 if OP is a register or a constant integer. */
908
909
910 int
911 reg_or_cint_operand (op, mode)
912 register rtx op;
913 enum machine_mode mode;
914 {
915 return (GET_CODE (op) == CONST_INT
916 || register_operand (op, mode));
917 }
918
919 /* Return 1 if OP is something that can be reloaded into a register;
920 if it is a MEM, it need not be valid. */
921
922 int
923 some_operand (op, mode)
924 register rtx op;
925 enum machine_mode mode;
926 {
927 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
928 return 0;
929
930 switch (GET_CODE (op))
931 {
932 case REG:
933 case MEM:
934 case CONST_INT:
935 case CONST_DOUBLE:
936 case CONST_VECTOR:
937 case LABEL_REF:
938 case SYMBOL_REF:
939 case CONST:
940 case HIGH:
941 return 1;
942
943 case SUBREG:
944 return some_operand (SUBREG_REG (op), VOIDmode);
945
946 default:
947 break;
948 }
949
950 return 0;
951 }
952
953 /* Likewise, but don't accept constants. */
954
955 int
956 some_ni_operand (op, mode)
957 register rtx op;
958 enum machine_mode mode;
959 {
960 if (GET_MODE (op) != mode && mode != VOIDmode)
961 return 0;
962
963 if (GET_CODE (op) == SUBREG)
964 op = SUBREG_REG (op);
965
966 return (GET_CODE (op) == REG || GET_CODE (op) == MEM);
967 }
968
969 /* Return 1 if OP is a valid operand for the source of a move insn. */
970
971 int
972 input_operand (op, mode)
973 register rtx op;
974 enum machine_mode mode;
975 {
976 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
977 return 0;
978
979 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
980 return 0;
981
982 switch (GET_CODE (op))
983 {
984 case LABEL_REF:
985 case SYMBOL_REF:
986 case CONST:
987 if (TARGET_EXPLICIT_RELOCS)
988 {
989 /* We don't split symbolic operands into something unintelligable
990 until after reload, but we do not wish non-small, non-global
991 symbolic operands to be reconstructed from their high/lo_sum
992 form. */
993 return (small_symbolic_operand (op, mode)
994 || global_symbolic_operand (op, mode)
995 || gotdtp_symbolic_operand (op, mode)
996 || gottp_symbolic_operand (op, mode));
997 }
998
999 /* This handles both the Windows/NT and OSF cases. */
1000 return mode == ptr_mode || mode == DImode;
1001
1002 case HIGH:
1003 return (TARGET_EXPLICIT_RELOCS
1004 && local_symbolic_operand (XEXP (op, 0), mode));
1005
1006 case REG:
1007 case ADDRESSOF:
1008 return 1;
1009
1010 case SUBREG:
1011 if (register_operand (op, mode))
1012 return 1;
1013 /* ... fall through ... */
1014 case MEM:
1015 return ((TARGET_BWX || (mode != HImode && mode != QImode))
1016 && general_operand (op, mode));
1017
1018 case CONST_DOUBLE:
1019 case CONST_VECTOR:
1020 return op == CONST0_RTX (mode);
1021
1022 case CONST_INT:
1023 return mode == QImode || mode == HImode || add_operand (op, mode);
1024
1025 case CONSTANT_P_RTX:
1026 return 1;
1027
1028 default:
1029 break;
1030 }
1031
1032 return 0;
1033 }
1034
1035 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
1036 file, and in the same section as the current function. */
1037
1038 int
1039 samegp_function_operand (op, mode)
1040 rtx op;
1041 enum machine_mode mode ATTRIBUTE_UNUSED;
1042 {
1043 if (GET_CODE (op) != SYMBOL_REF)
1044 return false;
1045
1046 /* Easy test for recursion. */
1047 if (op == XEXP (DECL_RTL (current_function_decl), 0))
1048 return true;
1049
1050 /* Functions that are not local can be overridden, and thus may
1051 not share the same gp. */
1052 if (! SYMBOL_REF_LOCAL_P (op))
1053 return false;
1054
1055 /* If -msmall-data is in effect, assume that there is only one GP
1056 for the module, and so any local symbol has this property. We
1057 need explicit relocations to be able to enforce this for symbols
1058 not defined in this unit of translation, however. */
1059 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
1060 return true;
1061
1062 /* Functions that are not external are defined in this UoT,
1063 and thus must share the same gp. */
1064 return ! SYMBOL_REF_EXTERNAL_P (op);
1065 }
1066
1067 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
1068
1069 int
1070 direct_call_operand (op, mode)
1071 rtx op;
1072 enum machine_mode mode;
1073 {
1074 tree op_decl, cfun_sec, op_sec;
1075
1076 /* Must share the same GP. */
1077 if (!samegp_function_operand (op, mode))
1078 return false;
1079
1080 /* If profiling is implemented via linker tricks, we can't jump
1081 to the nogp alternate entry point. Note that current_function_profile
1082 would not be correct, since that doesn't indicate if the target
1083 function uses profiling. */
1084 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
1085 but is approximately correct for the OSF ABIs. Don't know
1086 what to do for VMS, NT, or UMK. */
1087 if (!TARGET_PROFILING_NEEDS_GP && profile_flag)
1088 return false;
1089
1090 /* Must be a function. In some cases folks create thunks in static
1091 data structures and then make calls to them. If we allow the
1092 direct call, we'll get an error from the linker about !samegp reloc
1093 against a symbol without a .prologue directive. */
1094 if (!SYMBOL_REF_FUNCTION_P (op))
1095 return false;
1096
1097 /* Must be "near" so that the branch is assumed to reach. With
1098 -msmall-text, this is assumed true of all local symbols. Since
1099 we've already checked samegp, locality is already assured. */
1100 if (TARGET_SMALL_TEXT)
1101 return true;
1102
1103 /* Otherwise, a decl is "near" if it is defined in the same section. */
1104 if (flag_function_sections)
1105 return false;
1106
1107 op_decl = SYMBOL_REF_DECL (op);
1108 if (DECL_ONE_ONLY (current_function_decl)
1109 || (op_decl && DECL_ONE_ONLY (op_decl)))
1110 return false;
1111
1112 cfun_sec = DECL_SECTION_NAME (current_function_decl);
1113 op_sec = op_decl ? DECL_SECTION_NAME (op_decl) : NULL;
1114 return ((!cfun_sec && !op_sec)
1115 || (cfun_sec && op_sec
1116 && strcmp (TREE_STRING_POINTER (cfun_sec),
1117 TREE_STRING_POINTER (op_sec)) == 0));
1118 }
1119
1120 /* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
1121 a (non-tls) variable known to be defined in this file. */
1122
1123 int
1124 local_symbolic_operand (op, mode)
1125 rtx op;
1126 enum machine_mode mode;
1127 {
1128 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1129 return 0;
1130
1131 if (GET_CODE (op) == LABEL_REF)
1132 return 1;
1133
1134 if (GET_CODE (op) == CONST
1135 && GET_CODE (XEXP (op, 0)) == PLUS
1136 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1137 op = XEXP (XEXP (op, 0), 0);
1138
1139 if (GET_CODE (op) != SYMBOL_REF)
1140 return 0;
1141
1142 return SYMBOL_REF_LOCAL_P (op) && !SYMBOL_REF_TLS_MODEL (op);
1143 }
1144
1145 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1146 known to be defined in this file in the small data area. */
1147
1148 int
1149 small_symbolic_operand (op, mode)
1150 rtx op;
1151 enum machine_mode mode ATTRIBUTE_UNUSED;
1152 {
1153 if (! TARGET_SMALL_DATA)
1154 return 0;
1155
1156 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1157 return 0;
1158
1159 if (GET_CODE (op) == CONST
1160 && GET_CODE (XEXP (op, 0)) == PLUS
1161 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1162 op = XEXP (XEXP (op, 0), 0);
1163
1164 if (GET_CODE (op) != SYMBOL_REF)
1165 return 0;
1166
1167 /* ??? There's no encode_section_info equivalent for the rtl
1168 constant pool, so SYMBOL_FLAG_SMALL never gets set. */
1169 if (CONSTANT_POOL_ADDRESS_P (op))
1170 return GET_MODE_SIZE (get_pool_mode (op)) <= g_switch_value;
1171
1172 return (SYMBOL_REF_LOCAL_P (op)
1173 && SYMBOL_REF_SMALL_P (op)
1174 && SYMBOL_REF_TLS_MODEL (op) == 0);
1175 }
1176
1177 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1178 not known (or known not) to be defined in this file. */
1179
1180 int
1181 global_symbolic_operand (op, mode)
1182 rtx op;
1183 enum machine_mode mode;
1184 {
1185 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1186 return 0;
1187
1188 if (GET_CODE (op) == CONST
1189 && GET_CODE (XEXP (op, 0)) == PLUS
1190 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1191 op = XEXP (XEXP (op, 0), 0);
1192
1193 if (GET_CODE (op) != SYMBOL_REF)
1194 return 0;
1195
1196 return !SYMBOL_REF_LOCAL_P (op) && !SYMBOL_REF_TLS_MODEL (op);
1197 }
1198
1199 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
1200
1201 int
1202 call_operand (op, mode)
1203 rtx op;
1204 enum machine_mode mode;
1205 {
1206 if (mode != Pmode)
1207 return 0;
1208
1209 if (GET_CODE (op) == REG)
1210 {
1211 if (TARGET_ABI_OSF)
1212 {
1213 /* Disallow virtual registers to cope with pathalogical test cases
1214 such as compile/930117-1.c in which the virtual reg decomposes
1215 to the frame pointer. Which is a hard reg that is not $27. */
1216 return (REGNO (op) == 27 || REGNO (op) > LAST_VIRTUAL_REGISTER);
1217 }
1218 else
1219 return 1;
1220 }
1221 if (TARGET_ABI_UNICOSMK)
1222 return 0;
1223 if (GET_CODE (op) == SYMBOL_REF)
1224 return 1;
1225
1226 return 0;
1227 }
1228
1229 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
1230 possibly with an offset. */
1231
1232 int
1233 symbolic_operand (op, mode)
1234 register rtx op;
1235 enum machine_mode mode;
1236 {
1237 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1238 return 0;
1239 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
1240 return 1;
1241 if (GET_CODE (op) == CONST
1242 && GET_CODE (XEXP (op,0)) == PLUS
1243 && GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF
1244 && GET_CODE (XEXP (XEXP (op,0), 1)) == CONST_INT)
1245 return 1;
1246 return 0;
1247 }
1248
1249 /* Return true if OP is valid for a particular TLS relocation. */
1250
1251 static int
1252 tls_symbolic_operand_1 (op, mode, size, unspec)
1253 rtx op;
1254 enum machine_mode mode;
1255 int size, unspec;
1256 {
1257 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1258 return 0;
1259
1260 if (GET_CODE (op) != CONST)
1261 return 0;
1262 op = XEXP (op, 0);
1263
1264 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
1265 return 0;
1266 op = XVECEXP (op, 0, 0);
1267
1268 if (GET_CODE (op) != SYMBOL_REF)
1269 return 0;
1270
1271 if (SYMBOL_REF_LOCAL_P (op))
1272 {
1273 if (alpha_tls_size > size)
1274 return 0;
1275 }
1276 else
1277 {
1278 if (size != 64)
1279 return 0;
1280 }
1281
1282 switch (SYMBOL_REF_TLS_MODEL (op))
1283 {
1284 case TLS_MODEL_LOCAL_DYNAMIC:
1285 return unspec == UNSPEC_DTPREL;
1286 case TLS_MODEL_INITIAL_EXEC:
1287 return unspec == UNSPEC_TPREL && size == 64;
1288 case TLS_MODEL_LOCAL_EXEC:
1289 return unspec == UNSPEC_TPREL;
1290 default:
1291 abort ();
1292 }
1293 }
1294
1295 /* Return true if OP is valid for 16-bit DTP relative relocations. */
1296
1297 int
1298 dtp16_symbolic_operand (op, mode)
1299 rtx op;
1300 enum machine_mode mode;
1301 {
1302 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_DTPREL);
1303 }
1304
1305 /* Return true if OP is valid for 32-bit DTP relative relocations. */
1306
1307 int
1308 dtp32_symbolic_operand (op, mode)
1309 rtx op;
1310 enum machine_mode mode;
1311 {
1312 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_DTPREL);
1313 }
1314
1315 /* Return true if OP is valid for 64-bit DTP relative relocations. */
1316
1317 int
1318 gotdtp_symbolic_operand (op, mode)
1319 rtx op;
1320 enum machine_mode mode;
1321 {
1322 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_DTPREL);
1323 }
1324
1325 /* Return true if OP is valid for 16-bit TP relative relocations. */
1326
1327 int
1328 tp16_symbolic_operand (op, mode)
1329 rtx op;
1330 enum machine_mode mode;
1331 {
1332 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_TPREL);
1333 }
1334
1335 /* Return true if OP is valid for 32-bit TP relative relocations. */
1336
1337 int
1338 tp32_symbolic_operand (op, mode)
1339 rtx op;
1340 enum machine_mode mode;
1341 {
1342 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_TPREL);
1343 }
1344
1345 /* Return true if OP is valid for 64-bit TP relative relocations. */
1346
1347 int
1348 gottp_symbolic_operand (op, mode)
1349 rtx op;
1350 enum machine_mode mode;
1351 {
1352 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_TPREL);
1353 }
1354
1355 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1356 comparisons are valid in which insn. */
1357
1358 int
1359 alpha_comparison_operator (op, mode)
1360 register rtx op;
1361 enum machine_mode mode;
1362 {
1363 enum rtx_code code = GET_CODE (op);
1364
1365 if (mode != GET_MODE (op) && mode != VOIDmode)
1366 return 0;
1367
1368 return (code == EQ || code == LE || code == LT
1369 || code == LEU || code == LTU);
1370 }
1371
1372 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1373 Here we know which comparisons are valid in which insn. */
1374
1375 int
1376 alpha_zero_comparison_operator (op, mode)
1377 register rtx op;
1378 enum machine_mode mode;
1379 {
1380 enum rtx_code code = GET_CODE (op);
1381
1382 if (mode != GET_MODE (op) && mode != VOIDmode)
1383 return 0;
1384
1385 return (code == EQ || code == NE || code == LE || code == LT
1386 || code == LEU || code == LTU);
1387 }
1388
1389 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1390
1391 int
1392 alpha_swapped_comparison_operator (op, mode)
1393 register rtx op;
1394 enum machine_mode mode;
1395 {
1396 enum rtx_code code = GET_CODE (op);
1397
1398 if ((mode != GET_MODE (op) && mode != VOIDmode)
1399 || GET_RTX_CLASS (code) != '<')
1400 return 0;
1401
1402 code = swap_condition (code);
1403 return (code == EQ || code == LE || code == LT
1404 || code == LEU || code == LTU);
1405 }
1406
1407 /* Return 1 if OP is a signed comparison operation. */
1408
1409 int
1410 signed_comparison_operator (op, mode)
1411 register rtx op;
1412 enum machine_mode mode ATTRIBUTE_UNUSED;
1413 {
1414 enum rtx_code code = GET_CODE (op);
1415
1416 if (mode != GET_MODE (op) && mode != VOIDmode)
1417 return 0;
1418
1419 return (code == EQ || code == NE
1420 || code == LE || code == LT
1421 || code == GE || code == GT);
1422 }
1423
1424 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1425 Here we know which comparisons are valid in which insn. */
1426
1427 int
1428 alpha_fp_comparison_operator (op, mode)
1429 register rtx op;
1430 enum machine_mode mode;
1431 {
1432 enum rtx_code code = GET_CODE (op);
1433
1434 if (mode != GET_MODE (op) && mode != VOIDmode)
1435 return 0;
1436
1437 return (code == EQ || code == LE || code == LT || code == UNORDERED);
1438 }
1439
1440 /* Return 1 if this is a divide or modulus operator. */
1441
1442 int
1443 divmod_operator (op, mode)
1444 register rtx op;
1445 enum machine_mode mode ATTRIBUTE_UNUSED;
1446 {
1447 switch (GET_CODE (op))
1448 {
1449 case DIV: case MOD: case UDIV: case UMOD:
1450 return 1;
1451
1452 default:
1453 break;
1454 }
1455
1456 return 0;
1457 }
1458
1459 /* Return 1 if this memory address is a known aligned register plus
1460 a constant. It must be a valid address. This means that we can do
1461 this as an aligned reference plus some offset.
1462
1463 Take into account what reload will do. */
1464
1465 int
1466 aligned_memory_operand (op, mode)
1467 register rtx op;
1468 enum machine_mode mode;
1469 {
1470 rtx base;
1471
1472 if (reload_in_progress)
1473 {
1474 rtx tmp = op;
1475 if (GET_CODE (tmp) == SUBREG)
1476 tmp = SUBREG_REG (tmp);
1477 if (GET_CODE (tmp) == REG
1478 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1479 {
1480 op = reg_equiv_memory_loc[REGNO (tmp)];
1481 if (op == 0)
1482 return 0;
1483 }
1484 }
1485
1486 if (GET_CODE (op) != MEM
1487 || GET_MODE (op) != mode)
1488 return 0;
1489 op = XEXP (op, 0);
1490
1491 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1492 sorts of constructs. Dig for the real base register. */
1493 if (reload_in_progress
1494 && GET_CODE (op) == PLUS
1495 && GET_CODE (XEXP (op, 0)) == PLUS)
1496 base = XEXP (XEXP (op, 0), 0);
1497 else
1498 {
1499 if (! memory_address_p (mode, op))
1500 return 0;
1501 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1502 }
1503
1504 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
1505 }
1506
1507 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1508
1509 int
1510 unaligned_memory_operand (op, mode)
1511 register rtx op;
1512 enum machine_mode mode;
1513 {
1514 rtx base;
1515
1516 if (reload_in_progress)
1517 {
1518 rtx tmp = op;
1519 if (GET_CODE (tmp) == SUBREG)
1520 tmp = SUBREG_REG (tmp);
1521 if (GET_CODE (tmp) == REG
1522 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1523 {
1524 op = reg_equiv_memory_loc[REGNO (tmp)];
1525 if (op == 0)
1526 return 0;
1527 }
1528 }
1529
1530 if (GET_CODE (op) != MEM
1531 || GET_MODE (op) != mode)
1532 return 0;
1533 op = XEXP (op, 0);
1534
1535 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1536 sorts of constructs. Dig for the real base register. */
1537 if (reload_in_progress
1538 && GET_CODE (op) == PLUS
1539 && GET_CODE (XEXP (op, 0)) == PLUS)
1540 base = XEXP (XEXP (op, 0), 0);
1541 else
1542 {
1543 if (! memory_address_p (mode, op))
1544 return 0;
1545 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1546 }
1547
1548 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
1549 }
1550
1551 /* Return 1 if OP is either a register or an unaligned memory location. */
1552
1553 int
1554 reg_or_unaligned_mem_operand (op, mode)
1555 rtx op;
1556 enum machine_mode mode;
1557 {
1558 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
1559 }
1560
1561 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1562
1563 int
1564 any_memory_operand (op, mode)
1565 register rtx op;
1566 enum machine_mode mode ATTRIBUTE_UNUSED;
1567 {
1568 return (GET_CODE (op) == MEM
1569 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
1570 || (reload_in_progress && GET_CODE (op) == REG
1571 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
1572 || (reload_in_progress && GET_CODE (op) == SUBREG
1573 && GET_CODE (SUBREG_REG (op)) == REG
1574 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
1575 }
1576
1577 /* Returns 1 if OP is not an eliminable register.
1578
1579 This exists to cure a pathological abort in the s8addq (et al) patterns,
1580
1581 long foo () { long t; bar(); return (long) &t * 26107; }
1582
1583 which run afoul of a hack in reload to cure a (presumably) similar
1584 problem with lea-type instructions on other targets. But there is
1585 one of us and many of them, so work around the problem by selectively
1586 preventing combine from making the optimization. */
1587
1588 int
1589 reg_not_elim_operand (op, mode)
1590 register rtx op;
1591 enum machine_mode mode;
1592 {
1593 rtx inner = op;
1594 if (GET_CODE (op) == SUBREG)
1595 inner = SUBREG_REG (op);
1596 if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
1597 return 0;
1598
1599 return register_operand (op, mode);
1600 }
1601
1602 /* Return 1 is OP is a memory location that is not a reference (using
1603 an AND) to an unaligned location. Take into account what reload
1604 will do. */
1605
1606 int
1607 normal_memory_operand (op, mode)
1608 register rtx op;
1609 enum machine_mode mode ATTRIBUTE_UNUSED;
1610 {
1611 if (reload_in_progress)
1612 {
1613 rtx tmp = op;
1614 if (GET_CODE (tmp) == SUBREG)
1615 tmp = SUBREG_REG (tmp);
1616 if (GET_CODE (tmp) == REG
1617 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1618 {
1619 op = reg_equiv_memory_loc[REGNO (tmp)];
1620
1621 /* This may not have been assigned an equivalent address if it will
1622 be eliminated. In that case, it doesn't matter what we do. */
1623 if (op == 0)
1624 return 1;
1625 }
1626 }
1627
1628 return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
1629 }
1630
1631 /* Accept a register, but not a subreg of any kind. This allows us to
1632 avoid pathological cases in reload wrt data movement common in
1633 int->fp conversion. */
1634
1635 int
1636 reg_no_subreg_operand (op, mode)
1637 register rtx op;
1638 enum machine_mode mode;
1639 {
1640 if (GET_CODE (op) != REG)
1641 return 0;
1642 return register_operand (op, mode);
1643 }
1644
1645 /* Recognize an addition operation that includes a constant. Used to
1646 convince reload to canonize (plus (plus reg c1) c2) during register
1647 elimination. */
1648
1649 int
1650 addition_operation (op, mode)
1651 register rtx op;
1652 enum machine_mode mode;
1653 {
1654 if (GET_MODE (op) != mode && mode != VOIDmode)
1655 return 0;
1656 if (GET_CODE (op) == PLUS
1657 && register_operand (XEXP (op, 0), mode)
1658 && GET_CODE (XEXP (op, 1)) == CONST_INT
1659 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K'))
1660 return 1;
1661 return 0;
1662 }
1663
1664 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
1665 the range defined for C in [I-P]. */
1666
1667 bool
1668 alpha_const_ok_for_letter_p (value, c)
1669 HOST_WIDE_INT value;
1670 int c;
1671 {
1672 switch (c)
1673 {
1674 case 'I':
1675 /* An unsigned 8 bit constant. */
1676 return (unsigned HOST_WIDE_INT) value < 0x100;
1677 case 'J':
1678 /* The constant zero. */
1679 return value == 0;
1680 case 'K':
1681 /* A signed 16 bit constant. */
1682 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
1683 case 'L':
1684 /* A shifted signed 16 bit constant appropriate for LDAH. */
1685 return ((value & 0xffff) == 0
1686 && ((value) >> 31 == -1 || value >> 31 == 0));
1687 case 'M':
1688 /* A constant that can be AND'ed with using a ZAP insn. */
1689 return zap_mask (value);
1690 case 'N':
1691 /* A complemented unsigned 8 bit constant. */
1692 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
1693 case 'O':
1694 /* A negated unsigned 8 bit constant. */
1695 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
1696 case 'P':
1697 /* The constant 1, 2 or 3. */
1698 return value == 1 || value == 2 || value == 3;
1699
1700 default:
1701 return false;
1702 }
1703 }
1704
1705 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1706 matches for C in [GH]. */
1707
1708 bool
1709 alpha_const_double_ok_for_letter_p (value, c)
1710 rtx value;
1711 int c;
1712 {
1713 switch (c)
1714 {
1715 case 'G':
1716 /* The floating point zero constant. */
1717 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
1718 && value == CONST0_RTX (GET_MODE (value)));
1719
1720 case 'H':
1721 /* A valid operand of a ZAP insn. */
1722 return (GET_MODE (value) == VOIDmode
1723 && zap_mask (CONST_DOUBLE_LOW (value))
1724 && zap_mask (CONST_DOUBLE_HIGH (value)));
1725
1726 default:
1727 return false;
1728 }
1729 }
1730
1731 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1732 matches for C. */
1733
1734 bool
1735 alpha_extra_constraint (value, c)
1736 rtx value;
1737 int c;
1738 {
1739 switch (c)
1740 {
1741 case 'Q':
1742 return normal_memory_operand (value, VOIDmode);
1743 case 'R':
1744 return direct_call_operand (value, Pmode);
1745 case 'S':
1746 return (GET_CODE (value) == CONST_INT
1747 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
1748 case 'T':
1749 return GET_CODE (value) == HIGH;
1750 case 'U':
1751 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
1752 case 'W':
1753 return (GET_CODE (value) == CONST_VECTOR
1754 && value == CONST0_RTX (GET_MODE (value)));
1755 default:
1756 return false;
1757 }
1758 }
1759
1760 /* Return 1 if this function can directly return via $26. */
1761
1762 int
1763 direct_return ()
1764 {
1765 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
1766 && reload_completed
1767 && alpha_sa_size () == 0
1768 && get_frame_size () == 0
1769 && current_function_outgoing_args_size == 0
1770 && current_function_pretend_args_size == 0);
1771 }
1772
1773 /* Return the ADDR_VEC associated with a tablejump insn. */
1774
1775 rtx
1776 alpha_tablejump_addr_vec (insn)
1777 rtx insn;
1778 {
1779 rtx tmp;
1780
1781 tmp = JUMP_LABEL (insn);
1782 if (!tmp)
1783 return NULL_RTX;
1784 tmp = NEXT_INSN (tmp);
1785 if (!tmp)
1786 return NULL_RTX;
1787 if (GET_CODE (tmp) == JUMP_INSN
1788 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
1789 return PATTERN (tmp);
1790 return NULL_RTX;
1791 }
1792
1793 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1794
1795 rtx
1796 alpha_tablejump_best_label (insn)
1797 rtx insn;
1798 {
1799 rtx jump_table = alpha_tablejump_addr_vec (insn);
1800 rtx best_label = NULL_RTX;
1801
1802 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1803 there for edge frequency counts from profile data. */
1804
1805 if (jump_table)
1806 {
1807 int n_labels = XVECLEN (jump_table, 1);
1808 int best_count = -1;
1809 int i, j;
1810
1811 for (i = 0; i < n_labels; i++)
1812 {
1813 int count = 1;
1814
1815 for (j = i + 1; j < n_labels; j++)
1816 if (XEXP (XVECEXP (jump_table, 1, i), 0)
1817 == XEXP (XVECEXP (jump_table, 1, j), 0))
1818 count++;
1819
1820 if (count > best_count)
1821 best_count = count, best_label = XVECEXP (jump_table, 1, i);
1822 }
1823 }
1824
1825 return best_label ? best_label : const0_rtx;
1826 }
1827
1828 /* Return the TLS model to use for SYMBOL. */
1829
1830 static enum tls_model
1831 tls_symbolic_operand_type (symbol)
1832 rtx symbol;
1833 {
1834 enum tls_model model;
1835
1836 if (GET_CODE (symbol) != SYMBOL_REF)
1837 return 0;
1838 model = SYMBOL_REF_TLS_MODEL (symbol);
1839
1840 /* Local-exec with a 64-bit size is the same code as initial-exec. */
1841 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
1842 model = TLS_MODEL_INITIAL_EXEC;
1843
1844 return model;
1845 }
1846 \f
1847 /* Return true if the function DECL will share the same GP as any
1848 function in the current unit of translation. */
1849
1850 static bool
1851 decl_has_samegp (decl)
1852 tree decl;
1853 {
1854 /* Functions that are not local can be overridden, and thus may
1855 not share the same gp. */
1856 if (!(*targetm.binds_local_p) (decl))
1857 return false;
1858
1859 /* If -msmall-data is in effect, assume that there is only one GP
1860 for the module, and so any local symbol has this property. We
1861 need explicit relocations to be able to enforce this for symbols
1862 not defined in this unit of translation, however. */
1863 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
1864 return true;
1865
1866 /* Functions that are not external are defined in this UoT. */
1867 /* ??? Irritatingly, static functions not yet emitted are still
1868 marked "external". Apply this to non-static functions only. */
1869 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
1870 }
1871
1872 /* Return true if EXP should be placed in the small data section. */
1873
1874 static bool
1875 alpha_in_small_data_p (exp)
1876 tree exp;
1877 {
1878 /* We want to merge strings, so we never consider them small data. */
1879 if (TREE_CODE (exp) == STRING_CST)
1880 return false;
1881
1882 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
1883 {
1884 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
1885 if (strcmp (section, ".sdata") == 0
1886 || strcmp (section, ".sbss") == 0)
1887 return true;
1888 }
1889 else
1890 {
1891 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
1892
1893 /* If this is an incomplete type with size 0, then we can't put it
1894 in sdata because it might be too big when completed. */
1895 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
1896 return true;
1897 }
1898
1899 return false;
1900 }
1901
1902 #if TARGET_ABI_OPEN_VMS
1903 static bool
1904 alpha_linkage_symbol_p (symname)
1905 const char *symname;
1906 {
1907 int symlen = strlen (symname);
1908
1909 if (symlen > 4)
1910 return strcmp (&symname [symlen - 4], "..lk") == 0;
1911
1912 return false;
1913 }
1914
1915 #define LINKAGE_SYMBOL_REF_P(X) \
1916 ((GET_CODE (X) == SYMBOL_REF \
1917 && alpha_linkage_symbol_p (XSTR (X, 0))) \
1918 || (GET_CODE (X) == CONST \
1919 && GET_CODE (XEXP (X, 0)) == PLUS \
1920 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
1921 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
1922 #endif
1923
1924 /* legitimate_address_p recognizes an RTL expression that is a valid
1925 memory address for an instruction. The MODE argument is the
1926 machine mode for the MEM expression that wants to use this address.
1927
1928 For Alpha, we have either a constant address or the sum of a
1929 register and a constant address, or just a register. For DImode,
1930 any of those forms can be surrounded with an AND that clear the
1931 low-order three bits; this is an "unaligned" access. */
1932
1933 bool
1934 alpha_legitimate_address_p (mode, x, strict)
1935 enum machine_mode mode;
1936 rtx x;
1937 int strict;
1938 {
1939 /* If this is an ldq_u type address, discard the outer AND. */
1940 if (mode == DImode
1941 && GET_CODE (x) == AND
1942 && GET_CODE (XEXP (x, 1)) == CONST_INT
1943 && INTVAL (XEXP (x, 1)) == -8)
1944 x = XEXP (x, 0);
1945
1946 /* Discard non-paradoxical subregs. */
1947 if (GET_CODE (x) == SUBREG
1948 && (GET_MODE_SIZE (GET_MODE (x))
1949 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1950 x = SUBREG_REG (x);
1951
1952 /* Unadorned general registers are valid. */
1953 if (REG_P (x)
1954 && (strict
1955 ? STRICT_REG_OK_FOR_BASE_P (x)
1956 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
1957 return true;
1958
1959 /* Constant addresses (i.e. +/- 32k) are valid. */
1960 if (CONSTANT_ADDRESS_P (x))
1961 return true;
1962
1963 #if TARGET_ABI_OPEN_VMS
1964 if (LINKAGE_SYMBOL_REF_P (x))
1965 return true;
1966 #endif
1967
1968 /* Register plus a small constant offset is valid. */
1969 if (GET_CODE (x) == PLUS)
1970 {
1971 rtx ofs = XEXP (x, 1);
1972 x = XEXP (x, 0);
1973
1974 /* Discard non-paradoxical subregs. */
1975 if (GET_CODE (x) == SUBREG
1976 && (GET_MODE_SIZE (GET_MODE (x))
1977 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1978 x = SUBREG_REG (x);
1979
1980 if (REG_P (x))
1981 {
1982 if (! strict
1983 && NONSTRICT_REG_OK_FP_BASE_P (x)
1984 && GET_CODE (ofs) == CONST_INT)
1985 return true;
1986 if ((strict
1987 ? STRICT_REG_OK_FOR_BASE_P (x)
1988 : NONSTRICT_REG_OK_FOR_BASE_P (x))
1989 && CONSTANT_ADDRESS_P (ofs))
1990 return true;
1991 }
1992 else if (GET_CODE (x) == ADDRESSOF
1993 && GET_CODE (ofs) == CONST_INT)
1994 return true;
1995 }
1996
1997 /* If we're managing explicit relocations, LO_SUM is valid, as
1998 are small data symbols. */
1999 else if (TARGET_EXPLICIT_RELOCS)
2000 {
2001 if (small_symbolic_operand (x, Pmode))
2002 return true;
2003
2004 if (GET_CODE (x) == LO_SUM)
2005 {
2006 rtx ofs = XEXP (x, 1);
2007 x = XEXP (x, 0);
2008
2009 /* Discard non-paradoxical subregs. */
2010 if (GET_CODE (x) == SUBREG
2011 && (GET_MODE_SIZE (GET_MODE (x))
2012 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2013 x = SUBREG_REG (x);
2014
2015 /* Must have a valid base register. */
2016 if (! (REG_P (x)
2017 && (strict
2018 ? STRICT_REG_OK_FOR_BASE_P (x)
2019 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
2020 return false;
2021
2022 /* The symbol must be local. */
2023 if (local_symbolic_operand (ofs, Pmode)
2024 || dtp32_symbolic_operand (ofs, Pmode)
2025 || tp32_symbolic_operand (ofs, Pmode))
2026 return true;
2027 }
2028 }
2029
2030 return false;
2031 }
2032
2033 /* Build the SYMBOL_REF for __tls_get_addr. */
2034
2035 static GTY(()) rtx tls_get_addr_libfunc;
2036
2037 static rtx
2038 get_tls_get_addr ()
2039 {
2040 if (!tls_get_addr_libfunc)
2041 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
2042 return tls_get_addr_libfunc;
2043 }
2044
2045 /* Try machine-dependent ways of modifying an illegitimate address
2046 to be legitimate. If we find one, return the new, valid address. */
2047
2048 rtx
2049 alpha_legitimize_address (x, scratch, mode)
2050 rtx x;
2051 rtx scratch;
2052 enum machine_mode mode ATTRIBUTE_UNUSED;
2053 {
2054 HOST_WIDE_INT addend;
2055
2056 /* If the address is (plus reg const_int) and the CONST_INT is not a
2057 valid offset, compute the high part of the constant and add it to
2058 the register. Then our address is (plus temp low-part-const). */
2059 if (GET_CODE (x) == PLUS
2060 && GET_CODE (XEXP (x, 0)) == REG
2061 && GET_CODE (XEXP (x, 1)) == CONST_INT
2062 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
2063 {
2064 addend = INTVAL (XEXP (x, 1));
2065 x = XEXP (x, 0);
2066 goto split_addend;
2067 }
2068
2069 /* If the address is (const (plus FOO const_int)), find the low-order
2070 part of the CONST_INT. Then load FOO plus any high-order part of the
2071 CONST_INT into a register. Our address is (plus reg low-part-const).
2072 This is done to reduce the number of GOT entries. */
2073 if (!no_new_pseudos
2074 && GET_CODE (x) == CONST
2075 && GET_CODE (XEXP (x, 0)) == PLUS
2076 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2077 {
2078 addend = INTVAL (XEXP (XEXP (x, 0), 1));
2079 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
2080 goto split_addend;
2081 }
2082
2083 /* If we have a (plus reg const), emit the load as in (2), then add
2084 the two registers, and finally generate (plus reg low-part-const) as
2085 our address. */
2086 if (!no_new_pseudos
2087 && GET_CODE (x) == PLUS
2088 && GET_CODE (XEXP (x, 0)) == REG
2089 && GET_CODE (XEXP (x, 1)) == CONST
2090 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
2091 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
2092 {
2093 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
2094 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
2095 XEXP (XEXP (XEXP (x, 1), 0), 0),
2096 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2097 goto split_addend;
2098 }
2099
2100 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
2101 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
2102 {
2103 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
2104
2105 switch (tls_symbolic_operand_type (x))
2106 {
2107 case TLS_MODEL_GLOBAL_DYNAMIC:
2108 start_sequence ();
2109
2110 r0 = gen_rtx_REG (Pmode, 0);
2111 r16 = gen_rtx_REG (Pmode, 16);
2112 tga = get_tls_get_addr ();
2113 dest = gen_reg_rtx (Pmode);
2114 seq = GEN_INT (alpha_next_sequence_number++);
2115
2116 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
2117 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
2118 insn = emit_call_insn (insn);
2119 CONST_OR_PURE_CALL_P (insn) = 1;
2120 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
2121
2122 insn = get_insns ();
2123 end_sequence ();
2124
2125 emit_libcall_block (insn, dest, r0, x);
2126 return dest;
2127
2128 case TLS_MODEL_LOCAL_DYNAMIC:
2129 start_sequence ();
2130
2131 r0 = gen_rtx_REG (Pmode, 0);
2132 r16 = gen_rtx_REG (Pmode, 16);
2133 tga = get_tls_get_addr ();
2134 scratch = gen_reg_rtx (Pmode);
2135 seq = GEN_INT (alpha_next_sequence_number++);
2136
2137 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
2138 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
2139 insn = emit_call_insn (insn);
2140 CONST_OR_PURE_CALL_P (insn) = 1;
2141 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
2142
2143 insn = get_insns ();
2144 end_sequence ();
2145
2146 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2147 UNSPEC_TLSLDM_CALL);
2148 emit_libcall_block (insn, scratch, r0, eqv);
2149
2150 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
2151 eqv = gen_rtx_CONST (Pmode, eqv);
2152
2153 if (alpha_tls_size == 64)
2154 {
2155 dest = gen_reg_rtx (Pmode);
2156 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
2157 emit_insn (gen_adddi3 (dest, dest, scratch));
2158 return dest;
2159 }
2160 if (alpha_tls_size == 32)
2161 {
2162 insn = gen_rtx_HIGH (Pmode, eqv);
2163 insn = gen_rtx_PLUS (Pmode, scratch, insn);
2164 scratch = gen_reg_rtx (Pmode);
2165 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
2166 }
2167 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
2168
2169 case TLS_MODEL_INITIAL_EXEC:
2170 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
2171 eqv = gen_rtx_CONST (Pmode, eqv);
2172 tp = gen_reg_rtx (Pmode);
2173 scratch = gen_reg_rtx (Pmode);
2174 dest = gen_reg_rtx (Pmode);
2175
2176 emit_insn (gen_load_tp (tp));
2177 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
2178 emit_insn (gen_adddi3 (dest, tp, scratch));
2179 return dest;
2180
2181 case TLS_MODEL_LOCAL_EXEC:
2182 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
2183 eqv = gen_rtx_CONST (Pmode, eqv);
2184 tp = gen_reg_rtx (Pmode);
2185
2186 emit_insn (gen_load_tp (tp));
2187 if (alpha_tls_size == 32)
2188 {
2189 insn = gen_rtx_HIGH (Pmode, eqv);
2190 insn = gen_rtx_PLUS (Pmode, tp, insn);
2191 tp = gen_reg_rtx (Pmode);
2192 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
2193 }
2194 return gen_rtx_LO_SUM (Pmode, tp, eqv);
2195 }
2196
2197 if (local_symbolic_operand (x, Pmode))
2198 {
2199 if (small_symbolic_operand (x, Pmode))
2200 return x;
2201 else
2202 {
2203 if (!no_new_pseudos)
2204 scratch = gen_reg_rtx (Pmode);
2205 emit_insn (gen_rtx_SET (VOIDmode, scratch,
2206 gen_rtx_HIGH (Pmode, x)));
2207 return gen_rtx_LO_SUM (Pmode, scratch, x);
2208 }
2209 }
2210 }
2211
2212 return NULL;
2213
2214 split_addend:
2215 {
2216 HOST_WIDE_INT low, high;
2217
2218 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
2219 addend -= low;
2220 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
2221 addend -= high;
2222
2223 if (addend)
2224 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
2225 (no_new_pseudos ? scratch : NULL_RTX),
2226 1, OPTAB_LIB_WIDEN);
2227 if (high)
2228 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
2229 (no_new_pseudos ? scratch : NULL_RTX),
2230 1, OPTAB_LIB_WIDEN);
2231
2232 return plus_constant (x, low);
2233 }
2234 }
2235
2236 /* We do not allow indirect calls to be optimized into sibling calls, nor
2237 can we allow a call to a function with a different GP to be optimized
2238 into a sibcall. */
2239
2240 static bool
2241 alpha_function_ok_for_sibcall (decl, exp)
2242 tree decl;
2243 tree exp ATTRIBUTE_UNUSED;
2244 {
2245 /* Can't do indirect tail calls, since we don't know if the target
2246 uses the same GP. */
2247 if (!decl)
2248 return false;
2249
2250 /* Otherwise, we can make a tail call if the target function shares
2251 the same GP. */
2252 return decl_has_samegp (decl);
2253 }
2254
2255 /* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
2256 small symbolic operand until after reload. At which point we need
2257 to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
2258 so that sched2 has the proper dependency information. */
2259
2260 int
2261 some_small_symbolic_operand (x, mode)
2262 rtx x;
2263 enum machine_mode mode ATTRIBUTE_UNUSED;
2264 {
2265 return for_each_rtx (&x, some_small_symbolic_operand_1, NULL);
2266 }
2267
2268 static int
2269 some_small_symbolic_operand_1 (px, data)
2270 rtx *px;
2271 void *data ATTRIBUTE_UNUSED;
2272 {
2273 rtx x = *px;
2274
2275 /* Don't re-split. */
2276 if (GET_CODE (x) == LO_SUM)
2277 return -1;
2278
2279 return small_symbolic_operand (x, Pmode) != 0;
2280 }
2281
2282 rtx
2283 split_small_symbolic_operand (x)
2284 rtx x;
2285 {
2286 x = copy_insn (x);
2287 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
2288 return x;
2289 }
2290
2291 static int
2292 split_small_symbolic_operand_1 (px, data)
2293 rtx *px;
2294 void *data ATTRIBUTE_UNUSED;
2295 {
2296 rtx x = *px;
2297
2298 /* Don't re-split. */
2299 if (GET_CODE (x) == LO_SUM)
2300 return -1;
2301
2302 if (small_symbolic_operand (x, Pmode))
2303 {
2304 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
2305 *px = x;
2306 return -1;
2307 }
2308
2309 return 0;
2310 }
2311
2312 /* Indicate that INSN cannot be duplicated. This is true for any insn
2313 that we've marked with gpdisp relocs, since those have to stay in
2314 1-1 correspondence with one another.
2315
2316 Techinically we could copy them if we could set up a mapping from one
2317 sequence number to another, across the set of insns to be duplicated.
2318 This seems overly complicated and error-prone since interblock motion
2319 from sched-ebb could move one of the pair of insns to a different block. */
2320
2321 static bool
2322 alpha_cannot_copy_insn_p (insn)
2323 rtx insn;
2324 {
2325 rtx pat;
2326
2327 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
2328 return false;
2329
2330 if (GET_CODE (insn) != INSN)
2331 return false;
2332 if (asm_noperands (insn) >= 0)
2333 return false;
2334
2335 pat = PATTERN (insn);
2336 if (GET_CODE (pat) != SET)
2337 return false;
2338 pat = SET_SRC (pat);
2339 if (GET_CODE (pat) == UNSPEC_VOLATILE)
2340 {
2341 if (XINT (pat, 1) == UNSPECV_LDGP1
2342 || XINT (pat, 1) == UNSPECV_PLDGP2)
2343 return true;
2344 }
2345 else if (GET_CODE (pat) == UNSPEC)
2346 {
2347 if (XINT (pat, 1) == UNSPEC_LDGP2)
2348 return true;
2349 }
2350
2351 return false;
2352 }
2353
2354
2355 /* Try a machine-dependent way of reloading an illegitimate address
2356 operand. If we find one, push the reload and return the new rtx. */
2357
2358 rtx
2359 alpha_legitimize_reload_address (x, mode, opnum, type, ind_levels)
2360 rtx x;
2361 enum machine_mode mode ATTRIBUTE_UNUSED;
2362 int opnum;
2363 int type;
2364 int ind_levels ATTRIBUTE_UNUSED;
2365 {
2366 /* We must recognize output that we have already generated ourselves. */
2367 if (GET_CODE (x) == PLUS
2368 && GET_CODE (XEXP (x, 0)) == PLUS
2369 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2370 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2371 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2372 {
2373 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2374 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2375 opnum, type);
2376 return x;
2377 }
2378
2379 /* We wish to handle large displacements off a base register by
2380 splitting the addend across an ldah and the mem insn. This
2381 cuts number of extra insns needed from 3 to 1. */
2382 if (GET_CODE (x) == PLUS
2383 && GET_CODE (XEXP (x, 0)) == REG
2384 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
2385 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
2386 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2387 {
2388 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2389 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
2390 HOST_WIDE_INT high
2391 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
2392
2393 /* Check for 32-bit overflow. */
2394 if (high + low != val)
2395 return NULL_RTX;
2396
2397 /* Reload the high part into a base reg; leave the low part
2398 in the mem directly. */
2399 x = gen_rtx_PLUS (GET_MODE (x),
2400 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
2401 GEN_INT (high)),
2402 GEN_INT (low));
2403
2404 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2405 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2406 opnum, type);
2407 return x;
2408 }
2409
2410 return NULL_RTX;
2411 }
2412 \f
2413 /* Compute a (partial) cost for rtx X. Return true if the complete
2414 cost has been computed, and false if subexpressions should be
2415 scanned. In either case, *TOTAL contains the cost result. */
2416
2417 static bool
2418 alpha_rtx_costs (x, code, outer_code, total)
2419 rtx x;
2420 int code, outer_code;
2421 int *total;
2422 {
2423 enum machine_mode mode = GET_MODE (x);
2424 bool float_mode_p = FLOAT_MODE_P (mode);
2425
2426 switch (code)
2427 {
2428 /* If this is an 8-bit constant, return zero since it can be used
2429 nearly anywhere with no cost. If it is a valid operand for an
2430 ADD or AND, likewise return 0 if we know it will be used in that
2431 context. Otherwise, return 2 since it might be used there later.
2432 All other constants take at least two insns. */
2433 case CONST_INT:
2434 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
2435 {
2436 *total = 0;
2437 return true;
2438 }
2439 /* FALLTHRU */
2440
2441 case CONST_DOUBLE:
2442 if (x == CONST0_RTX (mode))
2443 *total = 0;
2444 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
2445 || (outer_code == AND && and_operand (x, VOIDmode)))
2446 *total = 0;
2447 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
2448 *total = 2;
2449 else
2450 *total = COSTS_N_INSNS (2);
2451 return true;
2452
2453 case CONST:
2454 case SYMBOL_REF:
2455 case LABEL_REF:
2456 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
2457 *total = COSTS_N_INSNS (outer_code != MEM);
2458 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
2459 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
2460 else if (tls_symbolic_operand_type (x))
2461 /* Estimate of cost for call_pal rduniq. */
2462 *total = COSTS_N_INSNS (15);
2463 else
2464 /* Otherwise we do a load from the GOT. */
2465 *total = COSTS_N_INSNS (alpha_memory_latency);
2466 return true;
2467
2468 case PLUS:
2469 case MINUS:
2470 if (float_mode_p)
2471 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2472 else if (GET_CODE (XEXP (x, 0)) == MULT
2473 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
2474 {
2475 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
2476 + rtx_cost (XEXP (x, 1), outer_code) + 2);
2477 return true;
2478 }
2479 return false;
2480
2481 case MULT:
2482 if (float_mode_p)
2483 *total = alpha_rtx_cost_data[alpha_cpu].fp_mult;
2484 else if (mode == DImode)
2485 *total = alpha_rtx_cost_data[alpha_cpu].int_mult_di;
2486 else
2487 *total = alpha_rtx_cost_data[alpha_cpu].int_mult_si;
2488 return false;
2489
2490 case ASHIFT:
2491 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2492 && INTVAL (XEXP (x, 1)) <= 3)
2493 {
2494 *total = COSTS_N_INSNS (1);
2495 return false;
2496 }
2497 /* FALLTHRU */
2498
2499 case ASHIFTRT:
2500 case LSHIFTRT:
2501 *total = alpha_rtx_cost_data[alpha_cpu].int_shift;
2502 return false;
2503
2504 case IF_THEN_ELSE:
2505 if (float_mode_p)
2506 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2507 else
2508 *total = alpha_rtx_cost_data[alpha_cpu].int_cmov;
2509 return false;
2510
2511 case DIV:
2512 case UDIV:
2513 case MOD:
2514 case UMOD:
2515 if (!float_mode_p)
2516 *total = COSTS_N_INSNS (70); /* ??? */
2517 else if (mode == SFmode)
2518 *total = alpha_rtx_cost_data[alpha_cpu].fp_div_sf;
2519 else
2520 *total = alpha_rtx_cost_data[alpha_cpu].fp_div_df;
2521 return false;
2522
2523 case MEM:
2524 *total = COSTS_N_INSNS (alpha_memory_latency);
2525 return true;
2526
2527 case NEG:
2528 if (! float_mode_p)
2529 {
2530 *total = COSTS_N_INSNS (1);
2531 return false;
2532 }
2533 /* FALLTHRU */
2534
2535 case ABS:
2536 if (! float_mode_p)
2537 {
2538 *total = COSTS_N_INSNS (1) + alpha_rtx_cost_data[alpha_cpu].int_cmov;
2539 return false;
2540 }
2541 /* FALLTHRU */
2542
2543 case FLOAT:
2544 case UNSIGNED_FLOAT:
2545 case FIX:
2546 case UNSIGNED_FIX:
2547 case FLOAT_EXTEND:
2548 case FLOAT_TRUNCATE:
2549 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2550 return false;
2551
2552 default:
2553 return false;
2554 }
2555 }
2556 \f
2557 /* REF is an alignable memory location. Place an aligned SImode
2558 reference into *PALIGNED_MEM and the number of bits to shift into
2559 *PBITNUM. SCRATCH is a free register for use in reloading out
2560 of range stack slots. */
2561
2562 void
2563 get_aligned_mem (ref, paligned_mem, pbitnum)
2564 rtx ref;
2565 rtx *paligned_mem, *pbitnum;
2566 {
2567 rtx base;
2568 HOST_WIDE_INT offset = 0;
2569
2570 if (GET_CODE (ref) != MEM)
2571 abort ();
2572
2573 if (reload_in_progress
2574 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2575 {
2576 base = find_replacement (&XEXP (ref, 0));
2577
2578 if (! memory_address_p (GET_MODE (ref), base))
2579 abort ();
2580 }
2581 else
2582 {
2583 base = XEXP (ref, 0);
2584 }
2585
2586 if (GET_CODE (base) == PLUS)
2587 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2588
2589 *paligned_mem
2590 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
2591
2592 if (WORDS_BIG_ENDIAN)
2593 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
2594 + (offset & 3) * 8));
2595 else
2596 *pbitnum = GEN_INT ((offset & 3) * 8);
2597 }
2598
2599 /* Similar, but just get the address. Handle the two reload cases.
2600 Add EXTRA_OFFSET to the address we return. */
2601
2602 rtx
2603 get_unaligned_address (ref, extra_offset)
2604 rtx ref;
2605 int extra_offset;
2606 {
2607 rtx base;
2608 HOST_WIDE_INT offset = 0;
2609
2610 if (GET_CODE (ref) != MEM)
2611 abort ();
2612
2613 if (reload_in_progress
2614 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2615 {
2616 base = find_replacement (&XEXP (ref, 0));
2617
2618 if (! memory_address_p (GET_MODE (ref), base))
2619 abort ();
2620 }
2621 else
2622 {
2623 base = XEXP (ref, 0);
2624 }
2625
2626 if (GET_CODE (base) == PLUS)
2627 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2628
2629 return plus_constant (base, offset + extra_offset);
2630 }
2631
2632 /* On the Alpha, all (non-symbolic) constants except zero go into
2633 a floating-point register via memory. Note that we cannot
2634 return anything that is not a subset of CLASS, and that some
2635 symbolic constants cannot be dropped to memory. */
2636
2637 enum reg_class
2638 alpha_preferred_reload_class(x, class)
2639 rtx x;
2640 enum reg_class class;
2641 {
2642 /* Zero is present in any register class. */
2643 if (x == CONST0_RTX (GET_MODE (x)))
2644 return class;
2645
2646 /* These sorts of constants we can easily drop to memory. */
2647 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
2648 {
2649 if (class == FLOAT_REGS)
2650 return NO_REGS;
2651 if (class == ALL_REGS)
2652 return GENERAL_REGS;
2653 return class;
2654 }
2655
2656 /* All other kinds of constants should not (and in the case of HIGH
2657 cannot) be dropped to memory -- instead we use a GENERAL_REGS
2658 secondary reload. */
2659 if (CONSTANT_P (x))
2660 return (class == ALL_REGS ? GENERAL_REGS : class);
2661
2662 return class;
2663 }
2664
2665 /* Loading and storing HImode or QImode values to and from memory
2666 usually requires a scratch register. The exceptions are loading
2667 QImode and HImode from an aligned address to a general register
2668 unless byte instructions are permitted.
2669
2670 We also cannot load an unaligned address or a paradoxical SUBREG
2671 into an FP register.
2672
2673 We also cannot do integral arithmetic into FP regs, as might result
2674 from register elimination into a DImode fp register. */
2675
2676 enum reg_class
2677 secondary_reload_class (class, mode, x, in)
2678 enum reg_class class;
2679 enum machine_mode mode;
2680 rtx x;
2681 int in;
2682 {
2683 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
2684 {
2685 if (GET_CODE (x) == MEM
2686 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
2687 || (GET_CODE (x) == SUBREG
2688 && (GET_CODE (SUBREG_REG (x)) == MEM
2689 || (GET_CODE (SUBREG_REG (x)) == REG
2690 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
2691 {
2692 if (!in || !aligned_memory_operand(x, mode))
2693 return GENERAL_REGS;
2694 }
2695 }
2696
2697 if (class == FLOAT_REGS)
2698 {
2699 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2700 return GENERAL_REGS;
2701
2702 if (GET_CODE (x) == SUBREG
2703 && (GET_MODE_SIZE (GET_MODE (x))
2704 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2705 return GENERAL_REGS;
2706
2707 if (in && INTEGRAL_MODE_P (mode)
2708 && ! (memory_operand (x, mode) || x == const0_rtx))
2709 return GENERAL_REGS;
2710 }
2711
2712 return NO_REGS;
2713 }
2714 \f
2715 /* Subfunction of the following function. Update the flags of any MEM
2716 found in part of X. */
2717
2718 static void
2719 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
2720 rtx x;
2721 int in_struct_p, volatile_p, unchanging_p;
2722 {
2723 int i;
2724
2725 switch (GET_CODE (x))
2726 {
2727 case SEQUENCE:
2728 abort ();
2729
2730 case PARALLEL:
2731 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2732 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
2733 unchanging_p);
2734 break;
2735
2736 case INSN:
2737 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
2738 unchanging_p);
2739 break;
2740
2741 case SET:
2742 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
2743 unchanging_p);
2744 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
2745 unchanging_p);
2746 break;
2747
2748 case MEM:
2749 MEM_IN_STRUCT_P (x) = in_struct_p;
2750 MEM_VOLATILE_P (x) = volatile_p;
2751 RTX_UNCHANGING_P (x) = unchanging_p;
2752 /* Sadly, we cannot use alias sets because the extra aliasing
2753 produced by the AND interferes. Given that two-byte quantities
2754 are the only thing we would be able to differentiate anyway,
2755 there does not seem to be any point in convoluting the early
2756 out of the alias check. */
2757 break;
2758
2759 default:
2760 break;
2761 }
2762 }
2763
2764 /* Given INSN, which is an INSN list or the PATTERN of a single insn
2765 generated to perform a memory operation, look for any MEMs in either
2766 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
2767 volatile flags from REF into each of the MEMs found. If REF is not
2768 a MEM, don't do anything. */
2769
2770 void
2771 alpha_set_memflags (insn, ref)
2772 rtx insn;
2773 rtx ref;
2774 {
2775 int in_struct_p, volatile_p, unchanging_p;
2776
2777 if (GET_CODE (ref) != MEM)
2778 return;
2779
2780 in_struct_p = MEM_IN_STRUCT_P (ref);
2781 volatile_p = MEM_VOLATILE_P (ref);
2782 unchanging_p = RTX_UNCHANGING_P (ref);
2783
2784 /* This is only called from alpha.md, after having had something
2785 generated from one of the insn patterns. So if everything is
2786 zero, the pattern is already up-to-date. */
2787 if (! in_struct_p && ! volatile_p && ! unchanging_p)
2788 return;
2789
2790 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
2791 }
2792 \f
2793 /* Try to output insns to set TARGET equal to the constant C if it can be
2794 done in less than N insns. Do all computations in MODE. Returns the place
2795 where the output has been placed if it can be done and the insns have been
2796 emitted. If it would take more than N insns, zero is returned and no
2797 insns and emitted. */
2798
2799 rtx
2800 alpha_emit_set_const (target, mode, c, n)
2801 rtx target;
2802 enum machine_mode mode;
2803 HOST_WIDE_INT c;
2804 int n;
2805 {
2806 rtx result = 0;
2807 rtx orig_target = target;
2808 int i;
2809
2810 /* If we can't make any pseudos, TARGET is an SImode hard register, we
2811 can't load this constant in one insn, do this in DImode. */
2812 if (no_new_pseudos && mode == SImode
2813 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER
2814 && (result = alpha_emit_set_const_1 (target, mode, c, 1)) == 0)
2815 {
2816 target = gen_lowpart (DImode, target);
2817 mode = DImode;
2818 }
2819
2820 /* Try 1 insn, then 2, then up to N. */
2821 for (i = 1; i <= n; i++)
2822 {
2823 result = alpha_emit_set_const_1 (target, mode, c, i);
2824 if (result)
2825 {
2826 rtx insn = get_last_insn ();
2827 rtx set = single_set (insn);
2828 if (! CONSTANT_P (SET_SRC (set)))
2829 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2830 break;
2831 }
2832 }
2833
2834 /* Allow for the case where we changed the mode of TARGET. */
2835 if (result == target)
2836 result = orig_target;
2837
2838 return result;
2839 }
2840
2841 /* Internal routine for the above to check for N or below insns. */
2842
2843 static rtx
2844 alpha_emit_set_const_1 (target, mode, c, n)
2845 rtx target;
2846 enum machine_mode mode;
2847 HOST_WIDE_INT c;
2848 int n;
2849 {
2850 HOST_WIDE_INT new;
2851 int i, bits;
2852 /* Use a pseudo if highly optimizing and still generating RTL. */
2853 rtx subtarget
2854 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
2855 rtx temp, insn;
2856
2857 /* If this is a sign-extended 32-bit constant, we can do this in at most
2858 three insns, so do it if we have enough insns left. We always have
2859 a sign-extended 32-bit constant when compiling on a narrow machine. */
2860
2861 if (HOST_BITS_PER_WIDE_INT != 64
2862 || c >> 31 == -1 || c >> 31 == 0)
2863 {
2864 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
2865 HOST_WIDE_INT tmp1 = c - low;
2866 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
2867 HOST_WIDE_INT extra = 0;
2868
2869 /* If HIGH will be interpreted as negative but the constant is
2870 positive, we must adjust it to do two ldha insns. */
2871
2872 if ((high & 0x8000) != 0 && c >= 0)
2873 {
2874 extra = 0x4000;
2875 tmp1 -= 0x40000000;
2876 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2877 }
2878
2879 if (c == low || (low == 0 && extra == 0))
2880 {
2881 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2882 but that meant that we can't handle INT_MIN on 32-bit machines
2883 (like NT/Alpha), because we recurse indefinitely through
2884 emit_move_insn to gen_movdi. So instead, since we know exactly
2885 what we want, create it explicitly. */
2886
2887 if (target == NULL)
2888 target = gen_reg_rtx (mode);
2889 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
2890 return target;
2891 }
2892 else if (n >= 2 + (extra != 0))
2893 {
2894 temp = copy_to_suggested_reg (GEN_INT (high << 16), subtarget, mode);
2895
2896 /* As of 2002-02-23, addsi3 is only available when not optimizing.
2897 This means that if we go through expand_binop, we'll try to
2898 generate extensions, etc, which will require new pseudos, which
2899 will fail during some split phases. The SImode add patterns
2900 still exist, but are not named. So build the insns by hand. */
2901
2902 if (extra != 0)
2903 {
2904 if (! subtarget)
2905 subtarget = gen_reg_rtx (mode);
2906 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
2907 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
2908 emit_insn (insn);
2909 temp = subtarget;
2910 }
2911
2912 if (target == NULL)
2913 target = gen_reg_rtx (mode);
2914 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2915 insn = gen_rtx_SET (VOIDmode, target, insn);
2916 emit_insn (insn);
2917 return target;
2918 }
2919 }
2920
2921 /* If we couldn't do it that way, try some other methods. But if we have
2922 no instructions left, don't bother. Likewise, if this is SImode and
2923 we can't make pseudos, we can't do anything since the expand_binop
2924 and expand_unop calls will widen and try to make pseudos. */
2925
2926 if (n == 1 || (mode == SImode && no_new_pseudos))
2927 return 0;
2928
2929 /* Next, see if we can load a related constant and then shift and possibly
2930 negate it to get the constant we want. Try this once each increasing
2931 numbers of insns. */
2932
2933 for (i = 1; i < n; i++)
2934 {
2935 /* First, see if minus some low bits, we've an easy load of
2936 high bits. */
2937
2938 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
2939 if (new != 0
2940 && (temp = alpha_emit_set_const (subtarget, mode, c - new, i)) != 0)
2941 return expand_binop (mode, add_optab, temp, GEN_INT (new),
2942 target, 0, OPTAB_WIDEN);
2943
2944 /* Next try complementing. */
2945 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
2946 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
2947
2948 /* Next try to form a constant and do a left shift. We can do this
2949 if some low-order bits are zero; the exact_log2 call below tells
2950 us that information. The bits we are shifting out could be any
2951 value, but here we'll just try the 0- and sign-extended forms of
2952 the constant. To try to increase the chance of having the same
2953 constant in more than one insn, start at the highest number of
2954 bits to shift, but try all possibilities in case a ZAPNOT will
2955 be useful. */
2956
2957 if ((bits = exact_log2 (c & - c)) > 0)
2958 for (; bits > 0; bits--)
2959 if ((temp = (alpha_emit_set_const
2960 (subtarget, mode, c >> bits, i))) != 0
2961 || ((temp = (alpha_emit_set_const
2962 (subtarget, mode,
2963 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
2964 != 0))
2965 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
2966 target, 0, OPTAB_WIDEN);
2967
2968 /* Now try high-order zero bits. Here we try the shifted-in bits as
2969 all zero and all ones. Be careful to avoid shifting outside the
2970 mode and to avoid shifting outside the host wide int size. */
2971 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2972 confuse the recursive call and set all of the high 32 bits. */
2973
2974 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2975 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
2976 for (; bits > 0; bits--)
2977 if ((temp = alpha_emit_set_const (subtarget, mode,
2978 c << bits, i)) != 0
2979 || ((temp = (alpha_emit_set_const
2980 (subtarget, mode,
2981 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2982 i)))
2983 != 0))
2984 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
2985 target, 1, OPTAB_WIDEN);
2986
2987 /* Now try high-order 1 bits. We get that with a sign-extension.
2988 But one bit isn't enough here. Be careful to avoid shifting outside
2989 the mode and to avoid shifting outside the host wide int size. */
2990
2991 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2992 - floor_log2 (~ c) - 2)) > 0)
2993 for (; bits > 0; bits--)
2994 if ((temp = alpha_emit_set_const (subtarget, mode,
2995 c << bits, i)) != 0
2996 || ((temp = (alpha_emit_set_const
2997 (subtarget, mode,
2998 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2999 i)))
3000 != 0))
3001 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
3002 target, 0, OPTAB_WIDEN);
3003 }
3004
3005 #if HOST_BITS_PER_WIDE_INT == 64
3006 /* Finally, see if can load a value into the target that is the same as the
3007 constant except that all bytes that are 0 are changed to be 0xff. If we
3008 can, then we can do a ZAPNOT to obtain the desired constant. */
3009
3010 new = c;
3011 for (i = 0; i < 64; i += 8)
3012 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
3013 new |= (HOST_WIDE_INT) 0xff << i;
3014
3015 /* We are only called for SImode and DImode. If this is SImode, ensure that
3016 we are sign extended to a full word. */
3017
3018 if (mode == SImode)
3019 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
3020
3021 if (new != c && new != -1
3022 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
3023 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
3024 target, 0, OPTAB_WIDEN);
3025 #endif
3026
3027 return 0;
3028 }
3029
3030 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
3031 fall back to a straight forward decomposition. We do this to avoid
3032 exponential run times encountered when looking for longer sequences
3033 with alpha_emit_set_const. */
3034
3035 rtx
3036 alpha_emit_set_long_const (target, c1, c2)
3037 rtx target;
3038 HOST_WIDE_INT c1, c2;
3039 {
3040 HOST_WIDE_INT d1, d2, d3, d4;
3041
3042 /* Decompose the entire word */
3043 #if HOST_BITS_PER_WIDE_INT >= 64
3044 if (c2 != -(c1 < 0))
3045 abort ();
3046 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
3047 c1 -= d1;
3048 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
3049 c1 = (c1 - d2) >> 32;
3050 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
3051 c1 -= d3;
3052 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
3053 if (c1 != d4)
3054 abort ();
3055 #else
3056 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
3057 c1 -= d1;
3058 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
3059 if (c1 != d2)
3060 abort ();
3061 c2 += (d2 < 0);
3062 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
3063 c2 -= d3;
3064 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
3065 if (c2 != d4)
3066 abort ();
3067 #endif
3068
3069 /* Construct the high word */
3070 if (d4)
3071 {
3072 emit_move_insn (target, GEN_INT (d4));
3073 if (d3)
3074 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
3075 }
3076 else
3077 emit_move_insn (target, GEN_INT (d3));
3078
3079 /* Shift it into place */
3080 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
3081
3082 /* Add in the low bits. */
3083 if (d2)
3084 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
3085 if (d1)
3086 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
3087
3088 return target;
3089 }
3090
3091 /* Expand a move instruction; return true if all work is done.
3092 We don't handle non-bwx subword loads here. */
3093
3094 bool
3095 alpha_expand_mov (mode, operands)
3096 enum machine_mode mode;
3097 rtx *operands;
3098 {
3099 /* If the output is not a register, the input must be. */
3100 if (GET_CODE (operands[0]) == MEM
3101 && ! reg_or_0_operand (operands[1], mode))
3102 operands[1] = force_reg (mode, operands[1]);
3103
3104 /* Allow legitimize_address to perform some simplifications. */
3105 if (mode == Pmode && symbolic_operand (operands[1], mode))
3106 {
3107 rtx tmp;
3108
3109 /* With RTL inlining, at -O3, rtl is generated, stored, then actually
3110 compiled at the end of compilation. In the meantime, someone can
3111 re-encode-section-info on some symbol changing it e.g. from global
3112 to local-not-small. If this happens, we'd have emitted a plain
3113 load rather than a high+losum load and not recognize the insn.
3114
3115 So if rtl inlining is in effect, we delay the global/not-global
3116 decision until rest_of_compilation by wrapping it in an
3117 UNSPEC_SYMBOL. */
3118 if (TARGET_EXPLICIT_RELOCS && flag_inline_functions
3119 && rtx_equal_function_value_matters
3120 && global_symbolic_operand (operands[1], mode))
3121 {
3122 emit_insn (gen_movdi_er_maybe_g (operands[0], operands[1]));
3123 return true;
3124 }
3125
3126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
3127 if (tmp)
3128 {
3129 if (tmp == operands[0])
3130 return true;
3131 operands[1] = tmp;
3132 return false;
3133 }
3134 }
3135
3136 /* Early out for non-constants and valid constants. */
3137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
3138 return false;
3139
3140 /* Split large integers. */
3141 if (GET_CODE (operands[1]) == CONST_INT
3142 || GET_CODE (operands[1]) == CONST_DOUBLE)
3143 {
3144 HOST_WIDE_INT i0, i1;
3145 rtx temp = NULL_RTX;
3146
3147 if (GET_CODE (operands[1]) == CONST_INT)
3148 {
3149 i0 = INTVAL (operands[1]);
3150 i1 = -(i0 < 0);
3151 }
3152 else if (HOST_BITS_PER_WIDE_INT >= 64)
3153 {
3154 i0 = CONST_DOUBLE_LOW (operands[1]);
3155 i1 = -(i0 < 0);
3156 }
3157 else
3158 {
3159 i0 = CONST_DOUBLE_LOW (operands[1]);
3160 i1 = CONST_DOUBLE_HIGH (operands[1]);
3161 }
3162
3163 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
3164 temp = alpha_emit_set_const (operands[0], mode, i0, 3);
3165
3166 if (!temp && TARGET_BUILD_CONSTANTS)
3167 temp = alpha_emit_set_long_const (operands[0], i0, i1);
3168
3169 if (temp)
3170 {
3171 if (rtx_equal_p (operands[0], temp))
3172 return true;
3173 operands[1] = temp;
3174 return false;
3175 }
3176 }
3177
3178 /* Otherwise we've nothing left but to drop the thing to memory. */
3179 operands[1] = force_const_mem (mode, operands[1]);
3180 if (reload_in_progress)
3181 {
3182 emit_move_insn (operands[0], XEXP (operands[1], 0));
3183 operands[1] = copy_rtx (operands[1]);
3184 XEXP (operands[1], 0) = operands[0];
3185 }
3186 else
3187 operands[1] = validize_mem (operands[1]);
3188 return false;
3189 }
3190
3191 /* Expand a non-bwx QImode or HImode move instruction;
3192 return true if all work is done. */
3193
3194 bool
3195 alpha_expand_mov_nobwx (mode, operands)
3196 enum machine_mode mode;
3197 rtx *operands;
3198 {
3199 /* If the output is not a register, the input must be. */
3200 if (GET_CODE (operands[0]) == MEM)
3201 operands[1] = force_reg (mode, operands[1]);
3202
3203 /* Handle four memory cases, unaligned and aligned for either the input
3204 or the output. The only case where we can be called during reload is
3205 for aligned loads; all other cases require temporaries. */
3206
3207 if (GET_CODE (operands[1]) == MEM
3208 || (GET_CODE (operands[1]) == SUBREG
3209 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
3210 || (reload_in_progress && GET_CODE (operands[1]) == REG
3211 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
3212 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
3213 && GET_CODE (SUBREG_REG (operands[1])) == REG
3214 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
3215 {
3216 if (aligned_memory_operand (operands[1], mode))
3217 {
3218 if (reload_in_progress)
3219 {
3220 emit_insn ((mode == QImode
3221 ? gen_reload_inqi_help
3222 : gen_reload_inhi_help)
3223 (operands[0], operands[1],
3224 gen_rtx_REG (SImode, REGNO (operands[0]))));
3225 }
3226 else
3227 {
3228 rtx aligned_mem, bitnum;
3229 rtx scratch = gen_reg_rtx (SImode);
3230
3231 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
3232
3233 emit_insn ((mode == QImode
3234 ? gen_aligned_loadqi
3235 : gen_aligned_loadhi)
3236 (operands[0], aligned_mem, bitnum, scratch));
3237 }
3238 }
3239 else
3240 {
3241 /* Don't pass these as parameters since that makes the generated
3242 code depend on parameter evaluation order which will cause
3243 bootstrap failures. */
3244
3245 rtx temp1 = gen_reg_rtx (DImode);
3246 rtx temp2 = gen_reg_rtx (DImode);
3247 rtx seq = ((mode == QImode
3248 ? gen_unaligned_loadqi
3249 : gen_unaligned_loadhi)
3250 (operands[0], get_unaligned_address (operands[1], 0),
3251 temp1, temp2));
3252
3253 alpha_set_memflags (seq, operands[1]);
3254 emit_insn (seq);
3255 }
3256 return true;
3257 }
3258
3259 if (GET_CODE (operands[0]) == MEM
3260 || (GET_CODE (operands[0]) == SUBREG
3261 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
3262 || (reload_in_progress && GET_CODE (operands[0]) == REG
3263 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
3264 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
3265 && GET_CODE (SUBREG_REG (operands[0])) == REG
3266 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
3267 {
3268 if (aligned_memory_operand (operands[0], mode))
3269 {
3270 rtx aligned_mem, bitnum;
3271 rtx temp1 = gen_reg_rtx (SImode);
3272 rtx temp2 = gen_reg_rtx (SImode);
3273
3274 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
3275
3276 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
3277 temp1, temp2));
3278 }
3279 else
3280 {
3281 rtx temp1 = gen_reg_rtx (DImode);
3282 rtx temp2 = gen_reg_rtx (DImode);
3283 rtx temp3 = gen_reg_rtx (DImode);
3284 rtx seq = ((mode == QImode
3285 ? gen_unaligned_storeqi
3286 : gen_unaligned_storehi)
3287 (get_unaligned_address (operands[0], 0),
3288 operands[1], temp1, temp2, temp3));
3289
3290 alpha_set_memflags (seq, operands[0]);
3291 emit_insn (seq);
3292 }
3293 return true;
3294 }
3295
3296 return false;
3297 }
3298
3299 /* Generate an unsigned DImode to FP conversion. This is the same code
3300 optabs would emit if we didn't have TFmode patterns.
3301
3302 For SFmode, this is the only construction I've found that can pass
3303 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
3304 intermediates will work, because you'll get intermediate rounding
3305 that ruins the end result. Some of this could be fixed by turning
3306 on round-to-positive-infinity, but that requires diddling the fpsr,
3307 which kills performance. I tried turning this around and converting
3308 to a negative number, so that I could turn on /m, but either I did
3309 it wrong or there's something else cause I wound up with the exact
3310 same single-bit error. There is a branch-less form of this same code:
3311
3312 srl $16,1,$1
3313 and $16,1,$2
3314 cmplt $16,0,$3
3315 or $1,$2,$2
3316 cmovge $16,$16,$2
3317 itoft $3,$f10
3318 itoft $2,$f11
3319 cvtqs $f11,$f11
3320 adds $f11,$f11,$f0
3321 fcmoveq $f10,$f11,$f0
3322
3323 I'm not using it because it's the same number of instructions as
3324 this branch-full form, and it has more serialized long latency
3325 instructions on the critical path.
3326
3327 For DFmode, we can avoid rounding errors by breaking up the word
3328 into two pieces, converting them separately, and adding them back:
3329
3330 LC0: .long 0,0x5f800000
3331
3332 itoft $16,$f11
3333 lda $2,LC0
3334 cmplt $16,0,$1
3335 cpyse $f11,$f31,$f10
3336 cpyse $f31,$f11,$f11
3337 s4addq $1,$2,$1
3338 lds $f12,0($1)
3339 cvtqt $f10,$f10
3340 cvtqt $f11,$f11
3341 addt $f12,$f10,$f0
3342 addt $f0,$f11,$f0
3343
3344 This doesn't seem to be a clear-cut win over the optabs form.
3345 It probably all depends on the distribution of numbers being
3346 converted -- in the optabs form, all but high-bit-set has a
3347 much lower minimum execution time. */
3348
3349 void
3350 alpha_emit_floatuns (operands)
3351 rtx operands[2];
3352 {
3353 rtx neglab, donelab, i0, i1, f0, in, out;
3354 enum machine_mode mode;
3355
3356 out = operands[0];
3357 in = force_reg (DImode, operands[1]);
3358 mode = GET_MODE (out);
3359 neglab = gen_label_rtx ();
3360 donelab = gen_label_rtx ();
3361 i0 = gen_reg_rtx (DImode);
3362 i1 = gen_reg_rtx (DImode);
3363 f0 = gen_reg_rtx (mode);
3364
3365 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
3366
3367 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
3368 emit_jump_insn (gen_jump (donelab));
3369 emit_barrier ();
3370
3371 emit_label (neglab);
3372
3373 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
3374 emit_insn (gen_anddi3 (i1, in, const1_rtx));
3375 emit_insn (gen_iordi3 (i0, i0, i1));
3376 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
3377 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
3378
3379 emit_label (donelab);
3380 }
3381
3382 /* Generate the comparison for a conditional branch. */
3383
3384 rtx
3385 alpha_emit_conditional_branch (code)
3386 enum rtx_code code;
3387 {
3388 enum rtx_code cmp_code, branch_code;
3389 enum machine_mode cmp_mode, branch_mode = VOIDmode;
3390 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3391 rtx tem;
3392
3393 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
3394 {
3395 if (! TARGET_HAS_XFLOATING_LIBS)
3396 abort ();
3397
3398 /* X_floating library comparison functions return
3399 -1 unordered
3400 0 false
3401 1 true
3402 Convert the compare against the raw return value. */
3403
3404 switch (code)
3405 {
3406 case UNORDERED:
3407 cmp_code = EQ;
3408 code = LT;
3409 break;
3410 case ORDERED:
3411 cmp_code = EQ;
3412 code = GE;
3413 break;
3414 case NE:
3415 cmp_code = NE;
3416 code = NE;
3417 break;
3418 default:
3419 cmp_code = code;
3420 code = GT;
3421 break;
3422 }
3423
3424 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3425 op1 = const0_rtx;
3426 alpha_compare.fp_p = 0;
3427 }
3428
3429 /* The general case: fold the comparison code to the types of compares
3430 that we have, choosing the branch as necessary. */
3431 switch (code)
3432 {
3433 case EQ: case LE: case LT: case LEU: case LTU:
3434 case UNORDERED:
3435 /* We have these compares: */
3436 cmp_code = code, branch_code = NE;
3437 break;
3438
3439 case NE:
3440 case ORDERED:
3441 /* These must be reversed. */
3442 cmp_code = reverse_condition (code), branch_code = EQ;
3443 break;
3444
3445 case GE: case GT: case GEU: case GTU:
3446 /* For FP, we swap them, for INT, we reverse them. */
3447 if (alpha_compare.fp_p)
3448 {
3449 cmp_code = swap_condition (code);
3450 branch_code = NE;
3451 tem = op0, op0 = op1, op1 = tem;
3452 }
3453 else
3454 {
3455 cmp_code = reverse_condition (code);
3456 branch_code = EQ;
3457 }
3458 break;
3459
3460 default:
3461 abort ();
3462 }
3463
3464 if (alpha_compare.fp_p)
3465 {
3466 cmp_mode = DFmode;
3467 if (flag_unsafe_math_optimizations)
3468 {
3469 /* When we are not as concerned about non-finite values, and we
3470 are comparing against zero, we can branch directly. */
3471 if (op1 == CONST0_RTX (DFmode))
3472 cmp_code = NIL, branch_code = code;
3473 else if (op0 == CONST0_RTX (DFmode))
3474 {
3475 /* Undo the swap we probably did just above. */
3476 tem = op0, op0 = op1, op1 = tem;
3477 branch_code = swap_condition (cmp_code);
3478 cmp_code = NIL;
3479 }
3480 }
3481 else
3482 {
3483 /* ??? We mark the branch mode to be CCmode to prevent the
3484 compare and branch from being combined, since the compare
3485 insn follows IEEE rules that the branch does not. */
3486 branch_mode = CCmode;
3487 }
3488 }
3489 else
3490 {
3491 cmp_mode = DImode;
3492
3493 /* The following optimizations are only for signed compares. */
3494 if (code != LEU && code != LTU && code != GEU && code != GTU)
3495 {
3496 /* Whee. Compare and branch against 0 directly. */
3497 if (op1 == const0_rtx)
3498 cmp_code = NIL, branch_code = code;
3499
3500 /* We want to use cmpcc/bcc when we can, since there is a zero delay
3501 bypass between logicals and br/cmov on EV5. But we don't want to
3502 force valid immediate constants into registers needlessly. */
3503 else if (GET_CODE (op1) == CONST_INT)
3504 {
3505 HOST_WIDE_INT v = INTVAL (op1), n = -v;
3506
3507 if (! CONST_OK_FOR_LETTER_P (v, 'I')
3508 && (CONST_OK_FOR_LETTER_P (n, 'K')
3509 || CONST_OK_FOR_LETTER_P (n, 'L')))
3510 {
3511 cmp_code = PLUS, branch_code = code;
3512 op1 = GEN_INT (n);
3513 }
3514 }
3515 }
3516
3517 if (!reg_or_0_operand (op0, DImode))
3518 op0 = force_reg (DImode, op0);
3519 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
3520 op1 = force_reg (DImode, op1);
3521 }
3522
3523 /* Emit an initial compare instruction, if necessary. */
3524 tem = op0;
3525 if (cmp_code != NIL)
3526 {
3527 tem = gen_reg_rtx (cmp_mode);
3528 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
3529 }
3530
3531 /* Zero the operands. */
3532 memset (&alpha_compare, 0, sizeof (alpha_compare));
3533
3534 /* Return the branch comparison. */
3535 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
3536 }
3537
3538 /* Certain simplifications can be done to make invalid setcc operations
3539 valid. Return the final comparison, or NULL if we can't work. */
3540
3541 rtx
3542 alpha_emit_setcc (code)
3543 enum rtx_code code;
3544 {
3545 enum rtx_code cmp_code;
3546 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3547 int fp_p = alpha_compare.fp_p;
3548 rtx tmp;
3549
3550 /* Zero the operands. */
3551 memset (&alpha_compare, 0, sizeof (alpha_compare));
3552
3553 if (fp_p && GET_MODE (op0) == TFmode)
3554 {
3555 if (! TARGET_HAS_XFLOATING_LIBS)
3556 abort ();
3557
3558 /* X_floating library comparison functions return
3559 -1 unordered
3560 0 false
3561 1 true
3562 Convert the compare against the raw return value. */
3563
3564 if (code == UNORDERED || code == ORDERED)
3565 cmp_code = EQ;
3566 else
3567 cmp_code = code;
3568
3569 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3570 op1 = const0_rtx;
3571 fp_p = 0;
3572
3573 if (code == UNORDERED)
3574 code = LT;
3575 else if (code == ORDERED)
3576 code = GE;
3577 else
3578 code = GT;
3579 }
3580
3581 if (fp_p && !TARGET_FIX)
3582 return NULL_RTX;
3583
3584 /* The general case: fold the comparison code to the types of compares
3585 that we have, choosing the branch as necessary. */
3586
3587 cmp_code = NIL;
3588 switch (code)
3589 {
3590 case EQ: case LE: case LT: case LEU: case LTU:
3591 case UNORDERED:
3592 /* We have these compares. */
3593 if (fp_p)
3594 cmp_code = code, code = NE;
3595 break;
3596
3597 case NE:
3598 if (!fp_p && op1 == const0_rtx)
3599 break;
3600 /* FALLTHRU */
3601
3602 case ORDERED:
3603 cmp_code = reverse_condition (code);
3604 code = EQ;
3605 break;
3606
3607 case GE: case GT: case GEU: case GTU:
3608 /* These normally need swapping, but for integer zero we have
3609 special patterns that recognize swapped operands. */
3610 if (!fp_p && op1 == const0_rtx)
3611 break;
3612 code = swap_condition (code);
3613 if (fp_p)
3614 cmp_code = code, code = NE;
3615 tmp = op0, op0 = op1, op1 = tmp;
3616 break;
3617
3618 default:
3619 abort ();
3620 }
3621
3622 if (!fp_p)
3623 {
3624 if (!register_operand (op0, DImode))
3625 op0 = force_reg (DImode, op0);
3626 if (!reg_or_8bit_operand (op1, DImode))
3627 op1 = force_reg (DImode, op1);
3628 }
3629
3630 /* Emit an initial compare instruction, if necessary. */
3631 if (cmp_code != NIL)
3632 {
3633 enum machine_mode mode = fp_p ? DFmode : DImode;
3634
3635 tmp = gen_reg_rtx (mode);
3636 emit_insn (gen_rtx_SET (VOIDmode, tmp,
3637 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
3638
3639 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
3640 op1 = const0_rtx;
3641 }
3642
3643 /* Return the setcc comparison. */
3644 return gen_rtx_fmt_ee (code, DImode, op0, op1);
3645 }
3646
3647
3648 /* Rewrite a comparison against zero CMP of the form
3649 (CODE (cc0) (const_int 0)) so it can be written validly in
3650 a conditional move (if_then_else CMP ...).
3651 If both of the operands that set cc0 are nonzero we must emit
3652 an insn to perform the compare (it can't be done within
3653 the conditional move). */
3654 rtx
3655 alpha_emit_conditional_move (cmp, mode)
3656 rtx cmp;
3657 enum machine_mode mode;
3658 {
3659 enum rtx_code code = GET_CODE (cmp);
3660 enum rtx_code cmov_code = NE;
3661 rtx op0 = alpha_compare.op0;
3662 rtx op1 = alpha_compare.op1;
3663 int fp_p = alpha_compare.fp_p;
3664 enum machine_mode cmp_mode
3665 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
3666 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
3667 enum machine_mode cmov_mode = VOIDmode;
3668 int local_fast_math = flag_unsafe_math_optimizations;
3669 rtx tem;
3670
3671 /* Zero the operands. */
3672 memset (&alpha_compare, 0, sizeof (alpha_compare));
3673
3674 if (fp_p != FLOAT_MODE_P (mode))
3675 {
3676 enum rtx_code cmp_code;
3677
3678 if (! TARGET_FIX)
3679 return 0;
3680
3681 /* If we have fp<->int register move instructions, do a cmov by
3682 performing the comparison in fp registers, and move the
3683 zero/nonzero value to integer registers, where we can then
3684 use a normal cmov, or vice-versa. */
3685
3686 switch (code)
3687 {
3688 case EQ: case LE: case LT: case LEU: case LTU:
3689 /* We have these compares. */
3690 cmp_code = code, code = NE;
3691 break;
3692
3693 case NE:
3694 /* This must be reversed. */
3695 cmp_code = EQ, code = EQ;
3696 break;
3697
3698 case GE: case GT: case GEU: case GTU:
3699 /* These normally need swapping, but for integer zero we have
3700 special patterns that recognize swapped operands. */
3701 if (!fp_p && op1 == const0_rtx)
3702 cmp_code = code, code = NE;
3703 else
3704 {
3705 cmp_code = swap_condition (code);
3706 code = NE;
3707 tem = op0, op0 = op1, op1 = tem;
3708 }
3709 break;
3710
3711 default:
3712 abort ();
3713 }
3714
3715 tem = gen_reg_rtx (cmp_op_mode);
3716 emit_insn (gen_rtx_SET (VOIDmode, tem,
3717 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
3718 op0, op1)));
3719
3720 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
3721 op0 = gen_lowpart (cmp_op_mode, tem);
3722 op1 = CONST0_RTX (cmp_op_mode);
3723 fp_p = !fp_p;
3724 local_fast_math = 1;
3725 }
3726
3727 /* We may be able to use a conditional move directly.
3728 This avoids emitting spurious compares. */
3729 if (signed_comparison_operator (cmp, VOIDmode)
3730 && (!fp_p || local_fast_math)
3731 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
3732 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3733
3734 /* We can't put the comparison inside the conditional move;
3735 emit a compare instruction and put that inside the
3736 conditional move. Make sure we emit only comparisons we have;
3737 swap or reverse as necessary. */
3738
3739 if (no_new_pseudos)
3740 return NULL_RTX;
3741
3742 switch (code)
3743 {
3744 case EQ: case LE: case LT: case LEU: case LTU:
3745 /* We have these compares: */
3746 break;
3747
3748 case NE:
3749 /* This must be reversed. */
3750 code = reverse_condition (code);
3751 cmov_code = EQ;
3752 break;
3753
3754 case GE: case GT: case GEU: case GTU:
3755 /* These must be swapped. */
3756 if (op1 != CONST0_RTX (cmp_mode))
3757 {
3758 code = swap_condition (code);
3759 tem = op0, op0 = op1, op1 = tem;
3760 }
3761 break;
3762
3763 default:
3764 abort ();
3765 }
3766
3767 if (!fp_p)
3768 {
3769 if (!reg_or_0_operand (op0, DImode))
3770 op0 = force_reg (DImode, op0);
3771 if (!reg_or_8bit_operand (op1, DImode))
3772 op1 = force_reg (DImode, op1);
3773 }
3774
3775 /* ??? We mark the branch mode to be CCmode to prevent the compare
3776 and cmov from being combined, since the compare insn follows IEEE
3777 rules that the cmov does not. */
3778 if (fp_p && !local_fast_math)
3779 cmov_mode = CCmode;
3780
3781 tem = gen_reg_rtx (cmp_op_mode);
3782 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
3783 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
3784 }
3785
3786 /* Simplify a conditional move of two constants into a setcc with
3787 arithmetic. This is done with a splitter since combine would
3788 just undo the work if done during code generation. It also catches
3789 cases we wouldn't have before cse. */
3790
3791 int
3792 alpha_split_conditional_move (code, dest, cond, t_rtx, f_rtx)
3793 enum rtx_code code;
3794 rtx dest, cond, t_rtx, f_rtx;
3795 {
3796 HOST_WIDE_INT t, f, diff;
3797 enum machine_mode mode;
3798 rtx target, subtarget, tmp;
3799
3800 mode = GET_MODE (dest);
3801 t = INTVAL (t_rtx);
3802 f = INTVAL (f_rtx);
3803 diff = t - f;
3804
3805 if (((code == NE || code == EQ) && diff < 0)
3806 || (code == GE || code == GT))
3807 {
3808 code = reverse_condition (code);
3809 diff = t, t = f, f = diff;
3810 diff = t - f;
3811 }
3812
3813 subtarget = target = dest;
3814 if (mode != DImode)
3815 {
3816 target = gen_lowpart (DImode, dest);
3817 if (! no_new_pseudos)
3818 subtarget = gen_reg_rtx (DImode);
3819 else
3820 subtarget = target;
3821 }
3822 /* Below, we must be careful to use copy_rtx on target and subtarget
3823 in intermediate insns, as they may be a subreg rtx, which may not
3824 be shared. */
3825
3826 if (f == 0 && exact_log2 (diff) > 0
3827 /* On EV6, we've got enough shifters to make non-arithmatic shifts
3828 viable over a longer latency cmove. On EV5, the E0 slot is a
3829 scarce resource, and on EV4 shift has the same latency as a cmove. */
3830 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
3831 {
3832 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3833 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3834
3835 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
3836 GEN_INT (exact_log2 (t)));
3837 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3838 }
3839 else if (f == 0 && t == -1)
3840 {
3841 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3842 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3843
3844 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
3845 }
3846 else if (diff == 1 || diff == 4 || diff == 8)
3847 {
3848 rtx add_op;
3849
3850 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3851 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3852
3853 if (diff == 1)
3854 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
3855 else
3856 {
3857 add_op = GEN_INT (f);
3858 if (sext_add_operand (add_op, mode))
3859 {
3860 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
3861 GEN_INT (diff));
3862 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
3863 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3864 }
3865 else
3866 return 0;
3867 }
3868 }
3869 else
3870 return 0;
3871
3872 return 1;
3873 }
3874 \f
3875 /* Look up the function X_floating library function name for the
3876 given operation. */
3877
3878 static const char *
3879 alpha_lookup_xfloating_lib_func (code)
3880 enum rtx_code code;
3881 {
3882 struct xfloating_op
3883 {
3884 const enum rtx_code code;
3885 const char *const func;
3886 };
3887
3888 static const struct xfloating_op vms_xfloating_ops[] =
3889 {
3890 { PLUS, "OTS$ADD_X" },
3891 { MINUS, "OTS$SUB_X" },
3892 { MULT, "OTS$MUL_X" },
3893 { DIV, "OTS$DIV_X" },
3894 { EQ, "OTS$EQL_X" },
3895 { NE, "OTS$NEQ_X" },
3896 { LT, "OTS$LSS_X" },
3897 { LE, "OTS$LEQ_X" },
3898 { GT, "OTS$GTR_X" },
3899 { GE, "OTS$GEQ_X" },
3900 { FIX, "OTS$CVTXQ" },
3901 { FLOAT, "OTS$CVTQX" },
3902 { UNSIGNED_FLOAT, "OTS$CVTQUX" },
3903 { FLOAT_EXTEND, "OTS$CVT_FLOAT_T_X" },
3904 { FLOAT_TRUNCATE, "OTS$CVT_FLOAT_X_T" },
3905 };
3906
3907 static const struct xfloating_op osf_xfloating_ops[] =
3908 {
3909 { PLUS, "_OtsAddX" },
3910 { MINUS, "_OtsSubX" },
3911 { MULT, "_OtsMulX" },
3912 { DIV, "_OtsDivX" },
3913 { EQ, "_OtsEqlX" },
3914 { NE, "_OtsNeqX" },
3915 { LT, "_OtsLssX" },
3916 { LE, "_OtsLeqX" },
3917 { GT, "_OtsGtrX" },
3918 { GE, "_OtsGeqX" },
3919 { FIX, "_OtsCvtXQ" },
3920 { FLOAT, "_OtsCvtQX" },
3921 { UNSIGNED_FLOAT, "_OtsCvtQUX" },
3922 { FLOAT_EXTEND, "_OtsConvertFloatTX" },
3923 { FLOAT_TRUNCATE, "_OtsConvertFloatXT" },
3924 };
3925
3926 const struct xfloating_op *ops;
3927 const long n = ARRAY_SIZE (osf_xfloating_ops);
3928 long i;
3929
3930 /* How irritating. Nothing to key off for the table. Hardcode
3931 knowledge of the G_floating routines. */
3932 if (TARGET_FLOAT_VAX)
3933 {
3934 if (TARGET_ABI_OPEN_VMS)
3935 {
3936 if (code == FLOAT_EXTEND)
3937 return "OTS$CVT_FLOAT_G_X";
3938 if (code == FLOAT_TRUNCATE)
3939 return "OTS$CVT_FLOAT_X_G";
3940 }
3941 else
3942 {
3943 if (code == FLOAT_EXTEND)
3944 return "_OtsConvertFloatGX";
3945 if (code == FLOAT_TRUNCATE)
3946 return "_OtsConvertFloatXG";
3947 }
3948 }
3949
3950 if (TARGET_ABI_OPEN_VMS)
3951 ops = vms_xfloating_ops;
3952 else
3953 ops = osf_xfloating_ops;
3954
3955 for (i = 0; i < n; ++i)
3956 if (ops[i].code == code)
3957 return ops[i].func;
3958
3959 abort();
3960 }
3961
3962 /* Most X_floating operations take the rounding mode as an argument.
3963 Compute that here. */
3964
3965 static int
3966 alpha_compute_xfloating_mode_arg (code, round)
3967 enum rtx_code code;
3968 enum alpha_fp_rounding_mode round;
3969 {
3970 int mode;
3971
3972 switch (round)
3973 {
3974 case ALPHA_FPRM_NORM:
3975 mode = 2;
3976 break;
3977 case ALPHA_FPRM_MINF:
3978 mode = 1;
3979 break;
3980 case ALPHA_FPRM_CHOP:
3981 mode = 0;
3982 break;
3983 case ALPHA_FPRM_DYN:
3984 mode = 4;
3985 break;
3986 default:
3987 abort ();
3988
3989 /* XXX For reference, round to +inf is mode = 3. */
3990 }
3991
3992 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3993 mode |= 0x10000;
3994
3995 return mode;
3996 }
3997
3998 /* Emit an X_floating library function call.
3999
4000 Note that these functions do not follow normal calling conventions:
4001 TFmode arguments are passed in two integer registers (as opposed to
4002 indirect); TFmode return values appear in R16+R17.
4003
4004 FUNC is the function name to call.
4005 TARGET is where the output belongs.
4006 OPERANDS are the inputs.
4007 NOPERANDS is the count of inputs.
4008 EQUIV is the expression equivalent for the function.
4009 */
4010
4011 static void
4012 alpha_emit_xfloating_libcall (func, target, operands, noperands, equiv)
4013 const char *func;
4014 rtx target;
4015 rtx operands[];
4016 int noperands;
4017 rtx equiv;
4018 {
4019 rtx usage = NULL_RTX, tmp, reg;
4020 int regno = 16, i;
4021
4022 start_sequence ();
4023
4024 for (i = 0; i < noperands; ++i)
4025 {
4026 switch (GET_MODE (operands[i]))
4027 {
4028 case TFmode:
4029 reg = gen_rtx_REG (TFmode, regno);
4030 regno += 2;
4031 break;
4032
4033 case DFmode:
4034 reg = gen_rtx_REG (DFmode, regno + 32);
4035 regno += 1;
4036 break;
4037
4038 case VOIDmode:
4039 if (GET_CODE (operands[i]) != CONST_INT)
4040 abort ();
4041 /* FALLTHRU */
4042 case DImode:
4043 reg = gen_rtx_REG (DImode, regno);
4044 regno += 1;
4045 break;
4046
4047 default:
4048 abort ();
4049 }
4050
4051 emit_move_insn (reg, operands[i]);
4052 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
4053 }
4054
4055 switch (GET_MODE (target))
4056 {
4057 case TFmode:
4058 reg = gen_rtx_REG (TFmode, 16);
4059 break;
4060 case DFmode:
4061 reg = gen_rtx_REG (DFmode, 32);
4062 break;
4063 case DImode:
4064 reg = gen_rtx_REG (DImode, 0);
4065 break;
4066 default:
4067 abort ();
4068 }
4069
4070 tmp = gen_rtx_MEM (QImode, init_one_libfunc (func));
4071 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
4072 const0_rtx, const0_rtx));
4073 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
4074
4075 tmp = get_insns ();
4076 end_sequence ();
4077
4078 emit_libcall_block (tmp, target, reg, equiv);
4079 }
4080
4081 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
4082
4083 void
4084 alpha_emit_xfloating_arith (code, operands)
4085 enum rtx_code code;
4086 rtx operands[];
4087 {
4088 const char *func;
4089 int mode;
4090 rtx out_operands[3];
4091
4092 func = alpha_lookup_xfloating_lib_func (code);
4093 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
4094
4095 out_operands[0] = operands[1];
4096 out_operands[1] = operands[2];
4097 out_operands[2] = GEN_INT (mode);
4098 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
4099 gen_rtx_fmt_ee (code, TFmode, operands[1],
4100 operands[2]));
4101 }
4102
4103 /* Emit an X_floating library function call for a comparison. */
4104
4105 static rtx
4106 alpha_emit_xfloating_compare (code, op0, op1)
4107 enum rtx_code code;
4108 rtx op0, op1;
4109 {
4110 const char *func;
4111 rtx out, operands[2];
4112
4113 func = alpha_lookup_xfloating_lib_func (code);
4114
4115 operands[0] = op0;
4116 operands[1] = op1;
4117 out = gen_reg_rtx (DImode);
4118
4119 /* ??? Strange mode for equiv because what's actually returned
4120 is -1,0,1, not a proper boolean value. */
4121 alpha_emit_xfloating_libcall (func, out, operands, 2,
4122 gen_rtx_fmt_ee (code, CCmode, op0, op1));
4123
4124 return out;
4125 }
4126
4127 /* Emit an X_floating library function call for a conversion. */
4128
4129 void
4130 alpha_emit_xfloating_cvt (code, operands)
4131 enum rtx_code code;
4132 rtx operands[];
4133 {
4134 int noperands = 1, mode;
4135 rtx out_operands[2];
4136 const char *func;
4137
4138 func = alpha_lookup_xfloating_lib_func (code);
4139
4140 out_operands[0] = operands[1];
4141
4142 switch (code)
4143 {
4144 case FIX:
4145 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
4146 out_operands[1] = GEN_INT (mode);
4147 noperands = 2;
4148 break;
4149 case FLOAT_TRUNCATE:
4150 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
4151 out_operands[1] = GEN_INT (mode);
4152 noperands = 2;
4153 break;
4154 default:
4155 break;
4156 }
4157
4158 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
4159 gen_rtx_fmt_e (code, GET_MODE (operands[0]),
4160 operands[1]));
4161 }
4162
4163 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
4164 OP[0] into OP[0,1]. Naturally, output operand ordering is
4165 little-endian. */
4166
4167 void
4168 alpha_split_tfmode_pair (operands)
4169 rtx operands[4];
4170 {
4171 if (GET_CODE (operands[1]) == REG)
4172 {
4173 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
4174 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
4175 }
4176 else if (GET_CODE (operands[1]) == MEM)
4177 {
4178 operands[3] = adjust_address (operands[1], DImode, 8);
4179 operands[2] = adjust_address (operands[1], DImode, 0);
4180 }
4181 else if (operands[1] == CONST0_RTX (TFmode))
4182 operands[2] = operands[3] = const0_rtx;
4183 else
4184 abort ();
4185
4186 if (GET_CODE (operands[0]) == REG)
4187 {
4188 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
4189 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
4190 }
4191 else if (GET_CODE (operands[0]) == MEM)
4192 {
4193 operands[1] = adjust_address (operands[0], DImode, 8);
4194 operands[0] = adjust_address (operands[0], DImode, 0);
4195 }
4196 else
4197 abort ();
4198 }
4199
4200 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
4201 op2 is a register containing the sign bit, operation is the
4202 logical operation to be performed. */
4203
4204 void
4205 alpha_split_tfmode_frobsign (operands, operation)
4206 rtx operands[3];
4207 rtx (*operation) PARAMS ((rtx, rtx, rtx));
4208 {
4209 rtx high_bit = operands[2];
4210 rtx scratch;
4211 int move;
4212
4213 alpha_split_tfmode_pair (operands);
4214
4215 /* Detect three flavors of operand overlap. */
4216 move = 1;
4217 if (rtx_equal_p (operands[0], operands[2]))
4218 move = 0;
4219 else if (rtx_equal_p (operands[1], operands[2]))
4220 {
4221 if (rtx_equal_p (operands[0], high_bit))
4222 move = 2;
4223 else
4224 move = -1;
4225 }
4226
4227 if (move < 0)
4228 emit_move_insn (operands[0], operands[2]);
4229
4230 /* ??? If the destination overlaps both source tf and high_bit, then
4231 assume source tf is dead in its entirety and use the other half
4232 for a scratch register. Otherwise "scratch" is just the proper
4233 destination register. */
4234 scratch = operands[move < 2 ? 1 : 3];
4235
4236 emit_insn ((*operation) (scratch, high_bit, operands[3]));
4237
4238 if (move > 0)
4239 {
4240 emit_move_insn (operands[0], operands[2]);
4241 if (move > 1)
4242 emit_move_insn (operands[1], scratch);
4243 }
4244 }
4245 \f
4246 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
4247 unaligned data:
4248
4249 unsigned: signed:
4250 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
4251 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
4252 lda r3,X(r11) lda r3,X+2(r11)
4253 extwl r1,r3,r1 extql r1,r3,r1
4254 extwh r2,r3,r2 extqh r2,r3,r2
4255 or r1.r2.r1 or r1,r2,r1
4256 sra r1,48,r1
4257
4258 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
4259 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
4260 lda r3,X(r11) lda r3,X(r11)
4261 extll r1,r3,r1 extll r1,r3,r1
4262 extlh r2,r3,r2 extlh r2,r3,r2
4263 or r1.r2.r1 addl r1,r2,r1
4264
4265 quad: ldq_u r1,X(r11)
4266 ldq_u r2,X+7(r11)
4267 lda r3,X(r11)
4268 extql r1,r3,r1
4269 extqh r2,r3,r2
4270 or r1.r2.r1
4271 */
4272
4273 void
4274 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
4275 rtx tgt, mem;
4276 HOST_WIDE_INT size, ofs;
4277 int sign;
4278 {
4279 rtx meml, memh, addr, extl, exth, tmp, mema;
4280 enum machine_mode mode;
4281
4282 meml = gen_reg_rtx (DImode);
4283 memh = gen_reg_rtx (DImode);
4284 addr = gen_reg_rtx (DImode);
4285 extl = gen_reg_rtx (DImode);
4286 exth = gen_reg_rtx (DImode);
4287
4288 mema = XEXP (mem, 0);
4289 if (GET_CODE (mema) == LO_SUM)
4290 mema = force_reg (Pmode, mema);
4291
4292 /* AND addresses cannot be in any alias set, since they may implicitly
4293 alias surrounding code. Ideally we'd have some alias set that
4294 covered all types except those with alignment 8 or higher. */
4295
4296 tmp = change_address (mem, DImode,
4297 gen_rtx_AND (DImode,
4298 plus_constant (mema, ofs),
4299 GEN_INT (-8)));
4300 set_mem_alias_set (tmp, 0);
4301 emit_move_insn (meml, tmp);
4302
4303 tmp = change_address (mem, DImode,
4304 gen_rtx_AND (DImode,
4305 plus_constant (mema, ofs + size - 1),
4306 GEN_INT (-8)));
4307 set_mem_alias_set (tmp, 0);
4308 emit_move_insn (memh, tmp);
4309
4310 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
4311 {
4312 emit_move_insn (addr, plus_constant (mema, -1));
4313
4314 emit_insn (gen_extqh_be (extl, meml, addr));
4315 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
4316
4317 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
4318 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
4319 addr, 1, OPTAB_WIDEN);
4320 }
4321 else if (sign && size == 2)
4322 {
4323 emit_move_insn (addr, plus_constant (mema, ofs+2));
4324
4325 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
4326 emit_insn (gen_extqh_le (exth, memh, addr));
4327
4328 /* We must use tgt here for the target. Alpha-vms port fails if we use
4329 addr for the target, because addr is marked as a pointer and combine
4330 knows that pointers are always sign-extended 32 bit values. */
4331 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
4332 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
4333 addr, 1, OPTAB_WIDEN);
4334 }
4335 else
4336 {
4337 if (WORDS_BIG_ENDIAN)
4338 {
4339 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
4340 switch ((int) size)
4341 {
4342 case 2:
4343 emit_insn (gen_extwh_be (extl, meml, addr));
4344 mode = HImode;
4345 break;
4346
4347 case 4:
4348 emit_insn (gen_extlh_be (extl, meml, addr));
4349 mode = SImode;
4350 break;
4351
4352 case 8:
4353 emit_insn (gen_extqh_be (extl, meml, addr));
4354 mode = DImode;
4355 break;
4356
4357 default:
4358 abort ();
4359 }
4360 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
4361 }
4362 else
4363 {
4364 emit_move_insn (addr, plus_constant (mema, ofs));
4365 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
4366 switch ((int) size)
4367 {
4368 case 2:
4369 emit_insn (gen_extwh_le (exth, memh, addr));
4370 mode = HImode;
4371 break;
4372
4373 case 4:
4374 emit_insn (gen_extlh_le (exth, memh, addr));
4375 mode = SImode;
4376 break;
4377
4378 case 8:
4379 emit_insn (gen_extqh_le (exth, memh, addr));
4380 mode = DImode;
4381 break;
4382
4383 default:
4384 abort();
4385 }
4386 }
4387
4388 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
4389 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
4390 sign, OPTAB_WIDEN);
4391 }
4392
4393 if (addr != tgt)
4394 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
4395 }
4396
4397 /* Similarly, use ins and msk instructions to perform unaligned stores. */
4398
4399 void
4400 alpha_expand_unaligned_store (dst, src, size, ofs)
4401 rtx dst, src;
4402 HOST_WIDE_INT size, ofs;
4403 {
4404 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
4405
4406 dstl = gen_reg_rtx (DImode);
4407 dsth = gen_reg_rtx (DImode);
4408 insl = gen_reg_rtx (DImode);
4409 insh = gen_reg_rtx (DImode);
4410
4411 dsta = XEXP (dst, 0);
4412 if (GET_CODE (dsta) == LO_SUM)
4413 dsta = force_reg (Pmode, dsta);
4414
4415 /* AND addresses cannot be in any alias set, since they may implicitly
4416 alias surrounding code. Ideally we'd have some alias set that
4417 covered all types except those with alignment 8 or higher. */
4418
4419 meml = change_address (dst, DImode,
4420 gen_rtx_AND (DImode,
4421 plus_constant (dsta, ofs),
4422 GEN_INT (-8)));
4423 set_mem_alias_set (meml, 0);
4424
4425 memh = change_address (dst, DImode,
4426 gen_rtx_AND (DImode,
4427 plus_constant (dsta, ofs + size - 1),
4428 GEN_INT (-8)));
4429 set_mem_alias_set (memh, 0);
4430
4431 emit_move_insn (dsth, memh);
4432 emit_move_insn (dstl, meml);
4433 if (WORDS_BIG_ENDIAN)
4434 {
4435 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
4436
4437 if (src != const0_rtx)
4438 {
4439 switch ((int) size)
4440 {
4441 case 2:
4442 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
4443 break;
4444 case 4:
4445 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
4446 break;
4447 case 8:
4448 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
4449 break;
4450 }
4451 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
4452 GEN_INT (size*8), addr));
4453 }
4454
4455 switch ((int) size)
4456 {
4457 case 2:
4458 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
4459 break;
4460 case 4:
4461 {
4462 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4463 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
4464 break;
4465 }
4466 case 8:
4467 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
4468 break;
4469 }
4470
4471 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
4472 }
4473 else
4474 {
4475 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
4476
4477 if (src != const0_rtx)
4478 {
4479 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
4480 GEN_INT (size*8), addr));
4481
4482 switch ((int) size)
4483 {
4484 case 2:
4485 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
4486 break;
4487 case 4:
4488 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
4489 break;
4490 case 8:
4491 emit_insn (gen_insql_le (insl, src, addr));
4492 break;
4493 }
4494 }
4495
4496 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
4497
4498 switch ((int) size)
4499 {
4500 case 2:
4501 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
4502 break;
4503 case 4:
4504 {
4505 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4506 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
4507 break;
4508 }
4509 case 8:
4510 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
4511 break;
4512 }
4513 }
4514
4515 if (src != const0_rtx)
4516 {
4517 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
4518 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
4519 }
4520
4521 if (WORDS_BIG_ENDIAN)
4522 {
4523 emit_move_insn (meml, dstl);
4524 emit_move_insn (memh, dsth);
4525 }
4526 else
4527 {
4528 /* Must store high before low for degenerate case of aligned. */
4529 emit_move_insn (memh, dsth);
4530 emit_move_insn (meml, dstl);
4531 }
4532 }
4533
4534 /* The block move code tries to maximize speed by separating loads and
4535 stores at the expense of register pressure: we load all of the data
4536 before we store it back out. There are two secondary effects worth
4537 mentioning, that this speeds copying to/from aligned and unaligned
4538 buffers, and that it makes the code significantly easier to write. */
4539
4540 #define MAX_MOVE_WORDS 8
4541
4542 /* Load an integral number of consecutive unaligned quadwords. */
4543
4544 static void
4545 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
4546 rtx *out_regs;
4547 rtx smem;
4548 HOST_WIDE_INT words, ofs;
4549 {
4550 rtx const im8 = GEN_INT (-8);
4551 rtx const i64 = GEN_INT (64);
4552 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
4553 rtx sreg, areg, tmp, smema;
4554 HOST_WIDE_INT i;
4555
4556 smema = XEXP (smem, 0);
4557 if (GET_CODE (smema) == LO_SUM)
4558 smema = force_reg (Pmode, smema);
4559
4560 /* Generate all the tmp registers we need. */
4561 for (i = 0; i < words; ++i)
4562 {
4563 data_regs[i] = out_regs[i];
4564 ext_tmps[i] = gen_reg_rtx (DImode);
4565 }
4566 data_regs[words] = gen_reg_rtx (DImode);
4567
4568 if (ofs != 0)
4569 smem = adjust_address (smem, GET_MODE (smem), ofs);
4570
4571 /* Load up all of the source data. */
4572 for (i = 0; i < words; ++i)
4573 {
4574 tmp = change_address (smem, DImode,
4575 gen_rtx_AND (DImode,
4576 plus_constant (smema, 8*i),
4577 im8));
4578 set_mem_alias_set (tmp, 0);
4579 emit_move_insn (data_regs[i], tmp);
4580 }
4581
4582 tmp = change_address (smem, DImode,
4583 gen_rtx_AND (DImode,
4584 plus_constant (smema, 8*words - 1),
4585 im8));
4586 set_mem_alias_set (tmp, 0);
4587 emit_move_insn (data_regs[words], tmp);
4588
4589 /* Extract the half-word fragments. Unfortunately DEC decided to make
4590 extxh with offset zero a noop instead of zeroing the register, so
4591 we must take care of that edge condition ourselves with cmov. */
4592
4593 sreg = copy_addr_to_reg (smema);
4594 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
4595 1, OPTAB_WIDEN);
4596 if (WORDS_BIG_ENDIAN)
4597 emit_move_insn (sreg, plus_constant (sreg, 7));
4598 for (i = 0; i < words; ++i)
4599 {
4600 if (WORDS_BIG_ENDIAN)
4601 {
4602 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
4603 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
4604 }
4605 else
4606 {
4607 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
4608 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
4609 }
4610 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
4611 gen_rtx_IF_THEN_ELSE (DImode,
4612 gen_rtx_EQ (DImode, areg,
4613 const0_rtx),
4614 const0_rtx, ext_tmps[i])));
4615 }
4616
4617 /* Merge the half-words into whole words. */
4618 for (i = 0; i < words; ++i)
4619 {
4620 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
4621 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
4622 }
4623 }
4624
4625 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
4626 may be NULL to store zeros. */
4627
4628 static void
4629 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
4630 rtx *data_regs;
4631 rtx dmem;
4632 HOST_WIDE_INT words, ofs;
4633 {
4634 rtx const im8 = GEN_INT (-8);
4635 rtx const i64 = GEN_INT (64);
4636 rtx ins_tmps[MAX_MOVE_WORDS];
4637 rtx st_tmp_1, st_tmp_2, dreg;
4638 rtx st_addr_1, st_addr_2, dmema;
4639 HOST_WIDE_INT i;
4640
4641 dmema = XEXP (dmem, 0);
4642 if (GET_CODE (dmema) == LO_SUM)
4643 dmema = force_reg (Pmode, dmema);
4644
4645 /* Generate all the tmp registers we need. */
4646 if (data_regs != NULL)
4647 for (i = 0; i < words; ++i)
4648 ins_tmps[i] = gen_reg_rtx(DImode);
4649 st_tmp_1 = gen_reg_rtx(DImode);
4650 st_tmp_2 = gen_reg_rtx(DImode);
4651
4652 if (ofs != 0)
4653 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
4654
4655 st_addr_2 = change_address (dmem, DImode,
4656 gen_rtx_AND (DImode,
4657 plus_constant (dmema, words*8 - 1),
4658 im8));
4659 set_mem_alias_set (st_addr_2, 0);
4660
4661 st_addr_1 = change_address (dmem, DImode,
4662 gen_rtx_AND (DImode, dmema, im8));
4663 set_mem_alias_set (st_addr_1, 0);
4664
4665 /* Load up the destination end bits. */
4666 emit_move_insn (st_tmp_2, st_addr_2);
4667 emit_move_insn (st_tmp_1, st_addr_1);
4668
4669 /* Shift the input data into place. */
4670 dreg = copy_addr_to_reg (dmema);
4671 if (WORDS_BIG_ENDIAN)
4672 emit_move_insn (dreg, plus_constant (dreg, 7));
4673 if (data_regs != NULL)
4674 {
4675 for (i = words-1; i >= 0; --i)
4676 {
4677 if (WORDS_BIG_ENDIAN)
4678 {
4679 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
4680 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
4681 }
4682 else
4683 {
4684 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
4685 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
4686 }
4687 }
4688 for (i = words-1; i > 0; --i)
4689 {
4690 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
4691 ins_tmps[i-1], ins_tmps[i-1], 1,
4692 OPTAB_WIDEN);
4693 }
4694 }
4695
4696 /* Split and merge the ends with the destination data. */
4697 if (WORDS_BIG_ENDIAN)
4698 {
4699 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
4700 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
4701 }
4702 else
4703 {
4704 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
4705 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
4706 }
4707
4708 if (data_regs != NULL)
4709 {
4710 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
4711 st_tmp_2, 1, OPTAB_WIDEN);
4712 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
4713 st_tmp_1, 1, OPTAB_WIDEN);
4714 }
4715
4716 /* Store it all. */
4717 if (WORDS_BIG_ENDIAN)
4718 emit_move_insn (st_addr_1, st_tmp_1);
4719 else
4720 emit_move_insn (st_addr_2, st_tmp_2);
4721 for (i = words-1; i > 0; --i)
4722 {
4723 rtx tmp = change_address (dmem, DImode,
4724 gen_rtx_AND (DImode,
4725 plus_constant(dmema,
4726 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
4727 im8));
4728 set_mem_alias_set (tmp, 0);
4729 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
4730 }
4731 if (WORDS_BIG_ENDIAN)
4732 emit_move_insn (st_addr_2, st_tmp_2);
4733 else
4734 emit_move_insn (st_addr_1, st_tmp_1);
4735 }
4736
4737
4738 /* Expand string/block move operations.
4739
4740 operands[0] is the pointer to the destination.
4741 operands[1] is the pointer to the source.
4742 operands[2] is the number of bytes to move.
4743 operands[3] is the alignment. */
4744
4745 int
4746 alpha_expand_block_move (operands)
4747 rtx operands[];
4748 {
4749 rtx bytes_rtx = operands[2];
4750 rtx align_rtx = operands[3];
4751 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4752 HOST_WIDE_INT bytes = orig_bytes;
4753 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
4754 HOST_WIDE_INT dst_align = src_align;
4755 rtx orig_src = operands[1];
4756 rtx orig_dst = operands[0];
4757 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
4758 rtx tmp;
4759 unsigned int i, words, ofs, nregs = 0;
4760
4761 if (orig_bytes <= 0)
4762 return 1;
4763 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4764 return 0;
4765
4766 /* Look for additional alignment information from recorded register info. */
4767
4768 tmp = XEXP (orig_src, 0);
4769 if (GET_CODE (tmp) == REG)
4770 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4771 else if (GET_CODE (tmp) == PLUS
4772 && GET_CODE (XEXP (tmp, 0)) == REG
4773 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4774 {
4775 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4776 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4777
4778 if (a > src_align)
4779 {
4780 if (a >= 64 && c % 8 == 0)
4781 src_align = 64;
4782 else if (a >= 32 && c % 4 == 0)
4783 src_align = 32;
4784 else if (a >= 16 && c % 2 == 0)
4785 src_align = 16;
4786 }
4787 }
4788
4789 tmp = XEXP (orig_dst, 0);
4790 if (GET_CODE (tmp) == REG)
4791 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4792 else if (GET_CODE (tmp) == PLUS
4793 && GET_CODE (XEXP (tmp, 0)) == REG
4794 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4795 {
4796 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4797 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4798
4799 if (a > dst_align)
4800 {
4801 if (a >= 64 && c % 8 == 0)
4802 dst_align = 64;
4803 else if (a >= 32 && c % 4 == 0)
4804 dst_align = 32;
4805 else if (a >= 16 && c % 2 == 0)
4806 dst_align = 16;
4807 }
4808 }
4809
4810 /* Load the entire block into registers. */
4811 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
4812 {
4813 enum machine_mode mode;
4814
4815 tmp = XEXP (XEXP (orig_src, 0), 0);
4816
4817 /* Don't use the existing register if we're reading more than
4818 is held in the register. Nor if there is not a mode that
4819 handles the exact size. */
4820 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
4821 if (mode != BLKmode
4822 && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
4823 {
4824 if (mode == TImode)
4825 {
4826 data_regs[nregs] = gen_lowpart (DImode, tmp);
4827 data_regs[nregs + 1] = gen_highpart (DImode, tmp);
4828 nregs += 2;
4829 }
4830 else
4831 data_regs[nregs++] = gen_lowpart (mode, tmp);
4832
4833 goto src_done;
4834 }
4835
4836 /* No appropriate mode; fall back on memory. */
4837 orig_src = replace_equiv_address (orig_src,
4838 copy_addr_to_reg (XEXP (orig_src, 0)));
4839 src_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4840 }
4841
4842 ofs = 0;
4843 if (src_align >= 64 && bytes >= 8)
4844 {
4845 words = bytes / 8;
4846
4847 for (i = 0; i < words; ++i)
4848 data_regs[nregs + i] = gen_reg_rtx (DImode);
4849
4850 for (i = 0; i < words; ++i)
4851 emit_move_insn (data_regs[nregs + i],
4852 adjust_address (orig_src, DImode, ofs + i * 8));
4853
4854 nregs += words;
4855 bytes -= words * 8;
4856 ofs += words * 8;
4857 }
4858
4859 if (src_align >= 32 && bytes >= 4)
4860 {
4861 words = bytes / 4;
4862
4863 for (i = 0; i < words; ++i)
4864 data_regs[nregs + i] = gen_reg_rtx (SImode);
4865
4866 for (i = 0; i < words; ++i)
4867 emit_move_insn (data_regs[nregs + i],
4868 adjust_address (orig_src, SImode, ofs + i * 4));
4869
4870 nregs += words;
4871 bytes -= words * 4;
4872 ofs += words * 4;
4873 }
4874
4875 if (bytes >= 8)
4876 {
4877 words = bytes / 8;
4878
4879 for (i = 0; i < words+1; ++i)
4880 data_regs[nregs + i] = gen_reg_rtx (DImode);
4881
4882 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
4883 words, ofs);
4884
4885 nregs += words;
4886 bytes -= words * 8;
4887 ofs += words * 8;
4888 }
4889
4890 if (! TARGET_BWX && bytes >= 4)
4891 {
4892 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
4893 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
4894 bytes -= 4;
4895 ofs += 4;
4896 }
4897
4898 if (bytes >= 2)
4899 {
4900 if (src_align >= 16)
4901 {
4902 do {
4903 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4904 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
4905 bytes -= 2;
4906 ofs += 2;
4907 } while (bytes >= 2);
4908 }
4909 else if (! TARGET_BWX)
4910 {
4911 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4912 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4913 bytes -= 2;
4914 ofs += 2;
4915 }
4916 }
4917
4918 while (bytes > 0)
4919 {
4920 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4921 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4922 bytes -= 1;
4923 ofs += 1;
4924 }
4925
4926 src_done:
4927
4928 if (nregs > ARRAY_SIZE (data_regs))
4929 abort ();
4930
4931 /* Now save it back out again. */
4932
4933 i = 0, ofs = 0;
4934
4935 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
4936 {
4937 enum machine_mode mode;
4938 tmp = XEXP (XEXP (orig_dst, 0), 0);
4939
4940 mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
4941 if (GET_MODE (tmp) == mode)
4942 {
4943 if (nregs == 1)
4944 {
4945 emit_move_insn (tmp, data_regs[0]);
4946 i = 1;
4947 goto dst_done;
4948 }
4949
4950 else if (nregs == 2 && mode == TImode)
4951 {
4952 /* Undo the subregging done above when copying between
4953 two TImode registers. */
4954 if (GET_CODE (data_regs[0]) == SUBREG
4955 && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
4956 emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
4957 else
4958 {
4959 rtx seq;
4960
4961 start_sequence ();
4962 emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
4963 emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
4964 seq = get_insns ();
4965 end_sequence ();
4966
4967 emit_no_conflict_block (seq, tmp, data_regs[0],
4968 data_regs[1], NULL_RTX);
4969 }
4970
4971 i = 2;
4972 goto dst_done;
4973 }
4974 }
4975
4976 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4977 /* ??? Optimize mode < dst_mode with strict_low_part. */
4978
4979 /* No appropriate mode; fall back on memory. We can speed things
4980 up by recognizing extra alignment information. */
4981 orig_dst = replace_equiv_address (orig_dst,
4982 copy_addr_to_reg (XEXP (orig_dst, 0)));
4983 dst_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4984 }
4985
4986 /* Write out the data in whatever chunks reading the source allowed. */
4987 if (dst_align >= 64)
4988 {
4989 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4990 {
4991 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4992 data_regs[i]);
4993 ofs += 8;
4994 i++;
4995 }
4996 }
4997
4998 if (dst_align >= 32)
4999 {
5000 /* If the source has remaining DImode regs, write them out in
5001 two pieces. */
5002 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
5003 {
5004 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
5005 NULL_RTX, 1, OPTAB_WIDEN);
5006
5007 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
5008 gen_lowpart (SImode, data_regs[i]));
5009 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
5010 gen_lowpart (SImode, tmp));
5011 ofs += 8;
5012 i++;
5013 }
5014
5015 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
5016 {
5017 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
5018 data_regs[i]);
5019 ofs += 4;
5020 i++;
5021 }
5022 }
5023
5024 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
5025 {
5026 /* Write out a remaining block of words using unaligned methods. */
5027
5028 for (words = 1; i + words < nregs; words++)
5029 if (GET_MODE (data_regs[i + words]) != DImode)
5030 break;
5031
5032 if (words == 1)
5033 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
5034 else
5035 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
5036 words, ofs);
5037
5038 i += words;
5039 ofs += words * 8;
5040 }
5041
5042 /* Due to the above, this won't be aligned. */
5043 /* ??? If we have more than one of these, consider constructing full
5044 words in registers and using alpha_expand_unaligned_store_words. */
5045 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
5046 {
5047 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
5048 ofs += 4;
5049 i++;
5050 }
5051
5052 if (dst_align >= 16)
5053 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
5054 {
5055 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
5056 i++;
5057 ofs += 2;
5058 }
5059 else
5060 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
5061 {
5062 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
5063 i++;
5064 ofs += 2;
5065 }
5066
5067 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
5068 {
5069 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
5070 i++;
5071 ofs += 1;
5072 }
5073
5074 dst_done:
5075
5076 if (i != nregs)
5077 abort ();
5078
5079 return 1;
5080 }
5081
5082 int
5083 alpha_expand_block_clear (operands)
5084 rtx operands[];
5085 {
5086 rtx bytes_rtx = operands[1];
5087 rtx align_rtx = operands[2];
5088 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
5089 HOST_WIDE_INT bytes = orig_bytes;
5090 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
5091 HOST_WIDE_INT alignofs = 0;
5092 rtx orig_dst = operands[0];
5093 rtx tmp;
5094 int i, words, ofs = 0;
5095
5096 if (orig_bytes <= 0)
5097 return 1;
5098 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
5099 return 0;
5100
5101 /* Look for stricter alignment. */
5102 tmp = XEXP (orig_dst, 0);
5103 if (GET_CODE (tmp) == REG)
5104 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
5105 else if (GET_CODE (tmp) == PLUS
5106 && GET_CODE (XEXP (tmp, 0)) == REG
5107 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
5108 {
5109 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
5110 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
5111
5112 if (a > align)
5113 {
5114 if (a >= 64)
5115 align = a, alignofs = 8 - c % 8;
5116 else if (a >= 32)
5117 align = a, alignofs = 4 - c % 4;
5118 else if (a >= 16)
5119 align = a, alignofs = 2 - c % 2;
5120 }
5121 }
5122 else if (GET_CODE (tmp) == ADDRESSOF)
5123 {
5124 enum machine_mode mode;
5125
5126 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
5127 if (GET_MODE (XEXP (tmp, 0)) == mode)
5128 {
5129 emit_move_insn (XEXP (tmp, 0), const0_rtx);
5130 return 1;
5131 }
5132
5133 /* No appropriate mode; fall back on memory. */
5134 orig_dst = replace_equiv_address (orig_dst, copy_addr_to_reg (tmp));
5135 align = GET_MODE_BITSIZE (GET_MODE (XEXP (tmp, 0)));
5136 }
5137
5138 /* Handle an unaligned prefix first. */
5139
5140 if (alignofs > 0)
5141 {
5142 #if HOST_BITS_PER_WIDE_INT >= 64
5143 /* Given that alignofs is bounded by align, the only time BWX could
5144 generate three stores is for a 7 byte fill. Prefer two individual
5145 stores over a load/mask/store sequence. */
5146 if ((!TARGET_BWX || alignofs == 7)
5147 && align >= 32
5148 && !(alignofs == 4 && bytes >= 4))
5149 {
5150 enum machine_mode mode = (align >= 64 ? DImode : SImode);
5151 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
5152 rtx mem, tmp;
5153 HOST_WIDE_INT mask;
5154
5155 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
5156 set_mem_alias_set (mem, 0);
5157
5158 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
5159 if (bytes < alignofs)
5160 {
5161 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
5162 ofs += bytes;
5163 bytes = 0;
5164 }
5165 else
5166 {
5167 bytes -= alignofs;
5168 ofs += alignofs;
5169 }
5170 alignofs = 0;
5171
5172 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
5173 NULL_RTX, 1, OPTAB_WIDEN);
5174
5175 emit_move_insn (mem, tmp);
5176 }
5177 #endif
5178
5179 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
5180 {
5181 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
5182 bytes -= 1;
5183 ofs += 1;
5184 alignofs -= 1;
5185 }
5186 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
5187 {
5188 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
5189 bytes -= 2;
5190 ofs += 2;
5191 alignofs -= 2;
5192 }
5193 if (alignofs == 4 && bytes >= 4)
5194 {
5195 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
5196 bytes -= 4;
5197 ofs += 4;
5198 alignofs = 0;
5199 }
5200
5201 /* If we've not used the extra lead alignment information by now,
5202 we won't be able to. Downgrade align to match what's left over. */
5203 if (alignofs > 0)
5204 {
5205 alignofs = alignofs & -alignofs;
5206 align = MIN (align, alignofs * BITS_PER_UNIT);
5207 }
5208 }
5209
5210 /* Handle a block of contiguous long-words. */
5211
5212 if (align >= 64 && bytes >= 8)
5213 {
5214 words = bytes / 8;
5215
5216 for (i = 0; i < words; ++i)
5217 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
5218 const0_rtx);
5219
5220 bytes -= words * 8;
5221 ofs += words * 8;
5222 }
5223
5224 /* If the block is large and appropriately aligned, emit a single
5225 store followed by a sequence of stq_u insns. */
5226
5227 if (align >= 32 && bytes > 16)
5228 {
5229 rtx orig_dsta;
5230
5231 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
5232 bytes -= 4;
5233 ofs += 4;
5234
5235 orig_dsta = XEXP (orig_dst, 0);
5236 if (GET_CODE (orig_dsta) == LO_SUM)
5237 orig_dsta = force_reg (Pmode, orig_dsta);
5238
5239 words = bytes / 8;
5240 for (i = 0; i < words; ++i)
5241 {
5242 rtx mem
5243 = change_address (orig_dst, DImode,
5244 gen_rtx_AND (DImode,
5245 plus_constant (orig_dsta, ofs + i*8),
5246 GEN_INT (-8)));
5247 set_mem_alias_set (mem, 0);
5248 emit_move_insn (mem, const0_rtx);
5249 }
5250
5251 /* Depending on the alignment, the first stq_u may have overlapped
5252 with the initial stl, which means that the last stq_u didn't
5253 write as much as it would appear. Leave those questionable bytes
5254 unaccounted for. */
5255 bytes -= words * 8 - 4;
5256 ofs += words * 8 - 4;
5257 }
5258
5259 /* Handle a smaller block of aligned words. */
5260
5261 if ((align >= 64 && bytes == 4)
5262 || (align == 32 && bytes >= 4))
5263 {
5264 words = bytes / 4;
5265
5266 for (i = 0; i < words; ++i)
5267 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
5268 const0_rtx);
5269
5270 bytes -= words * 4;
5271 ofs += words * 4;
5272 }
5273
5274 /* An unaligned block uses stq_u stores for as many as possible. */
5275
5276 if (bytes >= 8)
5277 {
5278 words = bytes / 8;
5279
5280 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
5281
5282 bytes -= words * 8;
5283 ofs += words * 8;
5284 }
5285
5286 /* Next clean up any trailing pieces. */
5287
5288 #if HOST_BITS_PER_WIDE_INT >= 64
5289 /* Count the number of bits in BYTES for which aligned stores could
5290 be emitted. */
5291 words = 0;
5292 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
5293 if (bytes & i)
5294 words += 1;
5295
5296 /* If we have appropriate alignment (and it wouldn't take too many
5297 instructions otherwise), mask out the bytes we need. */
5298 if (TARGET_BWX ? words > 2 : bytes > 0)
5299 {
5300 if (align >= 64)
5301 {
5302 rtx mem, tmp;
5303 HOST_WIDE_INT mask;
5304
5305 mem = adjust_address (orig_dst, DImode, ofs);
5306 set_mem_alias_set (mem, 0);
5307
5308 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
5309
5310 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
5311 NULL_RTX, 1, OPTAB_WIDEN);
5312
5313 emit_move_insn (mem, tmp);
5314 return 1;
5315 }
5316 else if (align >= 32 && bytes < 4)
5317 {
5318 rtx mem, tmp;
5319 HOST_WIDE_INT mask;
5320
5321 mem = adjust_address (orig_dst, SImode, ofs);
5322 set_mem_alias_set (mem, 0);
5323
5324 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
5325
5326 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
5327 NULL_RTX, 1, OPTAB_WIDEN);
5328
5329 emit_move_insn (mem, tmp);
5330 return 1;
5331 }
5332 }
5333 #endif
5334
5335 if (!TARGET_BWX && bytes >= 4)
5336 {
5337 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
5338 bytes -= 4;
5339 ofs += 4;
5340 }
5341
5342 if (bytes >= 2)
5343 {
5344 if (align >= 16)
5345 {
5346 do {
5347 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
5348 const0_rtx);
5349 bytes -= 2;
5350 ofs += 2;
5351 } while (bytes >= 2);
5352 }
5353 else if (! TARGET_BWX)
5354 {
5355 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
5356 bytes -= 2;
5357 ofs += 2;
5358 }
5359 }
5360
5361 while (bytes > 0)
5362 {
5363 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
5364 bytes -= 1;
5365 ofs += 1;
5366 }
5367
5368 return 1;
5369 }
5370
5371 /* Returns a mask so that zap(x, value) == x & mask. */
5372
5373 rtx
5374 alpha_expand_zap_mask (value)
5375 HOST_WIDE_INT value;
5376 {
5377 rtx result;
5378 int i;
5379
5380 if (HOST_BITS_PER_WIDE_INT >= 64)
5381 {
5382 HOST_WIDE_INT mask = 0;
5383
5384 for (i = 7; i >= 0; --i)
5385 {
5386 mask <<= 8;
5387 if (!((value >> i) & 1))
5388 mask |= 0xff;
5389 }
5390
5391 result = gen_int_mode (mask, DImode);
5392 }
5393 else if (HOST_BITS_PER_WIDE_INT == 32)
5394 {
5395 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
5396
5397 for (i = 7; i >= 4; --i)
5398 {
5399 mask_hi <<= 8;
5400 if (!((value >> i) & 1))
5401 mask_hi |= 0xff;
5402 }
5403
5404 for (i = 3; i >= 0; --i)
5405 {
5406 mask_lo <<= 8;
5407 if (!((value >> i) & 1))
5408 mask_lo |= 0xff;
5409 }
5410
5411 result = immed_double_const (mask_lo, mask_hi, DImode);
5412 }
5413 else
5414 abort ();
5415
5416 return result;
5417 }
5418
5419 void
5420 alpha_expand_builtin_vector_binop (gen, mode, op0, op1, op2)
5421 rtx (*gen) PARAMS ((rtx, rtx, rtx));
5422 enum machine_mode mode;
5423 rtx op0, op1, op2;
5424 {
5425 op0 = gen_lowpart (mode, op0);
5426
5427 if (op1 == const0_rtx)
5428 op1 = CONST0_RTX (mode);
5429 else
5430 op1 = gen_lowpart (mode, op1);
5431
5432 if (op2 == const0_rtx)
5433 op2 = CONST0_RTX (mode);
5434 else
5435 op2 = gen_lowpart (mode, op2);
5436
5437 emit_insn ((*gen) (op0, op1, op2));
5438 }
5439 \f
5440 /* Adjust the cost of a scheduling dependency. Return the new cost of
5441 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5442
5443 static int
5444 alpha_adjust_cost (insn, link, dep_insn, cost)
5445 rtx insn;
5446 rtx link;
5447 rtx dep_insn;
5448 int cost;
5449 {
5450 enum attr_type insn_type, dep_insn_type;
5451
5452 /* If the dependence is an anti-dependence, there is no cost. For an
5453 output dependence, there is sometimes a cost, but it doesn't seem
5454 worth handling those few cases. */
5455 if (REG_NOTE_KIND (link) != 0)
5456 return cost;
5457
5458 /* If we can't recognize the insns, we can't really do anything. */
5459 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
5460 return cost;
5461
5462 insn_type = get_attr_type (insn);
5463 dep_insn_type = get_attr_type (dep_insn);
5464
5465 /* Bring in the user-defined memory latency. */
5466 if (dep_insn_type == TYPE_ILD
5467 || dep_insn_type == TYPE_FLD
5468 || dep_insn_type == TYPE_LDSYM)
5469 cost += alpha_memory_latency-1;
5470
5471 /* Everything else handled in DFA bypasses now. */
5472
5473 return cost;
5474 }
5475
5476 /* The number of instructions that can be issued per cycle. */
5477
5478 static int
5479 alpha_issue_rate ()
5480 {
5481 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
5482 }
5483
5484 static int
5485 alpha_use_dfa_pipeline_interface ()
5486 {
5487 return true;
5488 }
5489
5490 /* How many alternative schedules to try. This should be as wide as the
5491 scheduling freedom in the DFA, but no wider. Making this value too
5492 large results extra work for the scheduler.
5493
5494 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
5495 alternative schedules. For EV5, we can choose between E0/E1 and
5496 FA/FM. For EV6, an arithmatic insn can be issued to U0/U1/L0/L1. */
5497
5498 static int
5499 alpha_multipass_dfa_lookahead ()
5500 {
5501 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
5502 }
5503 \f
5504 /* Machine-specific function data. */
5505
5506 struct machine_function GTY(())
5507 {
5508 /* For unicosmk. */
5509 /* List of call information words for calls from this function. */
5510 struct rtx_def *first_ciw;
5511 struct rtx_def *last_ciw;
5512 int ciw_count;
5513
5514 /* List of deferred case vectors. */
5515 struct rtx_def *addr_list;
5516
5517 /* For OSF. */
5518 const char *some_ld_name;
5519 };
5520
5521 /* How to allocate a 'struct machine_function'. */
5522
5523 static struct machine_function *
5524 alpha_init_machine_status ()
5525 {
5526 return ((struct machine_function *)
5527 ggc_alloc_cleared (sizeof (struct machine_function)));
5528 }
5529
5530 /* Functions to save and restore alpha_return_addr_rtx. */
5531
5532 /* Start the ball rolling with RETURN_ADDR_RTX. */
5533
5534 rtx
5535 alpha_return_addr (count, frame)
5536 int count;
5537 rtx frame ATTRIBUTE_UNUSED;
5538 {
5539 if (count != 0)
5540 return const0_rtx;
5541
5542 return get_hard_reg_initial_val (Pmode, REG_RA);
5543 }
5544
5545 /* Return or create a pseudo containing the gp value for the current
5546 function. Needed only if TARGET_LD_BUGGY_LDGP. */
5547
5548 rtx
5549 alpha_gp_save_rtx ()
5550 {
5551 rtx r = get_hard_reg_initial_val (DImode, 29);
5552 if (GET_CODE (r) != MEM)
5553 r = gen_mem_addressof (r, NULL_TREE, /*rescan=*/true);
5554 return r;
5555 }
5556
5557 static int
5558 alpha_ra_ever_killed ()
5559 {
5560 rtx top;
5561
5562 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5563 return regs_ever_live[REG_RA];
5564
5565 push_topmost_sequence ();
5566 top = get_insns ();
5567 pop_topmost_sequence ();
5568
5569 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
5570 }
5571
5572 \f
5573 /* Return the trap mode suffix applicable to the current
5574 instruction, or NULL. */
5575
5576 static const char *
5577 get_trap_mode_suffix ()
5578 {
5579 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5580
5581 switch (s)
5582 {
5583 case TRAP_SUFFIX_NONE:
5584 return NULL;
5585
5586 case TRAP_SUFFIX_SU:
5587 if (alpha_fptm >= ALPHA_FPTM_SU)
5588 return "su";
5589 return NULL;
5590
5591 case TRAP_SUFFIX_SUI:
5592 if (alpha_fptm >= ALPHA_FPTM_SUI)
5593 return "sui";
5594 return NULL;
5595
5596 case TRAP_SUFFIX_V_SV:
5597 switch (alpha_fptm)
5598 {
5599 case ALPHA_FPTM_N:
5600 return NULL;
5601 case ALPHA_FPTM_U:
5602 return "v";
5603 case ALPHA_FPTM_SU:
5604 case ALPHA_FPTM_SUI:
5605 return "sv";
5606 }
5607 break;
5608
5609 case TRAP_SUFFIX_V_SV_SVI:
5610 switch (alpha_fptm)
5611 {
5612 case ALPHA_FPTM_N:
5613 return NULL;
5614 case ALPHA_FPTM_U:
5615 return "v";
5616 case ALPHA_FPTM_SU:
5617 return "sv";
5618 case ALPHA_FPTM_SUI:
5619 return "svi";
5620 }
5621 break;
5622
5623 case TRAP_SUFFIX_U_SU_SUI:
5624 switch (alpha_fptm)
5625 {
5626 case ALPHA_FPTM_N:
5627 return NULL;
5628 case ALPHA_FPTM_U:
5629 return "u";
5630 case ALPHA_FPTM_SU:
5631 return "su";
5632 case ALPHA_FPTM_SUI:
5633 return "sui";
5634 }
5635 break;
5636 }
5637 abort ();
5638 }
5639
5640 /* Return the rounding mode suffix applicable to the current
5641 instruction, or NULL. */
5642
5643 static const char *
5644 get_round_mode_suffix ()
5645 {
5646 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5647
5648 switch (s)
5649 {
5650 case ROUND_SUFFIX_NONE:
5651 return NULL;
5652 case ROUND_SUFFIX_NORMAL:
5653 switch (alpha_fprm)
5654 {
5655 case ALPHA_FPRM_NORM:
5656 return NULL;
5657 case ALPHA_FPRM_MINF:
5658 return "m";
5659 case ALPHA_FPRM_CHOP:
5660 return "c";
5661 case ALPHA_FPRM_DYN:
5662 return "d";
5663 }
5664 break;
5665
5666 case ROUND_SUFFIX_C:
5667 return "c";
5668 }
5669 abort ();
5670 }
5671
5672 /* Locate some local-dynamic symbol still in use by this function
5673 so that we can print its name in some movdi_er_tlsldm pattern. */
5674
5675 static const char *
5676 get_some_local_dynamic_name ()
5677 {
5678 rtx insn;
5679
5680 if (cfun->machine->some_ld_name)
5681 return cfun->machine->some_ld_name;
5682
5683 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5684 if (INSN_P (insn)
5685 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5686 return cfun->machine->some_ld_name;
5687
5688 abort ();
5689 }
5690
5691 static int
5692 get_some_local_dynamic_name_1 (px, data)
5693 rtx *px;
5694 void *data ATTRIBUTE_UNUSED;
5695 {
5696 rtx x = *px;
5697
5698 if (GET_CODE (x) == SYMBOL_REF
5699 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5700 {
5701 cfun->machine->some_ld_name = XSTR (x, 0);
5702 return 1;
5703 }
5704
5705 return 0;
5706 }
5707
5708 /* Print an operand. Recognize special options, documented below. */
5709
5710 void
5711 print_operand (file, x, code)
5712 FILE *file;
5713 rtx x;
5714 int code;
5715 {
5716 int i;
5717
5718 switch (code)
5719 {
5720 case '~':
5721 /* Print the assembler name of the current function. */
5722 assemble_name (file, alpha_fnname);
5723 break;
5724
5725 case '&':
5726 assemble_name (file, get_some_local_dynamic_name ());
5727 break;
5728
5729 case '/':
5730 {
5731 const char *trap = get_trap_mode_suffix ();
5732 const char *round = get_round_mode_suffix ();
5733
5734 if (trap || round)
5735 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5736 (trap ? trap : ""), (round ? round : ""));
5737 break;
5738 }
5739
5740 case ',':
5741 /* Generates single precision instruction suffix. */
5742 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5743 break;
5744
5745 case '-':
5746 /* Generates double precision instruction suffix. */
5747 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5748 break;
5749
5750 case '+':
5751 /* Generates a nop after a noreturn call at the very end of the
5752 function. */
5753 if (next_real_insn (current_output_insn) == 0)
5754 fprintf (file, "\n\tnop");
5755 break;
5756
5757 case '#':
5758 if (alpha_this_literal_sequence_number == 0)
5759 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5760 fprintf (file, "%d", alpha_this_literal_sequence_number);
5761 break;
5762
5763 case '*':
5764 if (alpha_this_gpdisp_sequence_number == 0)
5765 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5766 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5767 break;
5768
5769 case 'H':
5770 if (GET_CODE (x) == HIGH)
5771 output_addr_const (file, XEXP (x, 0));
5772 else
5773 output_operand_lossage ("invalid %%H value");
5774 break;
5775
5776 case 'J':
5777 {
5778 const char *lituse;
5779
5780 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5781 {
5782 x = XVECEXP (x, 0, 0);
5783 lituse = "lituse_tlsgd";
5784 }
5785 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5786 {
5787 x = XVECEXP (x, 0, 0);
5788 lituse = "lituse_tlsldm";
5789 }
5790 else if (GET_CODE (x) == CONST_INT)
5791 lituse = "lituse_jsr";
5792 else
5793 {
5794 output_operand_lossage ("invalid %%J value");
5795 break;
5796 }
5797
5798 if (x != const0_rtx)
5799 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5800 }
5801 break;
5802
5803 case 'r':
5804 /* If this operand is the constant zero, write it as "$31". */
5805 if (GET_CODE (x) == REG)
5806 fprintf (file, "%s", reg_names[REGNO (x)]);
5807 else if (x == CONST0_RTX (GET_MODE (x)))
5808 fprintf (file, "$31");
5809 else
5810 output_operand_lossage ("invalid %%r value");
5811 break;
5812
5813 case 'R':
5814 /* Similar, but for floating-point. */
5815 if (GET_CODE (x) == REG)
5816 fprintf (file, "%s", reg_names[REGNO (x)]);
5817 else if (x == CONST0_RTX (GET_MODE (x)))
5818 fprintf (file, "$f31");
5819 else
5820 output_operand_lossage ("invalid %%R value");
5821 break;
5822
5823 case 'N':
5824 /* Write the 1's complement of a constant. */
5825 if (GET_CODE (x) != CONST_INT)
5826 output_operand_lossage ("invalid %%N value");
5827
5828 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5829 break;
5830
5831 case 'P':
5832 /* Write 1 << C, for a constant C. */
5833 if (GET_CODE (x) != CONST_INT)
5834 output_operand_lossage ("invalid %%P value");
5835
5836 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5837 break;
5838
5839 case 'h':
5840 /* Write the high-order 16 bits of a constant, sign-extended. */
5841 if (GET_CODE (x) != CONST_INT)
5842 output_operand_lossage ("invalid %%h value");
5843
5844 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5845 break;
5846
5847 case 'L':
5848 /* Write the low-order 16 bits of a constant, sign-extended. */
5849 if (GET_CODE (x) != CONST_INT)
5850 output_operand_lossage ("invalid %%L value");
5851
5852 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5853 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5854 break;
5855
5856 case 'm':
5857 /* Write mask for ZAP insn. */
5858 if (GET_CODE (x) == CONST_DOUBLE)
5859 {
5860 HOST_WIDE_INT mask = 0;
5861 HOST_WIDE_INT value;
5862
5863 value = CONST_DOUBLE_LOW (x);
5864 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5865 i++, value >>= 8)
5866 if (value & 0xff)
5867 mask |= (1 << i);
5868
5869 value = CONST_DOUBLE_HIGH (x);
5870 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5871 i++, value >>= 8)
5872 if (value & 0xff)
5873 mask |= (1 << (i + sizeof (int)));
5874
5875 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5876 }
5877
5878 else if (GET_CODE (x) == CONST_INT)
5879 {
5880 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5881
5882 for (i = 0; i < 8; i++, value >>= 8)
5883 if (value & 0xff)
5884 mask |= (1 << i);
5885
5886 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5887 }
5888 else
5889 output_operand_lossage ("invalid %%m value");
5890 break;
5891
5892 case 'M':
5893 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5894 if (GET_CODE (x) != CONST_INT
5895 || (INTVAL (x) != 8 && INTVAL (x) != 16
5896 && INTVAL (x) != 32 && INTVAL (x) != 64))
5897 output_operand_lossage ("invalid %%M value");
5898
5899 fprintf (file, "%s",
5900 (INTVAL (x) == 8 ? "b"
5901 : INTVAL (x) == 16 ? "w"
5902 : INTVAL (x) == 32 ? "l"
5903 : "q"));
5904 break;
5905
5906 case 'U':
5907 /* Similar, except do it from the mask. */
5908 if (GET_CODE (x) == CONST_INT)
5909 {
5910 HOST_WIDE_INT value = INTVAL (x);
5911
5912 if (value == 0xff)
5913 {
5914 fputc ('b', file);
5915 break;
5916 }
5917 if (value == 0xffff)
5918 {
5919 fputc ('w', file);
5920 break;
5921 }
5922 if (value == 0xffffffff)
5923 {
5924 fputc ('l', file);
5925 break;
5926 }
5927 if (value == -1)
5928 {
5929 fputc ('q', file);
5930 break;
5931 }
5932 }
5933 else if (HOST_BITS_PER_WIDE_INT == 32
5934 && GET_CODE (x) == CONST_DOUBLE
5935 && CONST_DOUBLE_LOW (x) == 0xffffffff
5936 && CONST_DOUBLE_HIGH (x) == 0)
5937 {
5938 fputc ('l', file);
5939 break;
5940 }
5941 output_operand_lossage ("invalid %%U value");
5942 break;
5943
5944 case 's':
5945 /* Write the constant value divided by 8 for little-endian mode or
5946 (56 - value) / 8 for big-endian mode. */
5947
5948 if (GET_CODE (x) != CONST_INT
5949 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5950 ? 56
5951 : 64)
5952 || (INTVAL (x) & 7) != 0)
5953 output_operand_lossage ("invalid %%s value");
5954
5955 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5956 WORDS_BIG_ENDIAN
5957 ? (56 - INTVAL (x)) / 8
5958 : INTVAL (x) / 8);
5959 break;
5960
5961 case 'S':
5962 /* Same, except compute (64 - c) / 8 */
5963
5964 if (GET_CODE (x) != CONST_INT
5965 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5966 && (INTVAL (x) & 7) != 8)
5967 output_operand_lossage ("invalid %%s value");
5968
5969 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5970 break;
5971
5972 case 't':
5973 {
5974 /* On Unicos/Mk systems: use a DEX expression if the symbol
5975 clashes with a register name. */
5976 int dex = unicosmk_need_dex (x);
5977 if (dex)
5978 fprintf (file, "DEX(%d)", dex);
5979 else
5980 output_addr_const (file, x);
5981 }
5982 break;
5983
5984 case 'C': case 'D': case 'c': case 'd':
5985 /* Write out comparison name. */
5986 {
5987 enum rtx_code c = GET_CODE (x);
5988
5989 if (GET_RTX_CLASS (c) != '<')
5990 output_operand_lossage ("invalid %%C value");
5991
5992 else if (code == 'D')
5993 c = reverse_condition (c);
5994 else if (code == 'c')
5995 c = swap_condition (c);
5996 else if (code == 'd')
5997 c = swap_condition (reverse_condition (c));
5998
5999 if (c == LEU)
6000 fprintf (file, "ule");
6001 else if (c == LTU)
6002 fprintf (file, "ult");
6003 else if (c == UNORDERED)
6004 fprintf (file, "un");
6005 else
6006 fprintf (file, "%s", GET_RTX_NAME (c));
6007 }
6008 break;
6009
6010 case 'E':
6011 /* Write the divide or modulus operator. */
6012 switch (GET_CODE (x))
6013 {
6014 case DIV:
6015 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
6016 break;
6017 case UDIV:
6018 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
6019 break;
6020 case MOD:
6021 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
6022 break;
6023 case UMOD:
6024 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
6025 break;
6026 default:
6027 output_operand_lossage ("invalid %%E value");
6028 break;
6029 }
6030 break;
6031
6032 case 'A':
6033 /* Write "_u" for unaligned access. */
6034 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
6035 fprintf (file, "_u");
6036 break;
6037
6038 case 0:
6039 if (GET_CODE (x) == REG)
6040 fprintf (file, "%s", reg_names[REGNO (x)]);
6041 else if (GET_CODE (x) == MEM)
6042 output_address (XEXP (x, 0));
6043 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
6044 {
6045 switch (XINT (XEXP (x, 0), 1))
6046 {
6047 case UNSPEC_DTPREL:
6048 case UNSPEC_TPREL:
6049 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
6050 break;
6051 default:
6052 output_operand_lossage ("unknown relocation unspec");
6053 break;
6054 }
6055 }
6056 else
6057 output_addr_const (file, x);
6058 break;
6059
6060 default:
6061 output_operand_lossage ("invalid %%xn code");
6062 }
6063 }
6064
6065 void
6066 print_operand_address (file, addr)
6067 FILE *file;
6068 rtx addr;
6069 {
6070 int basereg = 31;
6071 HOST_WIDE_INT offset = 0;
6072
6073 if (GET_CODE (addr) == AND)
6074 addr = XEXP (addr, 0);
6075
6076 if (GET_CODE (addr) == PLUS
6077 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6078 {
6079 offset = INTVAL (XEXP (addr, 1));
6080 addr = XEXP (addr, 0);
6081 }
6082
6083 if (GET_CODE (addr) == LO_SUM)
6084 {
6085 const char *reloc16, *reloclo;
6086 rtx op1 = XEXP (addr, 1);
6087
6088 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
6089 {
6090 op1 = XEXP (op1, 0);
6091 switch (XINT (op1, 1))
6092 {
6093 case UNSPEC_DTPREL:
6094 reloc16 = NULL;
6095 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
6096 break;
6097 case UNSPEC_TPREL:
6098 reloc16 = NULL;
6099 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
6100 break;
6101 default:
6102 output_operand_lossage ("unknown relocation unspec");
6103 return;
6104 }
6105
6106 output_addr_const (file, XVECEXP (op1, 0, 0));
6107 }
6108 else
6109 {
6110 reloc16 = "gprel";
6111 reloclo = "gprellow";
6112 output_addr_const (file, op1);
6113 }
6114
6115 if (offset)
6116 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
6117
6118 addr = XEXP (addr, 0);
6119 if (GET_CODE (addr) == REG)
6120 basereg = REGNO (addr);
6121 else if (GET_CODE (addr) == SUBREG
6122 && GET_CODE (SUBREG_REG (addr)) == REG)
6123 basereg = subreg_regno (addr);
6124 else
6125 abort ();
6126
6127 fprintf (file, "($%d)\t\t!%s", basereg,
6128 (basereg == 29 ? reloc16 : reloclo));
6129 return;
6130 }
6131
6132 if (GET_CODE (addr) == REG)
6133 basereg = REGNO (addr);
6134 else if (GET_CODE (addr) == SUBREG
6135 && GET_CODE (SUBREG_REG (addr)) == REG)
6136 basereg = subreg_regno (addr);
6137 else if (GET_CODE (addr) == CONST_INT)
6138 offset = INTVAL (addr);
6139
6140 #if TARGET_ABI_OPEN_VMS
6141 else if (GET_CODE (addr) == SYMBOL_REF)
6142 {
6143 fprintf (file, "%s", XSTR (addr, 0));
6144 return;
6145 }
6146 else if (GET_CODE (addr) == CONST
6147 && GET_CODE (XEXP (addr, 0)) == PLUS
6148 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
6149 {
6150 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
6151 XSTR (XEXP (XEXP (addr, 0), 0), 0),
6152 INTVAL (XEXP (XEXP (addr, 0), 1)));
6153 return;
6154 }
6155 #endif
6156
6157 else
6158 abort ();
6159
6160 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
6161 }
6162 \f
6163 /* Emit RTL insns to initialize the variable parts of a trampoline at
6164 TRAMP. FNADDR is an RTX for the address of the function's pure
6165 code. CXT is an RTX for the static chain value for the function.
6166
6167 The three offset parameters are for the individual template's
6168 layout. A JMPOFS < 0 indicates that the trampoline does not
6169 contain instructions at all.
6170
6171 We assume here that a function will be called many more times than
6172 its address is taken (e.g., it might be passed to qsort), so we
6173 take the trouble to initialize the "hint" field in the JMP insn.
6174 Note that the hint field is PC (new) + 4 * bits 13:0. */
6175
6176 void
6177 alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
6178 rtx tramp, fnaddr, cxt;
6179 int fnofs, cxtofs, jmpofs;
6180 {
6181 rtx temp, temp1, addr;
6182 /* VMS really uses DImode pointers in memory at this point. */
6183 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
6184
6185 #ifdef POINTERS_EXTEND_UNSIGNED
6186 fnaddr = convert_memory_address (mode, fnaddr);
6187 cxt = convert_memory_address (mode, cxt);
6188 #endif
6189
6190 /* Store function address and CXT. */
6191 addr = memory_address (mode, plus_constant (tramp, fnofs));
6192 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
6193 addr = memory_address (mode, plus_constant (tramp, cxtofs));
6194 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
6195
6196 /* This has been disabled since the hint only has a 32k range, and in
6197 no existing OS is the stack within 32k of the text segment. */
6198 if (0 && jmpofs >= 0)
6199 {
6200 /* Compute hint value. */
6201 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
6202 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
6203 OPTAB_WIDEN);
6204 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
6205 build_int_2 (2, 0), NULL_RTX, 1);
6206 temp = expand_and (SImode, gen_lowpart (SImode, temp),
6207 GEN_INT (0x3fff), 0);
6208
6209 /* Merge in the hint. */
6210 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
6211 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
6212 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
6213 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
6214 OPTAB_WIDEN);
6215 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
6216 }
6217
6218 #ifdef TRANSFER_FROM_TRAMPOLINE
6219 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
6220 0, VOIDmode, 1, tramp, Pmode);
6221 #endif
6222
6223 if (jmpofs >= 0)
6224 emit_insn (gen_imb ());
6225 }
6226 \f
6227 /* Determine where to put an argument to a function.
6228 Value is zero to push the argument on the stack,
6229 or a hard register in which to store the argument.
6230
6231 MODE is the argument's machine mode.
6232 TYPE is the data type of the argument (as a tree).
6233 This is null for libcalls where that information may
6234 not be available.
6235 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6236 the preceding args and about the function being called.
6237 NAMED is nonzero if this argument is a named parameter
6238 (otherwise it is an extra parameter matching an ellipsis).
6239
6240 On Alpha the first 6 words of args are normally in registers
6241 and the rest are pushed. */
6242
6243 rtx
6244 function_arg (cum, mode, type, named)
6245 CUMULATIVE_ARGS cum;
6246 enum machine_mode mode;
6247 tree type;
6248 int named ATTRIBUTE_UNUSED;
6249 {
6250 int basereg;
6251 int num_args;
6252
6253 /* Set up defaults for FP operands passed in FP registers, and
6254 integral operands passed in integer registers. */
6255 if (TARGET_FPREGS
6256 && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6257 || GET_MODE_CLASS (mode) == MODE_FLOAT))
6258 basereg = 32 + 16;
6259 else
6260 basereg = 16;
6261
6262 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
6263 the three platforms, so we can't avoid conditional compilation. */
6264 #if TARGET_ABI_OPEN_VMS
6265 {
6266 if (mode == VOIDmode)
6267 return alpha_arg_info_reg_val (cum);
6268
6269 num_args = cum.num_args;
6270 if (num_args >= 6 || MUST_PASS_IN_STACK (mode, type))
6271 return NULL_RTX;
6272 }
6273 #else
6274 #if TARGET_ABI_UNICOSMK
6275 {
6276 int size;
6277
6278 /* If this is the last argument, generate the call info word (CIW). */
6279 /* ??? We don't include the caller's line number in the CIW because
6280 I don't know how to determine it if debug infos are turned off. */
6281 if (mode == VOIDmode)
6282 {
6283 int i;
6284 HOST_WIDE_INT lo;
6285 HOST_WIDE_INT hi;
6286 rtx ciw;
6287
6288 lo = 0;
6289
6290 for (i = 0; i < cum.num_reg_words && i < 5; i++)
6291 if (cum.reg_args_type[i])
6292 lo |= (1 << (7 - i));
6293
6294 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
6295 lo |= 7;
6296 else
6297 lo |= cum.num_reg_words;
6298
6299 #if HOST_BITS_PER_WIDE_INT == 32
6300 hi = (cum.num_args << 20) | cum.num_arg_words;
6301 #else
6302 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
6303 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
6304 hi = 0;
6305 #endif
6306 ciw = immed_double_const (lo, hi, DImode);
6307
6308 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
6309 UNSPEC_UMK_LOAD_CIW);
6310 }
6311
6312 size = ALPHA_ARG_SIZE (mode, type, named);
6313 num_args = cum.num_reg_words;
6314 if (MUST_PASS_IN_STACK (mode, type)
6315 || cum.num_reg_words + size > 6 || cum.force_stack)
6316 return NULL_RTX;
6317 else if (type && TYPE_MODE (type) == BLKmode)
6318 {
6319 rtx reg1, reg2;
6320
6321 reg1 = gen_rtx_REG (DImode, num_args + 16);
6322 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
6323
6324 /* The argument fits in two registers. Note that we still need to
6325 reserve a register for empty structures. */
6326 if (size == 0)
6327 return NULL_RTX;
6328 else if (size == 1)
6329 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
6330 else
6331 {
6332 reg2 = gen_rtx_REG (DImode, num_args + 17);
6333 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
6334 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
6335 }
6336 }
6337 }
6338 #else
6339 {
6340 if (cum >= 6)
6341 return NULL_RTX;
6342 num_args = cum;
6343
6344 /* VOID is passed as a special flag for "last argument". */
6345 if (type == void_type_node)
6346 basereg = 16;
6347 else if (MUST_PASS_IN_STACK (mode, type))
6348 return NULL_RTX;
6349 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum, mode, type, named))
6350 basereg = 16;
6351 }
6352 #endif /* TARGET_ABI_UNICOSMK */
6353 #endif /* TARGET_ABI_OPEN_VMS */
6354
6355 return gen_rtx_REG (mode, num_args + basereg);
6356 }
6357
6358 tree
6359 alpha_build_va_list ()
6360 {
6361 tree base, ofs, record, type_decl;
6362
6363 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6364 return ptr_type_node;
6365
6366 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6367 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6368 TREE_CHAIN (record) = type_decl;
6369 TYPE_NAME (record) = type_decl;
6370
6371 /* C++? SET_IS_AGGR_TYPE (record, 1); */
6372
6373 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
6374 integer_type_node);
6375 DECL_FIELD_CONTEXT (ofs) = record;
6376
6377 base = build_decl (FIELD_DECL, get_identifier ("__base"),
6378 ptr_type_node);
6379 DECL_FIELD_CONTEXT (base) = record;
6380 TREE_CHAIN (base) = ofs;
6381
6382 TYPE_FIELDS (record) = base;
6383 layout_type (record);
6384
6385 return record;
6386 }
6387
6388 /* Perform any needed actions needed for a function that is receiving a
6389 variable number of arguments.
6390
6391 On the Alpha, we allocate space for all 12 arg registers, but only
6392 push those that are remaining. However, if NO registers need to be
6393 saved, don't allocate any space. This is not only because we won't
6394 need the space, but because AP includes the current_pretend_args_size
6395 and we don't want to mess up any ap-relative addresses already made.
6396
6397 If we are not to use the floating-point registers, save the integer
6398 registers where we would put the floating-point registers. This is
6399 not the most efficient way to implement varargs with just one register
6400 class, but it isn't worth doing anything more efficient in this rare
6401 case. */
6402
6403 #if TARGET_ABI_OSF
6404 void
6405 alpha_setup_incoming_varargs(cum, mode, type, pretend_size, no_rtl)
6406 CUMULATIVE_ARGS cum;
6407 enum machine_mode mode ATTRIBUTE_UNUSED;
6408 tree type ATTRIBUTE_UNUSED;
6409 int *pretend_size;
6410 int no_rtl;
6411 {
6412 if (cum >= 6)
6413 return;
6414
6415 if (!no_rtl)
6416 {
6417 int set = get_varargs_alias_set ();
6418 rtx tmp;
6419
6420 tmp = gen_rtx_MEM (BLKmode,
6421 plus_constant (virtual_incoming_args_rtx,
6422 (cum + 6) * UNITS_PER_WORD));
6423 set_mem_alias_set (tmp, set);
6424 move_block_from_reg (16 + cum, tmp, 6 - cum);
6425
6426 tmp = gen_rtx_MEM (BLKmode,
6427 plus_constant (virtual_incoming_args_rtx,
6428 cum * UNITS_PER_WORD));
6429 set_mem_alias_set (tmp, set);
6430 move_block_from_reg (16 + (TARGET_FPREGS ? 32 : 0) + cum, tmp,
6431 6 - cum);
6432 }
6433 *pretend_size = 12 * UNITS_PER_WORD;
6434 }
6435 #endif
6436
6437 void
6438 alpha_va_start (valist, nextarg)
6439 tree valist;
6440 rtx nextarg ATTRIBUTE_UNUSED;
6441 {
6442 HOST_WIDE_INT offset;
6443 tree t, offset_field, base_field;
6444
6445 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6446 return;
6447
6448 if (TARGET_ABI_UNICOSMK)
6449 std_expand_builtin_va_start (valist, nextarg);
6450
6451 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
6452 up by 48, storing fp arg registers in the first 48 bytes, and the
6453 integer arg registers in the next 48 bytes. This is only done,
6454 however, if any integer registers need to be stored.
6455
6456 If no integer registers need be stored, then we must subtract 48
6457 in order to account for the integer arg registers which are counted
6458 in argsize above, but which are not actually stored on the stack.
6459 Must further be careful here about structures straddling the last
6460 integer argument register; that futzes with pretend_args_size,
6461 which changes the meaning of AP. */
6462
6463 if (NUM_ARGS <= 6)
6464 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6465 else
6466 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6467
6468 if (TARGET_ABI_OPEN_VMS)
6469 {
6470 nextarg = plus_constant (nextarg, offset);
6471 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6472 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6473 make_tree (ptr_type_node, nextarg));
6474 TREE_SIDE_EFFECTS (t) = 1;
6475
6476 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6477 }
6478 else
6479 {
6480 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6481 offset_field = TREE_CHAIN (base_field);
6482
6483 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6484 valist, base_field);
6485 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6486 valist, offset_field);
6487
6488 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6489 t = build (PLUS_EXPR, ptr_type_node, t, build_int_2 (offset, 0));
6490 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6491 TREE_SIDE_EFFECTS (t) = 1;
6492 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6493
6494 t = build_int_2 (NUM_ARGS * UNITS_PER_WORD, 0);
6495 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6496 TREE_SIDE_EFFECTS (t) = 1;
6497 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6498 }
6499 }
6500
6501 rtx
6502 alpha_va_arg (valist, type)
6503 tree valist, type;
6504 {
6505 rtx addr;
6506 tree t, type_size, rounded_size;
6507 tree offset_field, base_field, addr_tree, addend;
6508 tree wide_type, wide_ofs;
6509 int indirect = 0;
6510
6511 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6512 return std_expand_builtin_va_arg (valist, type);
6513
6514 if (type == error_mark_node
6515 || (type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type))) == NULL
6516 || TREE_OVERFLOW (type_size))
6517 rounded_size = size_zero_node;
6518 else
6519 rounded_size = fold (build (MULT_EXPR, sizetype,
6520 fold (build (TRUNC_DIV_EXPR, sizetype,
6521 fold (build (PLUS_EXPR, sizetype,
6522 type_size,
6523 size_int (7))),
6524 size_int (8))),
6525 size_int (8)));
6526
6527 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6528 offset_field = TREE_CHAIN (base_field);
6529
6530 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6531 valist, base_field);
6532 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6533 valist, offset_field);
6534
6535 /* If the type could not be passed in registers, skip the block
6536 reserved for the registers. */
6537 if (MUST_PASS_IN_STACK (TYPE_MODE (type), type))
6538 {
6539 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6540 build (MAX_EXPR, TREE_TYPE (offset_field),
6541 offset_field, build_int_2 (6*8, 0)));
6542 TREE_SIDE_EFFECTS (t) = 1;
6543 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6544 }
6545
6546 wide_type = make_signed_type (64);
6547 wide_ofs = save_expr (build1 (CONVERT_EXPR, wide_type, offset_field));
6548
6549 addend = wide_ofs;
6550
6551 if (TYPE_MODE (type) == TFmode || TYPE_MODE (type) == TCmode)
6552 {
6553 indirect = 1;
6554 rounded_size = size_int (UNITS_PER_WORD);
6555 }
6556 else if (FLOAT_TYPE_P (type))
6557 {
6558 tree fpaddend, cond;
6559
6560 fpaddend = fold (build (PLUS_EXPR, TREE_TYPE (addend),
6561 addend, build_int_2 (-6*8, 0)));
6562
6563 cond = fold (build (LT_EXPR, integer_type_node,
6564 wide_ofs, build_int_2 (6*8, 0)));
6565
6566 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6567 fpaddend, addend));
6568 }
6569
6570 addr_tree = build (PLUS_EXPR, TREE_TYPE (base_field),
6571 base_field, addend);
6572
6573 addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
6574 addr = copy_to_reg (addr);
6575
6576 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6577 build (PLUS_EXPR, TREE_TYPE (offset_field),
6578 offset_field, rounded_size));
6579 TREE_SIDE_EFFECTS (t) = 1;
6580 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6581
6582 if (indirect)
6583 {
6584 addr = force_reg (Pmode, addr);
6585 addr = gen_rtx_MEM (Pmode, addr);
6586 }
6587
6588 return addr;
6589 }
6590 \f
6591 /* Builtins. */
6592
6593 enum alpha_builtin
6594 {
6595 ALPHA_BUILTIN_CMPBGE,
6596 ALPHA_BUILTIN_EXTBL,
6597 ALPHA_BUILTIN_EXTWL,
6598 ALPHA_BUILTIN_EXTLL,
6599 ALPHA_BUILTIN_EXTQL,
6600 ALPHA_BUILTIN_EXTWH,
6601 ALPHA_BUILTIN_EXTLH,
6602 ALPHA_BUILTIN_EXTQH,
6603 ALPHA_BUILTIN_INSBL,
6604 ALPHA_BUILTIN_INSWL,
6605 ALPHA_BUILTIN_INSLL,
6606 ALPHA_BUILTIN_INSQL,
6607 ALPHA_BUILTIN_INSWH,
6608 ALPHA_BUILTIN_INSLH,
6609 ALPHA_BUILTIN_INSQH,
6610 ALPHA_BUILTIN_MSKBL,
6611 ALPHA_BUILTIN_MSKWL,
6612 ALPHA_BUILTIN_MSKLL,
6613 ALPHA_BUILTIN_MSKQL,
6614 ALPHA_BUILTIN_MSKWH,
6615 ALPHA_BUILTIN_MSKLH,
6616 ALPHA_BUILTIN_MSKQH,
6617 ALPHA_BUILTIN_UMULH,
6618 ALPHA_BUILTIN_ZAP,
6619 ALPHA_BUILTIN_ZAPNOT,
6620 ALPHA_BUILTIN_AMASK,
6621 ALPHA_BUILTIN_IMPLVER,
6622 ALPHA_BUILTIN_RPCC,
6623 ALPHA_BUILTIN_THREAD_POINTER,
6624 ALPHA_BUILTIN_SET_THREAD_POINTER,
6625
6626 /* TARGET_MAX */
6627 ALPHA_BUILTIN_MINUB8,
6628 ALPHA_BUILTIN_MINSB8,
6629 ALPHA_BUILTIN_MINUW4,
6630 ALPHA_BUILTIN_MINSW4,
6631 ALPHA_BUILTIN_MAXUB8,
6632 ALPHA_BUILTIN_MAXSB8,
6633 ALPHA_BUILTIN_MAXUW4,
6634 ALPHA_BUILTIN_MAXSW4,
6635 ALPHA_BUILTIN_PERR,
6636 ALPHA_BUILTIN_PKLB,
6637 ALPHA_BUILTIN_PKWB,
6638 ALPHA_BUILTIN_UNPKBL,
6639 ALPHA_BUILTIN_UNPKBW,
6640
6641 /* TARGET_CIX */
6642 ALPHA_BUILTIN_CTTZ,
6643 ALPHA_BUILTIN_CTLZ,
6644 ALPHA_BUILTIN_CTPOP,
6645
6646 ALPHA_BUILTIN_max
6647 };
6648
6649 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6650 CODE_FOR_builtin_cmpbge,
6651 CODE_FOR_builtin_extbl,
6652 CODE_FOR_builtin_extwl,
6653 CODE_FOR_builtin_extll,
6654 CODE_FOR_builtin_extql,
6655 CODE_FOR_builtin_extwh,
6656 CODE_FOR_builtin_extlh,
6657 CODE_FOR_builtin_extqh,
6658 CODE_FOR_builtin_insbl,
6659 CODE_FOR_builtin_inswl,
6660 CODE_FOR_builtin_insll,
6661 CODE_FOR_builtin_insql,
6662 CODE_FOR_builtin_inswh,
6663 CODE_FOR_builtin_inslh,
6664 CODE_FOR_builtin_insqh,
6665 CODE_FOR_builtin_mskbl,
6666 CODE_FOR_builtin_mskwl,
6667 CODE_FOR_builtin_mskll,
6668 CODE_FOR_builtin_mskql,
6669 CODE_FOR_builtin_mskwh,
6670 CODE_FOR_builtin_msklh,
6671 CODE_FOR_builtin_mskqh,
6672 CODE_FOR_umuldi3_highpart,
6673 CODE_FOR_builtin_zap,
6674 CODE_FOR_builtin_zapnot,
6675 CODE_FOR_builtin_amask,
6676 CODE_FOR_builtin_implver,
6677 CODE_FOR_builtin_rpcc,
6678 CODE_FOR_load_tp,
6679 CODE_FOR_set_tp,
6680
6681 /* TARGET_MAX */
6682 CODE_FOR_builtin_minub8,
6683 CODE_FOR_builtin_minsb8,
6684 CODE_FOR_builtin_minuw4,
6685 CODE_FOR_builtin_minsw4,
6686 CODE_FOR_builtin_maxub8,
6687 CODE_FOR_builtin_maxsb8,
6688 CODE_FOR_builtin_maxuw4,
6689 CODE_FOR_builtin_maxsw4,
6690 CODE_FOR_builtin_perr,
6691 CODE_FOR_builtin_pklb,
6692 CODE_FOR_builtin_pkwb,
6693 CODE_FOR_builtin_unpkbl,
6694 CODE_FOR_builtin_unpkbw,
6695
6696 /* TARGET_CIX */
6697 CODE_FOR_builtin_cttz,
6698 CODE_FOR_builtin_ctlz,
6699 CODE_FOR_builtin_ctpop
6700 };
6701
6702 struct alpha_builtin_def
6703 {
6704 const char *name;
6705 enum alpha_builtin code;
6706 unsigned int target_mask;
6707 };
6708
6709 static struct alpha_builtin_def const zero_arg_builtins[] = {
6710 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0 },
6711 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0 }
6712 };
6713
6714 static struct alpha_builtin_def const one_arg_builtins[] = {
6715 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0 },
6716 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX },
6717 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX },
6718 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX },
6719 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX },
6720 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX },
6721 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX },
6722 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX }
6723 };
6724
6725 static struct alpha_builtin_def const two_arg_builtins[] = {
6726 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0 },
6727 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0 },
6728 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0 },
6729 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0 },
6730 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0 },
6731 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0 },
6732 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0 },
6733 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0 },
6734 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0 },
6735 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0 },
6736 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0 },
6737 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0 },
6738 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0 },
6739 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0 },
6740 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0 },
6741 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0 },
6742 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0 },
6743 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0 },
6744 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0 },
6745 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0 },
6746 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0 },
6747 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0 },
6748 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0 },
6749 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0 },
6750 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0 },
6751 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX },
6752 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX },
6753 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX },
6754 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX },
6755 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX },
6756 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX },
6757 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX },
6758 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX },
6759 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX }
6760 };
6761
6762 static void
6763 alpha_init_builtins ()
6764 {
6765 const struct alpha_builtin_def *p;
6766 tree ftype;
6767 size_t i;
6768
6769 ftype = build_function_type (long_integer_type_node, void_list_node);
6770
6771 p = zero_arg_builtins;
6772 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6773 if ((target_flags & p->target_mask) == p->target_mask)
6774 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6775 NULL, NULL_TREE);
6776
6777 ftype = build_function_type_list (long_integer_type_node,
6778 long_integer_type_node, NULL_TREE);
6779
6780 p = one_arg_builtins;
6781 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6782 if ((target_flags & p->target_mask) == p->target_mask)
6783 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6784 NULL, NULL_TREE);
6785
6786 ftype = build_function_type_list (long_integer_type_node,
6787 long_integer_type_node,
6788 long_integer_type_node, NULL_TREE);
6789
6790 p = two_arg_builtins;
6791 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6792 if ((target_flags & p->target_mask) == p->target_mask)
6793 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6794 NULL, NULL_TREE);
6795
6796 ftype = build_function_type (ptr_type_node, void_list_node);
6797 builtin_function ("__builtin_thread_pointer", ftype,
6798 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6799 NULL, NULL_TREE);
6800
6801 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6802 builtin_function ("__builtin_set_thread_pointer", ftype,
6803 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6804 NULL, NULL_TREE);
6805 }
6806
6807 /* Expand an expression EXP that calls a built-in function,
6808 with result going to TARGET if that's convenient
6809 (and in mode MODE if that's convenient).
6810 SUBTARGET may be used as the target for computing one of EXP's operands.
6811 IGNORE is nonzero if the value is to be ignored. */
6812
6813 static rtx
6814 alpha_expand_builtin (exp, target, subtarget, mode, ignore)
6815 tree exp;
6816 rtx target;
6817 rtx subtarget ATTRIBUTE_UNUSED;
6818 enum machine_mode mode ATTRIBUTE_UNUSED;
6819 int ignore ATTRIBUTE_UNUSED;
6820 {
6821 #define MAX_ARGS 2
6822
6823 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6824 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6825 tree arglist = TREE_OPERAND (exp, 1);
6826 enum insn_code icode;
6827 rtx op[MAX_ARGS], pat;
6828 int arity;
6829 bool nonvoid;
6830
6831 if (fcode >= ALPHA_BUILTIN_max)
6832 internal_error ("bad builtin fcode");
6833 icode = code_for_builtin[fcode];
6834 if (icode == 0)
6835 internal_error ("bad builtin fcode");
6836
6837 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6838
6839 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6840 arglist;
6841 arglist = TREE_CHAIN (arglist), arity++)
6842 {
6843 const struct insn_operand_data *insn_op;
6844
6845 tree arg = TREE_VALUE (arglist);
6846 if (arg == error_mark_node)
6847 return NULL_RTX;
6848 if (arity > MAX_ARGS)
6849 return NULL_RTX;
6850
6851 insn_op = &insn_data[icode].operand[arity + nonvoid];
6852
6853 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6854
6855 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6856 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6857 }
6858
6859 if (nonvoid)
6860 {
6861 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6862 if (!target
6863 || GET_MODE (target) != tmode
6864 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6865 target = gen_reg_rtx (tmode);
6866 }
6867
6868 switch (arity)
6869 {
6870 case 0:
6871 pat = GEN_FCN (icode) (target);
6872 break;
6873 case 1:
6874 if (nonvoid)
6875 pat = GEN_FCN (icode) (target, op[0]);
6876 else
6877 pat = GEN_FCN (icode) (op[0]);
6878 break;
6879 case 2:
6880 pat = GEN_FCN (icode) (target, op[0], op[1]);
6881 break;
6882 default:
6883 abort ();
6884 }
6885 if (!pat)
6886 return NULL_RTX;
6887 emit_insn (pat);
6888
6889 if (nonvoid)
6890 return target;
6891 else
6892 return const0_rtx;
6893 }
6894 \f
6895 /* This page contains routines that are used to determine what the function
6896 prologue and epilogue code will do and write them out. */
6897
6898 /* Compute the size of the save area in the stack. */
6899
6900 /* These variables are used for communication between the following functions.
6901 They indicate various things about the current function being compiled
6902 that are used to tell what kind of prologue, epilogue and procedure
6903 descriptior to generate. */
6904
6905 /* Nonzero if we need a stack procedure. */
6906 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6907 static enum alpha_procedure_types alpha_procedure_type;
6908
6909 /* Register number (either FP or SP) that is used to unwind the frame. */
6910 static int vms_unwind_regno;
6911
6912 /* Register number used to save FP. We need not have one for RA since
6913 we don't modify it for register procedures. This is only defined
6914 for register frame procedures. */
6915 static int vms_save_fp_regno;
6916
6917 /* Register number used to reference objects off our PV. */
6918 static int vms_base_regno;
6919
6920 /* Compute register masks for saved registers. */
6921
6922 static void
6923 alpha_sa_mask (imaskP, fmaskP)
6924 unsigned long *imaskP;
6925 unsigned long *fmaskP;
6926 {
6927 unsigned long imask = 0;
6928 unsigned long fmask = 0;
6929 unsigned int i;
6930
6931 /* Irritatingly, there are two kinds of thunks -- those created with
6932 TARGET_ASM_OUTPUT_MI_THUNK and those with DECL_THUNK_P that go
6933 through the regular part of the compiler. In the
6934 TARGET_ASM_OUTPUT_MI_THUNK case we don't have valid register life
6935 info, but assemble_start_function wants to output .frame and
6936 .mask directives. */
6937 if (current_function_is_thunk && !no_new_pseudos)
6938 {
6939 *imaskP = 0;
6940 *fmaskP = 0;
6941 return;
6942 }
6943
6944 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6945 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
6946
6947 /* One for every register we have to save. */
6948 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6949 if (! fixed_regs[i] && ! call_used_regs[i]
6950 && regs_ever_live[i] && i != REG_RA
6951 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
6952 {
6953 if (i < 32)
6954 imask |= (1UL << i);
6955 else
6956 fmask |= (1UL << (i - 32));
6957 }
6958
6959 /* We need to restore these for the handler. */
6960 if (current_function_calls_eh_return)
6961 for (i = 0; ; ++i)
6962 {
6963 unsigned regno = EH_RETURN_DATA_REGNO (i);
6964 if (regno == INVALID_REGNUM)
6965 break;
6966 imask |= 1UL << regno;
6967 }
6968
6969 /* If any register spilled, then spill the return address also. */
6970 /* ??? This is required by the Digital stack unwind specification
6971 and isn't needed if we're doing Dwarf2 unwinding. */
6972 if (imask || fmask || alpha_ra_ever_killed ())
6973 imask |= (1UL << REG_RA);
6974
6975 *imaskP = imask;
6976 *fmaskP = fmask;
6977 }
6978
6979 int
6980 alpha_sa_size ()
6981 {
6982 unsigned long mask[2];
6983 int sa_size = 0;
6984 int i, j;
6985
6986 alpha_sa_mask (&mask[0], &mask[1]);
6987
6988 if (TARGET_ABI_UNICOSMK)
6989 {
6990 if (mask[0] || mask[1])
6991 sa_size = 14;
6992 }
6993 else
6994 {
6995 for (j = 0; j < 2; ++j)
6996 for (i = 0; i < 32; ++i)
6997 if ((mask[j] >> i) & 1)
6998 sa_size++;
6999 }
7000
7001 if (TARGET_ABI_UNICOSMK)
7002 {
7003 /* We might not need to generate a frame if we don't make any calls
7004 (including calls to __T3E_MISMATCH if this is a vararg function),
7005 don't have any local variables which require stack slots, don't
7006 use alloca and have not determined that we need a frame for other
7007 reasons. */
7008
7009 alpha_procedure_type
7010 = (sa_size || get_frame_size() != 0
7011 || current_function_outgoing_args_size
7012 || current_function_stdarg || current_function_calls_alloca
7013 || frame_pointer_needed)
7014 ? PT_STACK : PT_REGISTER;
7015
7016 /* Always reserve space for saving callee-saved registers if we
7017 need a frame as required by the calling convention. */
7018 if (alpha_procedure_type == PT_STACK)
7019 sa_size = 14;
7020 }
7021 else if (TARGET_ABI_OPEN_VMS)
7022 {
7023 /* Start by assuming we can use a register procedure if we don't
7024 make any calls (REG_RA not used) or need to save any
7025 registers and a stack procedure if we do. */
7026 if ((mask[0] >> REG_RA) & 1)
7027 alpha_procedure_type = PT_STACK;
7028 else if (get_frame_size() != 0)
7029 alpha_procedure_type = PT_REGISTER;
7030 else
7031 alpha_procedure_type = PT_NULL;
7032
7033 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7034 made the final decision on stack procedure vs register procedure. */
7035 if (alpha_procedure_type == PT_STACK)
7036 sa_size -= 2;
7037
7038 /* Decide whether to refer to objects off our PV via FP or PV.
7039 If we need FP for something else or if we receive a nonlocal
7040 goto (which expects PV to contain the value), we must use PV.
7041 Otherwise, start by assuming we can use FP. */
7042
7043 vms_base_regno
7044 = (frame_pointer_needed
7045 || current_function_has_nonlocal_label
7046 || alpha_procedure_type == PT_STACK
7047 || current_function_outgoing_args_size)
7048 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7049
7050 /* If we want to copy PV into FP, we need to find some register
7051 in which to save FP. */
7052
7053 vms_save_fp_regno = -1;
7054 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7055 for (i = 0; i < 32; i++)
7056 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7057 vms_save_fp_regno = i;
7058
7059 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7060 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7061 else if (alpha_procedure_type == PT_NULL)
7062 vms_base_regno = REG_PV;
7063
7064 /* Stack unwinding should be done via FP unless we use it for PV. */
7065 vms_unwind_regno = (vms_base_regno == REG_PV
7066 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7067
7068 /* If this is a stack procedure, allow space for saving FP and RA. */
7069 if (alpha_procedure_type == PT_STACK)
7070 sa_size += 2;
7071 }
7072 else
7073 {
7074 /* Our size must be even (multiple of 16 bytes). */
7075 if (sa_size & 1)
7076 sa_size++;
7077 }
7078
7079 return sa_size * 8;
7080 }
7081
7082 /* Define the offset between two registers, one to be eliminated,
7083 and the other its replacement, at the start of a routine. */
7084
7085 HOST_WIDE_INT
7086 alpha_initial_elimination_offset (from, to)
7087 unsigned int from, to ATTRIBUTE_UNUSED;
7088 {
7089 HOST_WIDE_INT ret;
7090
7091 ret = alpha_sa_size ();
7092 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7093
7094 if (from == FRAME_POINTER_REGNUM)
7095 ;
7096 else if (from == ARG_POINTER_REGNUM)
7097 ret += (ALPHA_ROUND (get_frame_size ()
7098 + current_function_pretend_args_size)
7099 - current_function_pretend_args_size);
7100 else
7101 abort ();
7102
7103 return ret;
7104 }
7105
7106 int
7107 alpha_pv_save_size ()
7108 {
7109 alpha_sa_size ();
7110 return alpha_procedure_type == PT_STACK ? 8 : 0;
7111 }
7112
7113 int
7114 alpha_using_fp ()
7115 {
7116 alpha_sa_size ();
7117 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7118 }
7119
7120 #if TARGET_ABI_OPEN_VMS
7121
7122 const struct attribute_spec vms_attribute_table[] =
7123 {
7124 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7125 { "overlaid", 0, 0, true, false, false, NULL },
7126 { "global", 0, 0, true, false, false, NULL },
7127 { "initialize", 0, 0, true, false, false, NULL },
7128 { NULL, 0, 0, false, false, false, NULL }
7129 };
7130
7131 #endif
7132
7133 static int
7134 find_lo_sum_using_gp (px, data)
7135 rtx *px;
7136 void *data ATTRIBUTE_UNUSED;
7137 {
7138 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7139 }
7140
7141 int
7142 alpha_find_lo_sum_using_gp (insn)
7143 rtx insn;
7144 {
7145 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7146 }
7147
7148 static int
7149 alpha_does_function_need_gp ()
7150 {
7151 rtx insn;
7152
7153 /* The GP being variable is an OSF abi thing. */
7154 if (! TARGET_ABI_OSF)
7155 return 0;
7156
7157 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7158 return 1;
7159
7160 if (current_function_is_thunk)
7161 return 1;
7162
7163 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7164 Even if we are a static function, we still need to do this in case
7165 our address is taken and passed to something like qsort. */
7166
7167 push_topmost_sequence ();
7168 insn = get_insns ();
7169 pop_topmost_sequence ();
7170
7171 for (; insn; insn = NEXT_INSN (insn))
7172 if (INSN_P (insn)
7173 && GET_CODE (PATTERN (insn)) != USE
7174 && GET_CODE (PATTERN (insn)) != CLOBBER
7175 && get_attr_usegp (insn))
7176 return 1;
7177
7178 return 0;
7179 }
7180
7181 /* Write a version stamp. Don't write anything if we are running as a
7182 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
7183
7184 #ifdef HAVE_STAMP_H
7185 #include <stamp.h>
7186 #endif
7187
7188 void
7189 alpha_write_verstamp (file)
7190 FILE *file ATTRIBUTE_UNUSED;
7191 {
7192 #ifdef MS_STAMP
7193 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
7194 #endif
7195 }
7196 \f
7197 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7198 sequences. */
7199
7200 static rtx
7201 set_frame_related_p ()
7202 {
7203 rtx seq = get_insns ();
7204 rtx insn;
7205
7206 end_sequence ();
7207
7208 if (!seq)
7209 return NULL_RTX;
7210
7211 if (INSN_P (seq))
7212 {
7213 insn = seq;
7214 while (insn != NULL_RTX)
7215 {
7216 RTX_FRAME_RELATED_P (insn) = 1;
7217 insn = NEXT_INSN (insn);
7218 }
7219 seq = emit_insn (seq);
7220 }
7221 else
7222 {
7223 seq = emit_insn (seq);
7224 RTX_FRAME_RELATED_P (seq) = 1;
7225 }
7226 return seq;
7227 }
7228
7229 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7230
7231 /* Write function prologue. */
7232
7233 /* On vms we have two kinds of functions:
7234
7235 - stack frame (PROC_STACK)
7236 these are 'normal' functions with local vars and which are
7237 calling other functions
7238 - register frame (PROC_REGISTER)
7239 keeps all data in registers, needs no stack
7240
7241 We must pass this to the assembler so it can generate the
7242 proper pdsc (procedure descriptor)
7243 This is done with the '.pdesc' command.
7244
7245 On not-vms, we don't really differentiate between the two, as we can
7246 simply allocate stack without saving registers. */
7247
7248 void
7249 alpha_expand_prologue ()
7250 {
7251 /* Registers to save. */
7252 unsigned long imask = 0;
7253 unsigned long fmask = 0;
7254 /* Stack space needed for pushing registers clobbered by us. */
7255 HOST_WIDE_INT sa_size;
7256 /* Complete stack size needed. */
7257 HOST_WIDE_INT frame_size;
7258 /* Offset from base reg to register save area. */
7259 HOST_WIDE_INT reg_offset;
7260 rtx sa_reg, mem;
7261 int i;
7262
7263 sa_size = alpha_sa_size ();
7264
7265 frame_size = get_frame_size ();
7266 if (TARGET_ABI_OPEN_VMS)
7267 frame_size = ALPHA_ROUND (sa_size
7268 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7269 + frame_size
7270 + current_function_pretend_args_size);
7271 else if (TARGET_ABI_UNICOSMK)
7272 /* We have to allocate space for the DSIB if we generate a frame. */
7273 frame_size = ALPHA_ROUND (sa_size
7274 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7275 + ALPHA_ROUND (frame_size
7276 + current_function_outgoing_args_size);
7277 else
7278 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7279 + sa_size
7280 + ALPHA_ROUND (frame_size
7281 + current_function_pretend_args_size));
7282
7283 if (TARGET_ABI_OPEN_VMS)
7284 reg_offset = 8;
7285 else
7286 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7287
7288 alpha_sa_mask (&imask, &fmask);
7289
7290 /* Emit an insn to reload GP, if needed. */
7291 if (TARGET_ABI_OSF)
7292 {
7293 alpha_function_needs_gp = alpha_does_function_need_gp ();
7294 if (alpha_function_needs_gp)
7295 emit_insn (gen_prologue_ldgp ());
7296 }
7297
7298 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7299 the call to mcount ourselves, rather than having the linker do it
7300 magically in response to -pg. Since _mcount has special linkage,
7301 don't represent the call as a call. */
7302 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7303 emit_insn (gen_prologue_mcount ());
7304
7305 if (TARGET_ABI_UNICOSMK)
7306 unicosmk_gen_dsib (&imask);
7307
7308 /* Adjust the stack by the frame size. If the frame size is > 4096
7309 bytes, we need to be sure we probe somewhere in the first and last
7310 4096 bytes (we can probably get away without the latter test) and
7311 every 8192 bytes in between. If the frame size is > 32768, we
7312 do this in a loop. Otherwise, we generate the explicit probe
7313 instructions.
7314
7315 Note that we are only allowed to adjust sp once in the prologue. */
7316
7317 if (frame_size <= 32768)
7318 {
7319 if (frame_size > 4096)
7320 {
7321 int probed = 4096;
7322
7323 do
7324 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7325 ? -probed + 64
7326 : -probed)));
7327 while ((probed += 8192) < frame_size);
7328
7329 /* We only have to do this probe if we aren't saving registers. */
7330 if (sa_size == 0 && probed + 4096 < frame_size)
7331 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7332 }
7333
7334 if (frame_size != 0)
7335 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7336 GEN_INT (TARGET_ABI_UNICOSMK
7337 ? -frame_size + 64
7338 : -frame_size))));
7339 }
7340 else
7341 {
7342 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7343 number of 8192 byte blocks to probe. We then probe each block
7344 in the loop and then set SP to the proper location. If the
7345 amount remaining is > 4096, we have to do one more probe if we
7346 are not saving any registers. */
7347
7348 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7349 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7350 rtx ptr = gen_rtx_REG (DImode, 22);
7351 rtx count = gen_rtx_REG (DImode, 23);
7352 rtx seq;
7353
7354 emit_move_insn (count, GEN_INT (blocks));
7355 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7356 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7357
7358 /* Because of the difficulty in emitting a new basic block this
7359 late in the compilation, generate the loop as a single insn. */
7360 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7361
7362 if (leftover > 4096 && sa_size == 0)
7363 {
7364 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7365 MEM_VOLATILE_P (last) = 1;
7366 emit_move_insn (last, const0_rtx);
7367 }
7368
7369 if (TARGET_ABI_WINDOWS_NT)
7370 {
7371 /* For NT stack unwind (done by 'reverse execution'), it's
7372 not OK to take the result of a loop, even though the value
7373 is already in ptr, so we reload it via a single operation
7374 and subtract it to sp.
7375
7376 Yes, that's correct -- we have to reload the whole constant
7377 into a temporary via ldah+lda then subtract from sp. To
7378 ensure we get ldah+lda, we use a special pattern. */
7379
7380 HOST_WIDE_INT lo, hi;
7381 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7382 hi = frame_size - lo;
7383
7384 emit_move_insn (ptr, GEN_INT (hi));
7385 emit_insn (gen_nt_lda (ptr, GEN_INT (lo)));
7386 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7387 ptr));
7388 }
7389 else
7390 {
7391 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7392 GEN_INT (-leftover)));
7393 }
7394
7395 /* This alternative is special, because the DWARF code cannot
7396 possibly intuit through the loop above. So we invent this
7397 note it looks at instead. */
7398 RTX_FRAME_RELATED_P (seq) = 1;
7399 REG_NOTES (seq)
7400 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7401 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7402 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7403 GEN_INT (TARGET_ABI_UNICOSMK
7404 ? -frame_size + 64
7405 : -frame_size))),
7406 REG_NOTES (seq));
7407 }
7408
7409 if (!TARGET_ABI_UNICOSMK)
7410 {
7411 /* Cope with very large offsets to the register save area. */
7412 sa_reg = stack_pointer_rtx;
7413 if (reg_offset + sa_size > 0x8000)
7414 {
7415 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7416 HOST_WIDE_INT bias;
7417
7418 if (low + sa_size <= 0x8000)
7419 bias = reg_offset - low, reg_offset = low;
7420 else
7421 bias = reg_offset, reg_offset = 0;
7422
7423 sa_reg = gen_rtx_REG (DImode, 24);
7424 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
7425 GEN_INT (bias))));
7426 }
7427
7428 /* Save regs in stack order. Beginning with VMS PV. */
7429 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7430 {
7431 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
7432 set_mem_alias_set (mem, alpha_sr_alias_set);
7433 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
7434 }
7435
7436 /* Save register RA next. */
7437 if (imask & (1UL << REG_RA))
7438 {
7439 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7440 set_mem_alias_set (mem, alpha_sr_alias_set);
7441 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
7442 imask &= ~(1UL << REG_RA);
7443 reg_offset += 8;
7444 }
7445
7446 /* Now save any other registers required to be saved. */
7447 for (i = 0; i < 32; i++)
7448 if (imask & (1UL << i))
7449 {
7450 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7451 set_mem_alias_set (mem, alpha_sr_alias_set);
7452 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7453 reg_offset += 8;
7454 }
7455
7456 for (i = 0; i < 32; i++)
7457 if (fmask & (1UL << i))
7458 {
7459 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
7460 set_mem_alias_set (mem, alpha_sr_alias_set);
7461 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7462 reg_offset += 8;
7463 }
7464 }
7465 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7466 {
7467 /* The standard frame on the T3E includes space for saving registers.
7468 We just have to use it. We don't have to save the return address and
7469 the old frame pointer here - they are saved in the DSIB. */
7470
7471 reg_offset = -56;
7472 for (i = 9; i < 15; i++)
7473 if (imask & (1UL << i))
7474 {
7475 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7476 reg_offset));
7477 set_mem_alias_set (mem, alpha_sr_alias_set);
7478 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7479 reg_offset -= 8;
7480 }
7481 for (i = 2; i < 10; i++)
7482 if (fmask & (1UL << i))
7483 {
7484 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
7485 reg_offset));
7486 set_mem_alias_set (mem, alpha_sr_alias_set);
7487 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7488 reg_offset -= 8;
7489 }
7490 }
7491
7492 if (TARGET_ABI_OPEN_VMS)
7493 {
7494 if (alpha_procedure_type == PT_REGISTER)
7495 /* Register frame procedures save the fp.
7496 ?? Ought to have a dwarf2 save for this. */
7497 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7498 hard_frame_pointer_rtx);
7499
7500 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7501 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7502 gen_rtx_REG (DImode, REG_PV)));
7503
7504 if (alpha_procedure_type != PT_NULL
7505 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7506 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7507
7508 /* If we have to allocate space for outgoing args, do it now. */
7509 if (current_function_outgoing_args_size != 0)
7510 {
7511 rtx seq
7512 = emit_move_insn (stack_pointer_rtx,
7513 plus_constant
7514 (hard_frame_pointer_rtx,
7515 - (ALPHA_ROUND
7516 (current_function_outgoing_args_size))));
7517
7518 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7519 if ! frame_pointer_needed. Setting the bit will change the CFA
7520 computation rule to use sp again, which would be wrong if we had
7521 frame_pointer_needed, as this means sp might move unpredictably
7522 later on.
7523
7524 Also, note that
7525 frame_pointer_needed
7526 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7527 and
7528 current_function_outgoing_args_size != 0
7529 => alpha_procedure_type != PT_NULL,
7530
7531 so when we are not setting the bit here, we are guaranteed to
7532 have emited an FRP frame pointer update just before. */
7533 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7534 }
7535 }
7536 else if (!TARGET_ABI_UNICOSMK)
7537 {
7538 /* If we need a frame pointer, set it from the stack pointer. */
7539 if (frame_pointer_needed)
7540 {
7541 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7542 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7543 else
7544 /* This must always be the last instruction in the
7545 prologue, thus we emit a special move + clobber. */
7546 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7547 stack_pointer_rtx, sa_reg)));
7548 }
7549 }
7550
7551 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7552 the prologue, for exception handling reasons, we cannot do this for
7553 any insn that might fault. We could prevent this for mems with a
7554 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7555 have to prevent all such scheduling with a blockage.
7556
7557 Linux, on the other hand, never bothered to implement OSF/1's
7558 exception handling, and so doesn't care about such things. Anyone
7559 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7560
7561 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7562 emit_insn (gen_blockage ());
7563 }
7564
7565 /* Output the textual info surrounding the prologue. */
7566
7567 void
7568 alpha_start_function (file, fnname, decl)
7569 FILE *file;
7570 const char *fnname;
7571 tree decl ATTRIBUTE_UNUSED;
7572 {
7573 unsigned long imask = 0;
7574 unsigned long fmask = 0;
7575 /* Stack space needed for pushing registers clobbered by us. */
7576 HOST_WIDE_INT sa_size;
7577 /* Complete stack size needed. */
7578 unsigned HOST_WIDE_INT frame_size;
7579 /* Offset from base reg to register save area. */
7580 HOST_WIDE_INT reg_offset;
7581 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7582 int i;
7583
7584 /* Don't emit an extern directive for functions defined in the same file. */
7585 if (TARGET_ABI_UNICOSMK)
7586 {
7587 tree name_tree;
7588 name_tree = get_identifier (fnname);
7589 TREE_ASM_WRITTEN (name_tree) = 1;
7590 }
7591
7592 alpha_fnname = fnname;
7593 sa_size = alpha_sa_size ();
7594
7595 frame_size = get_frame_size ();
7596 if (TARGET_ABI_OPEN_VMS)
7597 frame_size = ALPHA_ROUND (sa_size
7598 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7599 + frame_size
7600 + current_function_pretend_args_size);
7601 else if (TARGET_ABI_UNICOSMK)
7602 frame_size = ALPHA_ROUND (sa_size
7603 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7604 + ALPHA_ROUND (frame_size
7605 + current_function_outgoing_args_size);
7606 else
7607 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7608 + sa_size
7609 + ALPHA_ROUND (frame_size
7610 + current_function_pretend_args_size));
7611
7612 if (TARGET_ABI_OPEN_VMS)
7613 reg_offset = 8;
7614 else
7615 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7616
7617 alpha_sa_mask (&imask, &fmask);
7618
7619 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7620 We have to do that before the .ent directive as we cannot switch
7621 files within procedures with native ecoff because line numbers are
7622 linked to procedure descriptors.
7623 Outputting the lineno helps debugging of one line functions as they
7624 would otherwise get no line number at all. Please note that we would
7625 like to put out last_linenum from final.c, but it is not accessible. */
7626
7627 if (write_symbols == SDB_DEBUG)
7628 {
7629 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7630 ASM_OUTPUT_SOURCE_FILENAME (file,
7631 DECL_SOURCE_FILE (current_function_decl));
7632 #endif
7633 #ifdef ASM_OUTPUT_SOURCE_LINE
7634 if (debug_info_level != DINFO_LEVEL_TERSE)
7635 ASM_OUTPUT_SOURCE_LINE (file,
7636 DECL_SOURCE_LINE (current_function_decl));
7637 #endif
7638 }
7639
7640 /* Issue function start and label. */
7641 if (TARGET_ABI_OPEN_VMS
7642 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7643 {
7644 fputs ("\t.ent ", file);
7645 assemble_name (file, fnname);
7646 putc ('\n', file);
7647
7648 /* If the function needs GP, we'll write the "..ng" label there.
7649 Otherwise, do it here. */
7650 if (TARGET_ABI_OSF
7651 && ! alpha_function_needs_gp
7652 && ! current_function_is_thunk)
7653 {
7654 putc ('$', file);
7655 assemble_name (file, fnname);
7656 fputs ("..ng:\n", file);
7657 }
7658 }
7659
7660 strcpy (entry_label, fnname);
7661 if (TARGET_ABI_OPEN_VMS)
7662 strcat (entry_label, "..en");
7663
7664 /* For public functions, the label must be globalized by appending an
7665 additional colon. */
7666 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7667 strcat (entry_label, ":");
7668
7669 ASM_OUTPUT_LABEL (file, entry_label);
7670 inside_function = TRUE;
7671
7672 if (TARGET_ABI_OPEN_VMS)
7673 fprintf (file, "\t.base $%d\n", vms_base_regno);
7674
7675 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7676 && !flag_inhibit_size_directive)
7677 {
7678 /* Set flags in procedure descriptor to request IEEE-conformant
7679 math-library routines. The value we set it to is PDSC_EXC_IEEE
7680 (/usr/include/pdsc.h). */
7681 fputs ("\t.eflag 48\n", file);
7682 }
7683
7684 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7685 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7686 alpha_arg_offset = -frame_size + 48;
7687
7688 /* Describe our frame. If the frame size is larger than an integer,
7689 print it as zero to avoid an assembler error. We won't be
7690 properly describing such a frame, but that's the best we can do. */
7691 if (TARGET_ABI_UNICOSMK)
7692 ;
7693 else if (TARGET_ABI_OPEN_VMS)
7694 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7695 HOST_WIDE_INT_PRINT_DEC "\n",
7696 vms_unwind_regno,
7697 frame_size >= (1UL << 31) ? 0 : frame_size,
7698 reg_offset);
7699 else if (!flag_inhibit_size_directive)
7700 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7701 (frame_pointer_needed
7702 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7703 frame_size >= (1UL << 31) ? 0 : frame_size,
7704 current_function_pretend_args_size);
7705
7706 /* Describe which registers were spilled. */
7707 if (TARGET_ABI_UNICOSMK)
7708 ;
7709 else if (TARGET_ABI_OPEN_VMS)
7710 {
7711 if (imask)
7712 /* ??? Does VMS care if mask contains ra? The old code didn't
7713 set it, so I don't here. */
7714 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7715 if (fmask)
7716 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7717 if (alpha_procedure_type == PT_REGISTER)
7718 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7719 }
7720 else if (!flag_inhibit_size_directive)
7721 {
7722 if (imask)
7723 {
7724 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7725 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7726
7727 for (i = 0; i < 32; ++i)
7728 if (imask & (1UL << i))
7729 reg_offset += 8;
7730 }
7731
7732 if (fmask)
7733 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7734 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7735 }
7736
7737 #if TARGET_ABI_OPEN_VMS
7738 /* Ifdef'ed cause link_section are only available then. */
7739 readonly_data_section ();
7740 fprintf (file, "\t.align 3\n");
7741 assemble_name (file, fnname); fputs ("..na:\n", file);
7742 fputs ("\t.ascii \"", file);
7743 assemble_name (file, fnname);
7744 fputs ("\\0\"\n", file);
7745 alpha_need_linkage (fnname, 1);
7746 text_section ();
7747 #endif
7748 }
7749
7750 /* Emit the .prologue note at the scheduled end of the prologue. */
7751
7752 static void
7753 alpha_output_function_end_prologue (file)
7754 FILE *file;
7755 {
7756 if (TARGET_ABI_UNICOSMK)
7757 ;
7758 else if (TARGET_ABI_OPEN_VMS)
7759 fputs ("\t.prologue\n", file);
7760 else if (TARGET_ABI_WINDOWS_NT)
7761 fputs ("\t.prologue 0\n", file);
7762 else if (!flag_inhibit_size_directive)
7763 fprintf (file, "\t.prologue %d\n",
7764 alpha_function_needs_gp || current_function_is_thunk);
7765 }
7766
7767 /* Write function epilogue. */
7768
7769 /* ??? At some point we will want to support full unwind, and so will
7770 need to mark the epilogue as well. At the moment, we just confuse
7771 dwarf2out. */
7772 #undef FRP
7773 #define FRP(exp) exp
7774
7775 void
7776 alpha_expand_epilogue ()
7777 {
7778 /* Registers to save. */
7779 unsigned long imask = 0;
7780 unsigned long fmask = 0;
7781 /* Stack space needed for pushing registers clobbered by us. */
7782 HOST_WIDE_INT sa_size;
7783 /* Complete stack size needed. */
7784 HOST_WIDE_INT frame_size;
7785 /* Offset from base reg to register save area. */
7786 HOST_WIDE_INT reg_offset;
7787 int fp_is_frame_pointer, fp_offset;
7788 rtx sa_reg, sa_reg_exp = NULL;
7789 rtx sp_adj1, sp_adj2, mem;
7790 rtx eh_ofs;
7791 int i;
7792
7793 sa_size = alpha_sa_size ();
7794
7795 frame_size = get_frame_size ();
7796 if (TARGET_ABI_OPEN_VMS)
7797 frame_size = ALPHA_ROUND (sa_size
7798 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7799 + frame_size
7800 + current_function_pretend_args_size);
7801 else if (TARGET_ABI_UNICOSMK)
7802 frame_size = ALPHA_ROUND (sa_size
7803 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7804 + ALPHA_ROUND (frame_size
7805 + current_function_outgoing_args_size);
7806 else
7807 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7808 + sa_size
7809 + ALPHA_ROUND (frame_size
7810 + current_function_pretend_args_size));
7811
7812 if (TARGET_ABI_OPEN_VMS)
7813 {
7814 if (alpha_procedure_type == PT_STACK)
7815 reg_offset = 8;
7816 else
7817 reg_offset = 0;
7818 }
7819 else
7820 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7821
7822 alpha_sa_mask (&imask, &fmask);
7823
7824 fp_is_frame_pointer
7825 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7826 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
7827 fp_offset = 0;
7828 sa_reg = stack_pointer_rtx;
7829
7830 if (current_function_calls_eh_return)
7831 eh_ofs = EH_RETURN_STACKADJ_RTX;
7832 else
7833 eh_ofs = NULL_RTX;
7834
7835 if (!TARGET_ABI_UNICOSMK && sa_size)
7836 {
7837 /* If we have a frame pointer, restore SP from it. */
7838 if ((TARGET_ABI_OPEN_VMS
7839 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7840 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
7841 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
7842
7843 /* Cope with very large offsets to the register save area. */
7844 if (reg_offset + sa_size > 0x8000)
7845 {
7846 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7847 HOST_WIDE_INT bias;
7848
7849 if (low + sa_size <= 0x8000)
7850 bias = reg_offset - low, reg_offset = low;
7851 else
7852 bias = reg_offset, reg_offset = 0;
7853
7854 sa_reg = gen_rtx_REG (DImode, 22);
7855 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
7856
7857 FRP (emit_move_insn (sa_reg, sa_reg_exp));
7858 }
7859
7860 /* Restore registers in order, excepting a true frame pointer. */
7861
7862 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7863 if (! eh_ofs)
7864 set_mem_alias_set (mem, alpha_sr_alias_set);
7865 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7866
7867 reg_offset += 8;
7868 imask &= ~(1UL << REG_RA);
7869
7870 for (i = 0; i < 32; ++i)
7871 if (imask & (1UL << i))
7872 {
7873 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
7874 fp_offset = reg_offset;
7875 else
7876 {
7877 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
7878 set_mem_alias_set (mem, alpha_sr_alias_set);
7879 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7880 }
7881 reg_offset += 8;
7882 }
7883
7884 for (i = 0; i < 32; ++i)
7885 if (fmask & (1UL << i))
7886 {
7887 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
7888 set_mem_alias_set (mem, alpha_sr_alias_set);
7889 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7890 reg_offset += 8;
7891 }
7892 }
7893 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7894 {
7895 /* Restore callee-saved general-purpose registers. */
7896
7897 reg_offset = -56;
7898
7899 for (i = 9; i < 15; i++)
7900 if (imask & (1UL << i))
7901 {
7902 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7903 reg_offset));
7904 set_mem_alias_set (mem, alpha_sr_alias_set);
7905 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7906 reg_offset -= 8;
7907 }
7908
7909 for (i = 2; i < 10; i++)
7910 if (fmask & (1UL << i))
7911 {
7912 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
7913 reg_offset));
7914 set_mem_alias_set (mem, alpha_sr_alias_set);
7915 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7916 reg_offset -= 8;
7917 }
7918
7919 /* Restore the return address from the DSIB. */
7920
7921 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
7922 set_mem_alias_set (mem, alpha_sr_alias_set);
7923 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7924 }
7925
7926 if (frame_size || eh_ofs)
7927 {
7928 sp_adj1 = stack_pointer_rtx;
7929
7930 if (eh_ofs)
7931 {
7932 sp_adj1 = gen_rtx_REG (DImode, 23);
7933 emit_move_insn (sp_adj1,
7934 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
7935 }
7936
7937 /* If the stack size is large, begin computation into a temporary
7938 register so as not to interfere with a potential fp restore,
7939 which must be consecutive with an SP restore. */
7940 if (frame_size < 32768
7941 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
7942 sp_adj2 = GEN_INT (frame_size);
7943 else if (TARGET_ABI_UNICOSMK)
7944 {
7945 sp_adj1 = gen_rtx_REG (DImode, 23);
7946 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
7947 sp_adj2 = const0_rtx;
7948 }
7949 else if (frame_size < 0x40007fffL)
7950 {
7951 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7952
7953 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
7954 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
7955 sp_adj1 = sa_reg;
7956 else
7957 {
7958 sp_adj1 = gen_rtx_REG (DImode, 23);
7959 FRP (emit_move_insn (sp_adj1, sp_adj2));
7960 }
7961 sp_adj2 = GEN_INT (low);
7962 }
7963 else
7964 {
7965 rtx tmp = gen_rtx_REG (DImode, 23);
7966 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
7967 if (!sp_adj2)
7968 {
7969 /* We can't drop new things to memory this late, afaik,
7970 so build it up by pieces. */
7971 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
7972 -(frame_size < 0)));
7973 if (!sp_adj2)
7974 abort ();
7975 }
7976 }
7977
7978 /* From now on, things must be in order. So emit blockages. */
7979
7980 /* Restore the frame pointer. */
7981 if (TARGET_ABI_UNICOSMK)
7982 {
7983 emit_insn (gen_blockage ());
7984 mem = gen_rtx_MEM (DImode,
7985 plus_constant (hard_frame_pointer_rtx, -16));
7986 set_mem_alias_set (mem, alpha_sr_alias_set);
7987 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7988 }
7989 else if (fp_is_frame_pointer)
7990 {
7991 emit_insn (gen_blockage ());
7992 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
7993 set_mem_alias_set (mem, alpha_sr_alias_set);
7994 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7995 }
7996 else if (TARGET_ABI_OPEN_VMS)
7997 {
7998 emit_insn (gen_blockage ());
7999 FRP (emit_move_insn (hard_frame_pointer_rtx,
8000 gen_rtx_REG (DImode, vms_save_fp_regno)));
8001 }
8002
8003 /* Restore the stack pointer. */
8004 emit_insn (gen_blockage ());
8005 if (sp_adj2 == const0_rtx)
8006 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8007 else
8008 FRP (emit_move_insn (stack_pointer_rtx,
8009 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8010 }
8011 else
8012 {
8013 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8014 {
8015 emit_insn (gen_blockage ());
8016 FRP (emit_move_insn (hard_frame_pointer_rtx,
8017 gen_rtx_REG (DImode, vms_save_fp_regno)));
8018 }
8019 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8020 {
8021 /* Decrement the frame pointer if the function does not have a
8022 frame. */
8023
8024 emit_insn (gen_blockage ());
8025 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8026 hard_frame_pointer_rtx, GEN_INT (-1))));
8027 }
8028 }
8029 }
8030 \f
8031 /* Output the rest of the textual info surrounding the epilogue. */
8032
8033 void
8034 alpha_end_function (file, fnname, decl)
8035 FILE *file;
8036 const char *fnname;
8037 tree decl ATTRIBUTE_UNUSED;
8038 {
8039 /* End the function. */
8040 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8041 {
8042 fputs ("\t.end ", file);
8043 assemble_name (file, fnname);
8044 putc ('\n', file);
8045 }
8046 inside_function = FALSE;
8047
8048 #if TARGET_ABI_OPEN_VMS
8049 alpha_write_linkage (file, fnname, decl);
8050 #endif
8051
8052 /* Output jump tables and the static subroutine information block. */
8053 if (TARGET_ABI_UNICOSMK)
8054 {
8055 unicosmk_output_ssib (file, fnname);
8056 unicosmk_output_deferred_case_vectors (file);
8057 }
8058 }
8059
8060 #if TARGET_ABI_OSF
8061 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8062
8063 In order to avoid the hordes of differences between generated code
8064 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8065 lots of code loading up large constants, generate rtl and emit it
8066 instead of going straight to text.
8067
8068 Not sure why this idea hasn't been explored before... */
8069
8070 static void
8071 alpha_output_mi_thunk_osf (file, thunk_fndecl, delta, vcall_offset, function)
8072 FILE *file;
8073 tree thunk_fndecl ATTRIBUTE_UNUSED;
8074 HOST_WIDE_INT delta;
8075 HOST_WIDE_INT vcall_offset;
8076 tree function;
8077 {
8078 HOST_WIDE_INT hi, lo;
8079 rtx this, insn, funexp;
8080
8081 /* We always require a valid GP. */
8082 emit_insn (gen_prologue_ldgp ());
8083 emit_note (NULL, NOTE_INSN_PROLOGUE_END);
8084
8085 /* Find the "this" pointer. If the function returns a structure,
8086 the structure return pointer is in $16. */
8087 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function))))
8088 this = gen_rtx_REG (Pmode, 17);
8089 else
8090 this = gen_rtx_REG (Pmode, 16);
8091
8092 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8093 entire constant for the add. */
8094 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8095 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8096 if (hi + lo == delta)
8097 {
8098 if (hi)
8099 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8100 if (lo)
8101 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8102 }
8103 else
8104 {
8105 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8106 delta, -(delta < 0));
8107 emit_insn (gen_adddi3 (this, this, tmp));
8108 }
8109
8110 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8111 if (vcall_offset)
8112 {
8113 rtx tmp, tmp2;
8114
8115 tmp = gen_rtx_REG (Pmode, 0);
8116 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8117
8118 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8119 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8120 if (hi + lo == vcall_offset)
8121 {
8122 if (hi)
8123 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8124 }
8125 else
8126 {
8127 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8128 vcall_offset, -(vcall_offset < 0));
8129 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8130 lo = 0;
8131 }
8132 if (lo)
8133 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8134 else
8135 tmp2 = tmp;
8136 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8137
8138 emit_insn (gen_adddi3 (this, this, tmp));
8139 }
8140
8141 /* Generate a tail call to the target function. */
8142 if (! TREE_USED (function))
8143 {
8144 assemble_external (function);
8145 TREE_USED (function) = 1;
8146 }
8147 funexp = XEXP (DECL_RTL (function), 0);
8148 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8149 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8150 SIBLING_CALL_P (insn) = 1;
8151
8152 /* Run just enough of rest_of_compilation to get the insns emitted.
8153 There's not really enough bulk here to make other passes such as
8154 instruction scheduling worth while. Note that use_thunk calls
8155 assemble_start_function and assemble_end_function. */
8156 insn = get_insns ();
8157 shorten_branches (insn);
8158 final_start_function (insn, file, 1);
8159 final (insn, file, 1, 0);
8160 final_end_function ();
8161 }
8162 #endif /* TARGET_ABI_OSF */
8163 \f
8164 /* Debugging support. */
8165
8166 #include "gstab.h"
8167
8168 /* Count the number of sdb related labels are generated (to find block
8169 start and end boundaries). */
8170
8171 int sdb_label_count = 0;
8172
8173 /* Next label # for each statement. */
8174
8175 static int sym_lineno = 0;
8176
8177 /* Count the number of .file directives, so that .loc is up to date. */
8178
8179 static int num_source_filenames = 0;
8180
8181 /* Name of the file containing the current function. */
8182
8183 static const char *current_function_file = "";
8184
8185 /* Offsets to alpha virtual arg/local debugging pointers. */
8186
8187 long alpha_arg_offset;
8188 long alpha_auto_offset;
8189 \f
8190 /* Emit a new filename to a stream. */
8191
8192 void
8193 alpha_output_filename (stream, name)
8194 FILE *stream;
8195 const char *name;
8196 {
8197 static int first_time = TRUE;
8198 char ltext_label_name[100];
8199
8200 if (first_time)
8201 {
8202 first_time = FALSE;
8203 ++num_source_filenames;
8204 current_function_file = name;
8205 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8206 output_quoted_string (stream, name);
8207 fprintf (stream, "\n");
8208 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8209 fprintf (stream, "\t#@stabs\n");
8210 }
8211
8212 else if (write_symbols == DBX_DEBUG)
8213 {
8214 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
8215 fprintf (stream, "%s", ASM_STABS_OP);
8216 output_quoted_string (stream, name);
8217 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
8218 }
8219
8220 else if (name != current_function_file
8221 && strcmp (name, current_function_file) != 0)
8222 {
8223 if (inside_function && ! TARGET_GAS)
8224 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8225 else
8226 {
8227 ++num_source_filenames;
8228 current_function_file = name;
8229 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8230 }
8231
8232 output_quoted_string (stream, name);
8233 fprintf (stream, "\n");
8234 }
8235 }
8236 \f
8237 /* Emit a linenumber to a stream. */
8238
8239 void
8240 alpha_output_lineno (stream, line)
8241 FILE *stream;
8242 int line;
8243 {
8244 if (write_symbols == DBX_DEBUG)
8245 {
8246 /* mips-tfile doesn't understand .stabd directives. */
8247 ++sym_lineno;
8248 fprintf (stream, "$LM%d:\n%s%d,0,%d,$LM%d\n",
8249 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
8250 }
8251 else
8252 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
8253 }
8254 \f
8255 /* Structure to show the current status of registers and memory. */
8256
8257 struct shadow_summary
8258 {
8259 struct {
8260 unsigned int i : 31; /* Mask of int regs */
8261 unsigned int fp : 31; /* Mask of fp regs */
8262 unsigned int mem : 1; /* mem == imem | fpmem */
8263 } used, defd;
8264 };
8265
8266 static void summarize_insn PARAMS ((rtx, struct shadow_summary *, int));
8267 static void alpha_handle_trap_shadows PARAMS ((void));
8268
8269 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8270 to the summary structure. SET is nonzero if the insn is setting the
8271 object, otherwise zero. */
8272
8273 static void
8274 summarize_insn (x, sum, set)
8275 rtx x;
8276 struct shadow_summary *sum;
8277 int set;
8278 {
8279 const char *format_ptr;
8280 int i, j;
8281
8282 if (x == 0)
8283 return;
8284
8285 switch (GET_CODE (x))
8286 {
8287 /* ??? Note that this case would be incorrect if the Alpha had a
8288 ZERO_EXTRACT in SET_DEST. */
8289 case SET:
8290 summarize_insn (SET_SRC (x), sum, 0);
8291 summarize_insn (SET_DEST (x), sum, 1);
8292 break;
8293
8294 case CLOBBER:
8295 summarize_insn (XEXP (x, 0), sum, 1);
8296 break;
8297
8298 case USE:
8299 summarize_insn (XEXP (x, 0), sum, 0);
8300 break;
8301
8302 case ASM_OPERANDS:
8303 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8304 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8305 break;
8306
8307 case PARALLEL:
8308 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8309 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8310 break;
8311
8312 case SUBREG:
8313 summarize_insn (SUBREG_REG (x), sum, 0);
8314 break;
8315
8316 case REG:
8317 {
8318 int regno = REGNO (x);
8319 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8320
8321 if (regno == 31 || regno == 63)
8322 break;
8323
8324 if (set)
8325 {
8326 if (regno < 32)
8327 sum->defd.i |= mask;
8328 else
8329 sum->defd.fp |= mask;
8330 }
8331 else
8332 {
8333 if (regno < 32)
8334 sum->used.i |= mask;
8335 else
8336 sum->used.fp |= mask;
8337 }
8338 }
8339 break;
8340
8341 case MEM:
8342 if (set)
8343 sum->defd.mem = 1;
8344 else
8345 sum->used.mem = 1;
8346
8347 /* Find the regs used in memory address computation: */
8348 summarize_insn (XEXP (x, 0), sum, 0);
8349 break;
8350
8351 case CONST_INT: case CONST_DOUBLE:
8352 case SYMBOL_REF: case LABEL_REF: case CONST:
8353 case SCRATCH: case ASM_INPUT:
8354 break;
8355
8356 /* Handle common unary and binary ops for efficiency. */
8357 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8358 case MOD: case UDIV: case UMOD: case AND: case IOR:
8359 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8360 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8361 case NE: case EQ: case GE: case GT: case LE:
8362 case LT: case GEU: case GTU: case LEU: case LTU:
8363 summarize_insn (XEXP (x, 0), sum, 0);
8364 summarize_insn (XEXP (x, 1), sum, 0);
8365 break;
8366
8367 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8368 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8369 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8370 case SQRT: case FFS:
8371 summarize_insn (XEXP (x, 0), sum, 0);
8372 break;
8373
8374 default:
8375 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8376 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8377 switch (format_ptr[i])
8378 {
8379 case 'e':
8380 summarize_insn (XEXP (x, i), sum, 0);
8381 break;
8382
8383 case 'E':
8384 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8385 summarize_insn (XVECEXP (x, i, j), sum, 0);
8386 break;
8387
8388 case 'i':
8389 break;
8390
8391 default:
8392 abort ();
8393 }
8394 }
8395 }
8396
8397 /* Ensure a sufficient number of `trapb' insns are in the code when
8398 the user requests code with a trap precision of functions or
8399 instructions.
8400
8401 In naive mode, when the user requests a trap-precision of
8402 "instruction", a trapb is needed after every instruction that may
8403 generate a trap. This ensures that the code is resumption safe but
8404 it is also slow.
8405
8406 When optimizations are turned on, we delay issuing a trapb as long
8407 as possible. In this context, a trap shadow is the sequence of
8408 instructions that starts with a (potentially) trap generating
8409 instruction and extends to the next trapb or call_pal instruction
8410 (but GCC never generates call_pal by itself). We can delay (and
8411 therefore sometimes omit) a trapb subject to the following
8412 conditions:
8413
8414 (a) On entry to the trap shadow, if any Alpha register or memory
8415 location contains a value that is used as an operand value by some
8416 instruction in the trap shadow (live on entry), then no instruction
8417 in the trap shadow may modify the register or memory location.
8418
8419 (b) Within the trap shadow, the computation of the base register
8420 for a memory load or store instruction may not involve using the
8421 result of an instruction that might generate an UNPREDICTABLE
8422 result.
8423
8424 (c) Within the trap shadow, no register may be used more than once
8425 as a destination register. (This is to make life easier for the
8426 trap-handler.)
8427
8428 (d) The trap shadow may not include any branch instructions. */
8429
8430 static void
8431 alpha_handle_trap_shadows ()
8432 {
8433 struct shadow_summary shadow;
8434 int trap_pending, exception_nesting;
8435 rtx i, n;
8436
8437 trap_pending = 0;
8438 exception_nesting = 0;
8439 shadow.used.i = 0;
8440 shadow.used.fp = 0;
8441 shadow.used.mem = 0;
8442 shadow.defd = shadow.used;
8443
8444 for (i = get_insns (); i ; i = NEXT_INSN (i))
8445 {
8446 if (GET_CODE (i) == NOTE)
8447 {
8448 switch (NOTE_LINE_NUMBER (i))
8449 {
8450 case NOTE_INSN_EH_REGION_BEG:
8451 exception_nesting++;
8452 if (trap_pending)
8453 goto close_shadow;
8454 break;
8455
8456 case NOTE_INSN_EH_REGION_END:
8457 exception_nesting--;
8458 if (trap_pending)
8459 goto close_shadow;
8460 break;
8461
8462 case NOTE_INSN_EPILOGUE_BEG:
8463 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8464 goto close_shadow;
8465 break;
8466 }
8467 }
8468 else if (trap_pending)
8469 {
8470 if (alpha_tp == ALPHA_TP_FUNC)
8471 {
8472 if (GET_CODE (i) == JUMP_INSN
8473 && GET_CODE (PATTERN (i)) == RETURN)
8474 goto close_shadow;
8475 }
8476 else if (alpha_tp == ALPHA_TP_INSN)
8477 {
8478 if (optimize > 0)
8479 {
8480 struct shadow_summary sum;
8481
8482 sum.used.i = 0;
8483 sum.used.fp = 0;
8484 sum.used.mem = 0;
8485 sum.defd = sum.used;
8486
8487 switch (GET_CODE (i))
8488 {
8489 case INSN:
8490 /* Annoyingly, get_attr_trap will abort on these. */
8491 if (GET_CODE (PATTERN (i)) == USE
8492 || GET_CODE (PATTERN (i)) == CLOBBER)
8493 break;
8494
8495 summarize_insn (PATTERN (i), &sum, 0);
8496
8497 if ((sum.defd.i & shadow.defd.i)
8498 || (sum.defd.fp & shadow.defd.fp))
8499 {
8500 /* (c) would be violated */
8501 goto close_shadow;
8502 }
8503
8504 /* Combine shadow with summary of current insn: */
8505 shadow.used.i |= sum.used.i;
8506 shadow.used.fp |= sum.used.fp;
8507 shadow.used.mem |= sum.used.mem;
8508 shadow.defd.i |= sum.defd.i;
8509 shadow.defd.fp |= sum.defd.fp;
8510 shadow.defd.mem |= sum.defd.mem;
8511
8512 if ((sum.defd.i & shadow.used.i)
8513 || (sum.defd.fp & shadow.used.fp)
8514 || (sum.defd.mem & shadow.used.mem))
8515 {
8516 /* (a) would be violated (also takes care of (b)) */
8517 if (get_attr_trap (i) == TRAP_YES
8518 && ((sum.defd.i & sum.used.i)
8519 || (sum.defd.fp & sum.used.fp)))
8520 abort ();
8521
8522 goto close_shadow;
8523 }
8524 break;
8525
8526 case JUMP_INSN:
8527 case CALL_INSN:
8528 case CODE_LABEL:
8529 goto close_shadow;
8530
8531 default:
8532 abort ();
8533 }
8534 }
8535 else
8536 {
8537 close_shadow:
8538 n = emit_insn_before (gen_trapb (), i);
8539 PUT_MODE (n, TImode);
8540 PUT_MODE (i, TImode);
8541 trap_pending = 0;
8542 shadow.used.i = 0;
8543 shadow.used.fp = 0;
8544 shadow.used.mem = 0;
8545 shadow.defd = shadow.used;
8546 }
8547 }
8548 }
8549
8550 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8551 && GET_CODE (i) == INSN
8552 && GET_CODE (PATTERN (i)) != USE
8553 && GET_CODE (PATTERN (i)) != CLOBBER
8554 && get_attr_trap (i) == TRAP_YES)
8555 {
8556 if (optimize && !trap_pending)
8557 summarize_insn (PATTERN (i), &shadow, 0);
8558 trap_pending = 1;
8559 }
8560 }
8561 }
8562 \f
8563 /* Alpha can only issue instruction groups simultaneously if they are
8564 suitibly aligned. This is very processor-specific. */
8565
8566 enum alphaev4_pipe {
8567 EV4_STOP = 0,
8568 EV4_IB0 = 1,
8569 EV4_IB1 = 2,
8570 EV4_IBX = 4
8571 };
8572
8573 enum alphaev5_pipe {
8574 EV5_STOP = 0,
8575 EV5_NONE = 1,
8576 EV5_E01 = 2,
8577 EV5_E0 = 4,
8578 EV5_E1 = 8,
8579 EV5_FAM = 16,
8580 EV5_FA = 32,
8581 EV5_FM = 64
8582 };
8583
8584 static enum alphaev4_pipe alphaev4_insn_pipe PARAMS ((rtx));
8585 static enum alphaev5_pipe alphaev5_insn_pipe PARAMS ((rtx));
8586 static rtx alphaev4_next_group PARAMS ((rtx, int *, int *));
8587 static rtx alphaev5_next_group PARAMS ((rtx, int *, int *));
8588 static rtx alphaev4_next_nop PARAMS ((int *));
8589 static rtx alphaev5_next_nop PARAMS ((int *));
8590
8591 static void alpha_align_insns
8592 PARAMS ((unsigned int, rtx (*)(rtx, int *, int *), rtx (*)(int *)));
8593
8594 static enum alphaev4_pipe
8595 alphaev4_insn_pipe (insn)
8596 rtx insn;
8597 {
8598 if (recog_memoized (insn) < 0)
8599 return EV4_STOP;
8600 if (get_attr_length (insn) != 4)
8601 return EV4_STOP;
8602
8603 switch (get_attr_type (insn))
8604 {
8605 case TYPE_ILD:
8606 case TYPE_FLD:
8607 return EV4_IBX;
8608
8609 case TYPE_LDSYM:
8610 case TYPE_IADD:
8611 case TYPE_ILOG:
8612 case TYPE_ICMOV:
8613 case TYPE_ICMP:
8614 case TYPE_IST:
8615 case TYPE_FST:
8616 case TYPE_SHIFT:
8617 case TYPE_IMUL:
8618 case TYPE_FBR:
8619 return EV4_IB0;
8620
8621 case TYPE_MISC:
8622 case TYPE_IBR:
8623 case TYPE_JSR:
8624 case TYPE_CALLPAL:
8625 case TYPE_FCPYS:
8626 case TYPE_FCMOV:
8627 case TYPE_FADD:
8628 case TYPE_FDIV:
8629 case TYPE_FMUL:
8630 return EV4_IB1;
8631
8632 default:
8633 abort ();
8634 }
8635 }
8636
8637 static enum alphaev5_pipe
8638 alphaev5_insn_pipe (insn)
8639 rtx insn;
8640 {
8641 if (recog_memoized (insn) < 0)
8642 return EV5_STOP;
8643 if (get_attr_length (insn) != 4)
8644 return EV5_STOP;
8645
8646 switch (get_attr_type (insn))
8647 {
8648 case TYPE_ILD:
8649 case TYPE_FLD:
8650 case TYPE_LDSYM:
8651 case TYPE_IADD:
8652 case TYPE_ILOG:
8653 case TYPE_ICMOV:
8654 case TYPE_ICMP:
8655 return EV5_E01;
8656
8657 case TYPE_IST:
8658 case TYPE_FST:
8659 case TYPE_SHIFT:
8660 case TYPE_IMUL:
8661 case TYPE_MISC:
8662 case TYPE_MVI:
8663 return EV5_E0;
8664
8665 case TYPE_IBR:
8666 case TYPE_JSR:
8667 case TYPE_CALLPAL:
8668 return EV5_E1;
8669
8670 case TYPE_FCPYS:
8671 return EV5_FAM;
8672
8673 case TYPE_FBR:
8674 case TYPE_FCMOV:
8675 case TYPE_FADD:
8676 case TYPE_FDIV:
8677 return EV5_FA;
8678
8679 case TYPE_FMUL:
8680 return EV5_FM;
8681
8682 default:
8683 abort();
8684 }
8685 }
8686
8687 /* IN_USE is a mask of the slots currently filled within the insn group.
8688 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8689 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8690
8691 LEN is, of course, the length of the group in bytes. */
8692
8693 static rtx
8694 alphaev4_next_group (insn, pin_use, plen)
8695 rtx insn;
8696 int *pin_use, *plen;
8697 {
8698 int len, in_use;
8699
8700 len = in_use = 0;
8701
8702 if (! INSN_P (insn)
8703 || GET_CODE (PATTERN (insn)) == CLOBBER
8704 || GET_CODE (PATTERN (insn)) == USE)
8705 goto next_and_done;
8706
8707 while (1)
8708 {
8709 enum alphaev4_pipe pipe;
8710
8711 pipe = alphaev4_insn_pipe (insn);
8712 switch (pipe)
8713 {
8714 case EV4_STOP:
8715 /* Force complex instructions to start new groups. */
8716 if (in_use)
8717 goto done;
8718
8719 /* If this is a completely unrecognized insn, its an asm.
8720 We don't know how long it is, so record length as -1 to
8721 signal a needed realignment. */
8722 if (recog_memoized (insn) < 0)
8723 len = -1;
8724 else
8725 len = get_attr_length (insn);
8726 goto next_and_done;
8727
8728 case EV4_IBX:
8729 if (in_use & EV4_IB0)
8730 {
8731 if (in_use & EV4_IB1)
8732 goto done;
8733 in_use |= EV4_IB1;
8734 }
8735 else
8736 in_use |= EV4_IB0 | EV4_IBX;
8737 break;
8738
8739 case EV4_IB0:
8740 if (in_use & EV4_IB0)
8741 {
8742 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8743 goto done;
8744 in_use |= EV4_IB1;
8745 }
8746 in_use |= EV4_IB0;
8747 break;
8748
8749 case EV4_IB1:
8750 if (in_use & EV4_IB1)
8751 goto done;
8752 in_use |= EV4_IB1;
8753 break;
8754
8755 default:
8756 abort();
8757 }
8758 len += 4;
8759
8760 /* Haifa doesn't do well scheduling branches. */
8761 if (GET_CODE (insn) == JUMP_INSN)
8762 goto next_and_done;
8763
8764 next:
8765 insn = next_nonnote_insn (insn);
8766
8767 if (!insn || ! INSN_P (insn))
8768 goto done;
8769
8770 /* Let Haifa tell us where it thinks insn group boundaries are. */
8771 if (GET_MODE (insn) == TImode)
8772 goto done;
8773
8774 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8775 goto next;
8776 }
8777
8778 next_and_done:
8779 insn = next_nonnote_insn (insn);
8780
8781 done:
8782 *plen = len;
8783 *pin_use = in_use;
8784 return insn;
8785 }
8786
8787 /* IN_USE is a mask of the slots currently filled within the insn group.
8788 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8789 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8790
8791 LEN is, of course, the length of the group in bytes. */
8792
8793 static rtx
8794 alphaev5_next_group (insn, pin_use, plen)
8795 rtx insn;
8796 int *pin_use, *plen;
8797 {
8798 int len, in_use;
8799
8800 len = in_use = 0;
8801
8802 if (! INSN_P (insn)
8803 || GET_CODE (PATTERN (insn)) == CLOBBER
8804 || GET_CODE (PATTERN (insn)) == USE)
8805 goto next_and_done;
8806
8807 while (1)
8808 {
8809 enum alphaev5_pipe pipe;
8810
8811 pipe = alphaev5_insn_pipe (insn);
8812 switch (pipe)
8813 {
8814 case EV5_STOP:
8815 /* Force complex instructions to start new groups. */
8816 if (in_use)
8817 goto done;
8818
8819 /* If this is a completely unrecognized insn, its an asm.
8820 We don't know how long it is, so record length as -1 to
8821 signal a needed realignment. */
8822 if (recog_memoized (insn) < 0)
8823 len = -1;
8824 else
8825 len = get_attr_length (insn);
8826 goto next_and_done;
8827
8828 /* ??? Most of the places below, we would like to abort, as
8829 it would indicate an error either in Haifa, or in the
8830 scheduling description. Unfortunately, Haifa never
8831 schedules the last instruction of the BB, so we don't
8832 have an accurate TI bit to go off. */
8833 case EV5_E01:
8834 if (in_use & EV5_E0)
8835 {
8836 if (in_use & EV5_E1)
8837 goto done;
8838 in_use |= EV5_E1;
8839 }
8840 else
8841 in_use |= EV5_E0 | EV5_E01;
8842 break;
8843
8844 case EV5_E0:
8845 if (in_use & EV5_E0)
8846 {
8847 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8848 goto done;
8849 in_use |= EV5_E1;
8850 }
8851 in_use |= EV5_E0;
8852 break;
8853
8854 case EV5_E1:
8855 if (in_use & EV5_E1)
8856 goto done;
8857 in_use |= EV5_E1;
8858 break;
8859
8860 case EV5_FAM:
8861 if (in_use & EV5_FA)
8862 {
8863 if (in_use & EV5_FM)
8864 goto done;
8865 in_use |= EV5_FM;
8866 }
8867 else
8868 in_use |= EV5_FA | EV5_FAM;
8869 break;
8870
8871 case EV5_FA:
8872 if (in_use & EV5_FA)
8873 goto done;
8874 in_use |= EV5_FA;
8875 break;
8876
8877 case EV5_FM:
8878 if (in_use & EV5_FM)
8879 goto done;
8880 in_use |= EV5_FM;
8881 break;
8882
8883 case EV5_NONE:
8884 break;
8885
8886 default:
8887 abort();
8888 }
8889 len += 4;
8890
8891 /* Haifa doesn't do well scheduling branches. */
8892 /* ??? If this is predicted not-taken, slotting continues, except
8893 that no more IBR, FBR, or JSR insns may be slotted. */
8894 if (GET_CODE (insn) == JUMP_INSN)
8895 goto next_and_done;
8896
8897 next:
8898 insn = next_nonnote_insn (insn);
8899
8900 if (!insn || ! INSN_P (insn))
8901 goto done;
8902
8903 /* Let Haifa tell us where it thinks insn group boundaries are. */
8904 if (GET_MODE (insn) == TImode)
8905 goto done;
8906
8907 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8908 goto next;
8909 }
8910
8911 next_and_done:
8912 insn = next_nonnote_insn (insn);
8913
8914 done:
8915 *plen = len;
8916 *pin_use = in_use;
8917 return insn;
8918 }
8919
8920 static rtx
8921 alphaev4_next_nop (pin_use)
8922 int *pin_use;
8923 {
8924 int in_use = *pin_use;
8925 rtx nop;
8926
8927 if (!(in_use & EV4_IB0))
8928 {
8929 in_use |= EV4_IB0;
8930 nop = gen_nop ();
8931 }
8932 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
8933 {
8934 in_use |= EV4_IB1;
8935 nop = gen_nop ();
8936 }
8937 else if (TARGET_FP && !(in_use & EV4_IB1))
8938 {
8939 in_use |= EV4_IB1;
8940 nop = gen_fnop ();
8941 }
8942 else
8943 nop = gen_unop ();
8944
8945 *pin_use = in_use;
8946 return nop;
8947 }
8948
8949 static rtx
8950 alphaev5_next_nop (pin_use)
8951 int *pin_use;
8952 {
8953 int in_use = *pin_use;
8954 rtx nop;
8955
8956 if (!(in_use & EV5_E1))
8957 {
8958 in_use |= EV5_E1;
8959 nop = gen_nop ();
8960 }
8961 else if (TARGET_FP && !(in_use & EV5_FA))
8962 {
8963 in_use |= EV5_FA;
8964 nop = gen_fnop ();
8965 }
8966 else if (TARGET_FP && !(in_use & EV5_FM))
8967 {
8968 in_use |= EV5_FM;
8969 nop = gen_fnop ();
8970 }
8971 else
8972 nop = gen_unop ();
8973
8974 *pin_use = in_use;
8975 return nop;
8976 }
8977
8978 /* The instruction group alignment main loop. */
8979
8980 static void
8981 alpha_align_insns (max_align, next_group, next_nop)
8982 unsigned int max_align;
8983 rtx (*next_group) PARAMS ((rtx, int *, int *));
8984 rtx (*next_nop) PARAMS ((int *));
8985 {
8986 /* ALIGN is the known alignment for the insn group. */
8987 unsigned int align;
8988 /* OFS is the offset of the current insn in the insn group. */
8989 int ofs;
8990 int prev_in_use, in_use, len;
8991 rtx i, next;
8992
8993 /* Let shorten branches care for assigning alignments to code labels. */
8994 shorten_branches (get_insns ());
8995
8996 if (align_functions < 4)
8997 align = 4;
8998 else if ((unsigned int) align_functions < max_align)
8999 align = align_functions;
9000 else
9001 align = max_align;
9002
9003 ofs = prev_in_use = 0;
9004 i = get_insns ();
9005 if (GET_CODE (i) == NOTE)
9006 i = next_nonnote_insn (i);
9007
9008 while (i)
9009 {
9010 next = (*next_group) (i, &in_use, &len);
9011
9012 /* When we see a label, resync alignment etc. */
9013 if (GET_CODE (i) == CODE_LABEL)
9014 {
9015 unsigned int new_align = 1 << label_to_alignment (i);
9016
9017 if (new_align >= align)
9018 {
9019 align = new_align < max_align ? new_align : max_align;
9020 ofs = 0;
9021 }
9022
9023 else if (ofs & (new_align-1))
9024 ofs = (ofs | (new_align-1)) + 1;
9025 if (len != 0)
9026 abort();
9027 }
9028
9029 /* Handle complex instructions special. */
9030 else if (in_use == 0)
9031 {
9032 /* Asms will have length < 0. This is a signal that we have
9033 lost alignment knowledge. Assume, however, that the asm
9034 will not mis-align instructions. */
9035 if (len < 0)
9036 {
9037 ofs = 0;
9038 align = 4;
9039 len = 0;
9040 }
9041 }
9042
9043 /* If the known alignment is smaller than the recognized insn group,
9044 realign the output. */
9045 else if ((int) align < len)
9046 {
9047 unsigned int new_log_align = len > 8 ? 4 : 3;
9048 rtx prev, where;
9049
9050 where = prev = prev_nonnote_insn (i);
9051 if (!where || GET_CODE (where) != CODE_LABEL)
9052 where = i;
9053
9054 /* Can't realign between a call and its gp reload. */
9055 if (! (TARGET_EXPLICIT_RELOCS
9056 && prev && GET_CODE (prev) == CALL_INSN))
9057 {
9058 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9059 align = 1 << new_log_align;
9060 ofs = 0;
9061 }
9062 }
9063
9064 /* If the group won't fit in the same INT16 as the previous,
9065 we need to add padding to keep the group together. Rather
9066 than simply leaving the insn filling to the assembler, we
9067 can make use of the knowledge of what sorts of instructions
9068 were issued in the previous group to make sure that all of
9069 the added nops are really free. */
9070 else if (ofs + len > (int) align)
9071 {
9072 int nop_count = (align - ofs) / 4;
9073 rtx where;
9074
9075 /* Insert nops before labels, branches, and calls to truely merge
9076 the execution of the nops with the previous instruction group. */
9077 where = prev_nonnote_insn (i);
9078 if (where)
9079 {
9080 if (GET_CODE (where) == CODE_LABEL)
9081 {
9082 rtx where2 = prev_nonnote_insn (where);
9083 if (where2 && GET_CODE (where2) == JUMP_INSN)
9084 where = where2;
9085 }
9086 else if (GET_CODE (where) == INSN)
9087 where = i;
9088 }
9089 else
9090 where = i;
9091
9092 do
9093 emit_insn_before ((*next_nop)(&prev_in_use), where);
9094 while (--nop_count);
9095 ofs = 0;
9096 }
9097
9098 ofs = (ofs + len) & (align - 1);
9099 prev_in_use = in_use;
9100 i = next;
9101 }
9102 }
9103 \f
9104 /* Machine dependent reorg pass. */
9105
9106 static void
9107 alpha_reorg ()
9108 {
9109 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9110 alpha_handle_trap_shadows ();
9111
9112 /* Due to the number of extra trapb insns, don't bother fixing up
9113 alignment when trap precision is instruction. Moreover, we can
9114 only do our job when sched2 is run. */
9115 if (optimize && !optimize_size
9116 && alpha_tp != ALPHA_TP_INSN
9117 && flag_schedule_insns_after_reload)
9118 {
9119 if (alpha_cpu == PROCESSOR_EV4)
9120 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9121 else if (alpha_cpu == PROCESSOR_EV5)
9122 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9123 }
9124 }
9125 \f
9126 #ifdef OBJECT_FORMAT_ELF
9127
9128 /* Switch to the section to which we should output X. The only thing
9129 special we do here is to honor small data. */
9130
9131 static void
9132 alpha_elf_select_rtx_section (mode, x, align)
9133 enum machine_mode mode;
9134 rtx x;
9135 unsigned HOST_WIDE_INT align;
9136 {
9137 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9138 /* ??? Consider using mergable sdata sections. */
9139 sdata_section ();
9140 else
9141 default_elf_select_rtx_section (mode, x, align);
9142 }
9143
9144 #endif /* OBJECT_FORMAT_ELF */
9145 \f
9146 /* Structure to collect function names for final output in link section. */
9147 /* Note that items marked with GTY can't be ifdef'ed out. */
9148
9149 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9150 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9151
9152 struct alpha_links GTY(())
9153 {
9154 int num;
9155 rtx linkage;
9156 enum links_kind lkind;
9157 enum reloc_kind rkind;
9158 };
9159
9160 struct alpha_funcs GTY(())
9161 {
9162 int num;
9163 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9164 links;
9165 };
9166
9167 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9168 splay_tree alpha_links_tree;
9169 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9170 splay_tree alpha_funcs_tree;
9171
9172 static GTY(()) int alpha_funcs_num;
9173
9174 #if TARGET_ABI_OPEN_VMS
9175
9176 /* Return the VMS argument type corresponding to MODE. */
9177
9178 enum avms_arg_type
9179 alpha_arg_type (mode)
9180 enum machine_mode mode;
9181 {
9182 switch (mode)
9183 {
9184 case SFmode:
9185 return TARGET_FLOAT_VAX ? FF : FS;
9186 case DFmode:
9187 return TARGET_FLOAT_VAX ? FD : FT;
9188 default:
9189 return I64;
9190 }
9191 }
9192
9193 /* Return an rtx for an integer representing the VMS Argument Information
9194 register value. */
9195
9196 rtx
9197 alpha_arg_info_reg_val (cum)
9198 CUMULATIVE_ARGS cum;
9199 {
9200 unsigned HOST_WIDE_INT regval = cum.num_args;
9201 int i;
9202
9203 for (i = 0; i < 6; i++)
9204 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9205
9206 return GEN_INT (regval);
9207 }
9208 \f
9209 /* Make (or fake) .linkage entry for function call.
9210
9211 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9212
9213 Return an SYMBOL_REF rtx for the linkage. */
9214
9215 rtx
9216 alpha_need_linkage (name, is_local)
9217 const char *name;
9218 int is_local;
9219 {
9220 splay_tree_node node;
9221 struct alpha_links *al;
9222
9223 if (name[0] == '*')
9224 name++;
9225
9226 if (is_local)
9227 {
9228 struct alpha_funcs *cfaf;
9229
9230 if (!alpha_funcs_tree)
9231 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9232 splay_tree_compare_pointers);
9233
9234 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9235
9236 cfaf->links = 0;
9237 cfaf->num = ++alpha_funcs_num;
9238
9239 splay_tree_insert (alpha_funcs_tree,
9240 (splay_tree_key) current_function_decl,
9241 (splay_tree_value) cfaf);
9242 }
9243
9244 if (alpha_links_tree)
9245 {
9246 /* Is this name already defined? */
9247
9248 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9249 if (node)
9250 {
9251 al = (struct alpha_links *) node->value;
9252 if (is_local)
9253 {
9254 /* Defined here but external assumed. */
9255 if (al->lkind == KIND_EXTERN)
9256 al->lkind = KIND_LOCAL;
9257 }
9258 else
9259 {
9260 /* Used here but unused assumed. */
9261 if (al->lkind == KIND_UNUSED)
9262 al->lkind = KIND_LOCAL;
9263 }
9264 return al->linkage;
9265 }
9266 }
9267 else
9268 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9269
9270 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9271 name = ggc_strdup (name);
9272
9273 /* Assume external if no definition. */
9274 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9275
9276 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9277 get_identifier (name);
9278
9279 /* Construct a SYMBOL_REF for us to call. */
9280 {
9281 size_t name_len = strlen (name);
9282 char *linksym = alloca (name_len + 6);
9283 linksym[0] = '$';
9284 memcpy (linksym + 1, name, name_len);
9285 memcpy (linksym + 1 + name_len, "..lk", 5);
9286 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9287 ggc_alloc_string (linksym, name_len + 5));
9288 }
9289
9290 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9291 (splay_tree_value) al);
9292
9293 return al->linkage;
9294 }
9295
9296 rtx
9297 alpha_use_linkage (linkage, cfundecl, lflag, rflag)
9298 rtx linkage;
9299 tree cfundecl;
9300 int lflag;
9301 int rflag;
9302 {
9303 splay_tree_node cfunnode;
9304 struct alpha_funcs *cfaf;
9305 struct alpha_links *al;
9306 const char *name = XSTR (linkage, 0);
9307
9308 cfaf = (struct alpha_funcs *) 0;
9309 al = (struct alpha_links *) 0;
9310
9311 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9312 cfaf = (struct alpha_funcs *) cfunnode->value;
9313
9314 if (cfaf->links)
9315 {
9316 splay_tree_node lnode;
9317
9318 /* Is this name already defined? */
9319
9320 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9321 if (lnode)
9322 al = (struct alpha_links *) lnode->value;
9323 }
9324 else
9325 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9326
9327 if (!al)
9328 {
9329 size_t name_len;
9330 size_t buflen;
9331 char buf [512];
9332 char *linksym;
9333 splay_tree_node node = 0;
9334 struct alpha_links *anl;
9335
9336 if (name[0] == '*')
9337 name++;
9338
9339 name_len = strlen (name);
9340
9341 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9342 al->num = cfaf->num;
9343
9344 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9345 if (node)
9346 {
9347 anl = (struct alpha_links *) node->value;
9348 al->lkind = anl->lkind;
9349 }
9350
9351 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9352 buflen = strlen (buf);
9353 linksym = alloca (buflen + 1);
9354 memcpy (linksym, buf, buflen + 1);
9355
9356 al->linkage = gen_rtx_SYMBOL_REF
9357 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9358
9359 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9360 (splay_tree_value) al);
9361 }
9362
9363 if (rflag)
9364 al->rkind = KIND_CODEADDR;
9365 else
9366 al->rkind = KIND_LINKAGE;
9367
9368 if (lflag)
9369 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9370 else
9371 return al->linkage;
9372 }
9373
9374 static int
9375 alpha_write_one_linkage (node, data)
9376 splay_tree_node node;
9377 void *data;
9378 {
9379 const char *const name = (const char *) node->key;
9380 struct alpha_links *link = (struct alpha_links *) node->value;
9381 FILE *stream = (FILE *) data;
9382
9383 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9384 if (link->rkind == KIND_CODEADDR)
9385 {
9386 if (link->lkind == KIND_LOCAL)
9387 {
9388 /* Local and used */
9389 fprintf (stream, "\t.quad %s..en\n", name);
9390 }
9391 else
9392 {
9393 /* External and used, request code address. */
9394 fprintf (stream, "\t.code_address %s\n", name);
9395 }
9396 }
9397 else
9398 {
9399 if (link->lkind == KIND_LOCAL)
9400 {
9401 /* Local and used, build linkage pair. */
9402 fprintf (stream, "\t.quad %s..en\n", name);
9403 fprintf (stream, "\t.quad %s\n", name);
9404 }
9405 else
9406 {
9407 /* External and used, request linkage pair. */
9408 fprintf (stream, "\t.linkage %s\n", name);
9409 }
9410 }
9411
9412 return 0;
9413 }
9414
9415 static void
9416 alpha_write_linkage (stream, funname, fundecl)
9417 FILE *stream;
9418 const char *funname;
9419 tree fundecl;
9420 {
9421 splay_tree_node node;
9422 struct alpha_funcs *func;
9423
9424 link_section ();
9425 fprintf (stream, "\t.align 3\n");
9426 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9427 func = (struct alpha_funcs *) node->value;
9428
9429 fputs ("\t.name ", stream);
9430 assemble_name (stream, funname);
9431 fputs ("..na\n", stream);
9432 ASM_OUTPUT_LABEL (stream, funname);
9433 fprintf (stream, "\t.pdesc ");
9434 assemble_name (stream, funname);
9435 fprintf (stream, "..en,%s\n",
9436 alpha_procedure_type == PT_STACK ? "stack"
9437 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9438
9439 if (func->links)
9440 {
9441 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9442 /* splay_tree_delete (func->links); */
9443 }
9444 }
9445
9446 /* Given a decl, a section name, and whether the decl initializer
9447 has relocs, choose attributes for the section. */
9448
9449 #define SECTION_VMS_OVERLAY SECTION_FORGET
9450 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9451 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9452
9453 static unsigned int
9454 vms_section_type_flags (decl, name, reloc)
9455 tree decl;
9456 const char *name;
9457 int reloc;
9458 {
9459 unsigned int flags = default_section_type_flags (decl, name, reloc);
9460
9461 if (decl && DECL_ATTRIBUTES (decl)
9462 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9463 flags |= SECTION_VMS_OVERLAY;
9464 if (decl && DECL_ATTRIBUTES (decl)
9465 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9466 flags |= SECTION_VMS_GLOBAL;
9467 if (decl && DECL_ATTRIBUTES (decl)
9468 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9469 flags |= SECTION_VMS_INITIALIZE;
9470
9471 return flags;
9472 }
9473
9474 /* Switch to an arbitrary section NAME with attributes as specified
9475 by FLAGS. ALIGN specifies any known alignment requirements for
9476 the section; 0 if the default should be used. */
9477
9478 static void
9479 vms_asm_named_section (name, flags)
9480 const char *name;
9481 unsigned int flags;
9482 {
9483 fputc ('\n', asm_out_file);
9484 fprintf (asm_out_file, ".section\t%s", name);
9485
9486 if (flags & SECTION_VMS_OVERLAY)
9487 fprintf (asm_out_file, ",OVR");
9488 if (flags & SECTION_VMS_GLOBAL)
9489 fprintf (asm_out_file, ",GBL");
9490 if (flags & SECTION_VMS_INITIALIZE)
9491 fprintf (asm_out_file, ",NOMOD");
9492 if (flags & SECTION_DEBUG)
9493 fprintf (asm_out_file, ",NOWRT");
9494
9495 fputc ('\n', asm_out_file);
9496 }
9497
9498 /* Record an element in the table of global constructors. SYMBOL is
9499 a SYMBOL_REF of the function to be called; PRIORITY is a number
9500 between 0 and MAX_INIT_PRIORITY.
9501
9502 Differs from default_ctors_section_asm_out_constructor in that the
9503 width of the .ctors entry is always 64 bits, rather than the 32 bits
9504 used by a normal pointer. */
9505
9506 static void
9507 vms_asm_out_constructor (symbol, priority)
9508 rtx symbol;
9509 int priority ATTRIBUTE_UNUSED;
9510 {
9511 ctors_section ();
9512 assemble_align (BITS_PER_WORD);
9513 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9514 }
9515
9516 static void
9517 vms_asm_out_destructor (symbol, priority)
9518 rtx symbol;
9519 int priority ATTRIBUTE_UNUSED;
9520 {
9521 dtors_section ();
9522 assemble_align (BITS_PER_WORD);
9523 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9524 }
9525 #else
9526
9527 rtx
9528 alpha_need_linkage (name, is_local)
9529 const char *name ATTRIBUTE_UNUSED;
9530 int is_local ATTRIBUTE_UNUSED;
9531 {
9532 return NULL_RTX;
9533 }
9534
9535 rtx
9536 alpha_use_linkage (linkage, cfundecl, lflag, rflag)
9537 rtx linkage ATTRIBUTE_UNUSED;
9538 tree cfundecl ATTRIBUTE_UNUSED;
9539 int lflag ATTRIBUTE_UNUSED;
9540 int rflag ATTRIBUTE_UNUSED;
9541 {
9542 return NULL_RTX;
9543 }
9544
9545 #endif /* TARGET_ABI_OPEN_VMS */
9546 \f
9547 #if TARGET_ABI_UNICOSMK
9548
9549 static void unicosmk_output_module_name PARAMS ((FILE *));
9550 static void unicosmk_output_default_externs PARAMS ((FILE *));
9551 static void unicosmk_output_dex PARAMS ((FILE *));
9552 static void unicosmk_output_externs PARAMS ((FILE *));
9553 static void unicosmk_output_addr_vec PARAMS ((FILE *, rtx));
9554 static const char *unicosmk_ssib_name PARAMS ((void));
9555 static int unicosmk_special_name PARAMS ((const char *));
9556
9557 /* Define the offset between two registers, one to be eliminated, and the
9558 other its replacement, at the start of a routine. */
9559
9560 int
9561 unicosmk_initial_elimination_offset (from, to)
9562 int from;
9563 int to;
9564 {
9565 int fixed_size;
9566
9567 fixed_size = alpha_sa_size();
9568 if (fixed_size != 0)
9569 fixed_size += 48;
9570
9571 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9572 return -fixed_size;
9573 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9574 return 0;
9575 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9576 return (ALPHA_ROUND (current_function_outgoing_args_size)
9577 + ALPHA_ROUND (get_frame_size()));
9578 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9579 return (ALPHA_ROUND (fixed_size)
9580 + ALPHA_ROUND (get_frame_size()
9581 + current_function_outgoing_args_size));
9582 else
9583 abort ();
9584 }
9585
9586 /* Output the module name for .ident and .end directives. We have to strip
9587 directories and add make sure that the module name starts with a letter
9588 or '$'. */
9589
9590 static void
9591 unicosmk_output_module_name (file)
9592 FILE *file;
9593 {
9594 const char *name;
9595
9596 /* Strip directories. */
9597
9598 name = strrchr (main_input_filename, '/');
9599 if (name)
9600 ++name;
9601 else
9602 name = main_input_filename;
9603
9604 /* CAM only accepts module names that start with a letter or '$'. We
9605 prefix the module name with a '$' if necessary. */
9606
9607 if (!ISALPHA (*name))
9608 putc ('$', file);
9609 output_clean_symbol_name (file, name);
9610 }
9611
9612 /* Output text that to appear at the beginning of an assembler file. */
9613
9614 void
9615 unicosmk_asm_file_start (file)
9616 FILE *file;
9617 {
9618 int i;
9619
9620 fputs ("\t.ident\t", file);
9621 unicosmk_output_module_name (file);
9622 fputs ("\n\n", file);
9623
9624 /* The Unicos/Mk assembler uses different register names. Instead of trying
9625 to support them, we simply use micro definitions. */
9626
9627 /* CAM has different register names: rN for the integer register N and fN
9628 for the floating-point register N. Instead of trying to use these in
9629 alpha.md, we define the symbols $N and $fN to refer to the appropriate
9630 register. */
9631
9632 for (i = 0; i < 32; ++i)
9633 fprintf (file, "$%d <- r%d\n", i, i);
9634
9635 for (i = 0; i < 32; ++i)
9636 fprintf (file, "$f%d <- f%d\n", i, i);
9637
9638 putc ('\n', file);
9639
9640 /* The .align directive fill unused space with zeroes which does not work
9641 in code sections. We define the macro 'gcc@code@align' which uses nops
9642 instead. Note that it assumes that code sections always have the
9643 biggest possible alignment since . refers to the current offset from
9644 the beginning of the section. */
9645
9646 fputs ("\t.macro gcc@code@align n\n", file);
9647 fputs ("gcc@n@bytes = 1 << n\n", file);
9648 fputs ("gcc@here = . % gcc@n@bytes\n", file);
9649 fputs ("\t.if ne, gcc@here, 0\n", file);
9650 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file);
9651 fputs ("\tbis r31,r31,r31\n", file);
9652 fputs ("\t.endr\n", file);
9653 fputs ("\t.endif\n", file);
9654 fputs ("\t.endm gcc@code@align\n\n", file);
9655
9656 /* Output extern declarations which should always be visible. */
9657 unicosmk_output_default_externs (file);
9658
9659 /* Open a dummy section. We always need to be inside a section for the
9660 section-switching code to work correctly.
9661 ??? This should be a module id or something like that. I still have to
9662 figure out what the rules for those are. */
9663 fputs ("\n\t.psect\t$SG00000,data\n", file);
9664 }
9665
9666 /* Output text to appear at the end of an assembler file. This includes all
9667 pending extern declarations and DEX expressions. */
9668
9669 static void
9670 unicosmk_file_end ()
9671 {
9672 fputs ("\t.endp\n\n", asm_out_file);
9673
9674 /* Output all pending externs. */
9675
9676 unicosmk_output_externs (asm_out_file);
9677
9678 /* Output dex definitions used for functions whose names conflict with
9679 register names. */
9680
9681 unicosmk_output_dex (asm_out_file);
9682
9683 fputs ("\t.end\t", asm_out_file);
9684 unicosmk_output_module_name (asm_out_file);
9685 putc ('\n', asm_out_file);
9686 }
9687
9688 /* Output the definition of a common variable. */
9689
9690 void
9691 unicosmk_output_common (file, name, size, align)
9692 FILE *file;
9693 const char *name;
9694 int size;
9695 int align;
9696 {
9697 tree name_tree;
9698 printf ("T3E__: common %s\n", name);
9699
9700 common_section ();
9701 fputs("\t.endp\n\n\t.psect ", file);
9702 assemble_name(file, name);
9703 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9704 fprintf(file, "\t.byte\t0:%d\n", size);
9705
9706 /* Mark the symbol as defined in this module. */
9707 name_tree = get_identifier (name);
9708 TREE_ASM_WRITTEN (name_tree) = 1;
9709 }
9710
9711 #define SECTION_PUBLIC SECTION_MACH_DEP
9712 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9713 static int current_section_align;
9714
9715 static unsigned int
9716 unicosmk_section_type_flags (decl, name, reloc)
9717 tree decl;
9718 const char *name;
9719 int reloc ATTRIBUTE_UNUSED;
9720 {
9721 unsigned int flags = default_section_type_flags (decl, name, reloc);
9722
9723 if (!decl)
9724 return flags;
9725
9726 if (TREE_CODE (decl) == FUNCTION_DECL)
9727 {
9728 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9729 if (align_functions_log > current_section_align)
9730 current_section_align = align_functions_log;
9731
9732 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9733 flags |= SECTION_MAIN;
9734 }
9735 else
9736 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9737
9738 if (TREE_PUBLIC (decl))
9739 flags |= SECTION_PUBLIC;
9740
9741 return flags;
9742 }
9743
9744 /* Generate a section name for decl and associate it with the
9745 declaration. */
9746
9747 static void
9748 unicosmk_unique_section (decl, reloc)
9749 tree decl;
9750 int reloc ATTRIBUTE_UNUSED;
9751 {
9752 const char *name;
9753 int len;
9754
9755 if (!decl)
9756 abort ();
9757
9758 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9759 name = default_strip_name_encoding (name);
9760 len = strlen (name);
9761
9762 if (TREE_CODE (decl) == FUNCTION_DECL)
9763 {
9764 char *string;
9765
9766 /* It is essential that we prefix the section name here because
9767 otherwise the section names generated for constructors and
9768 destructors confuse collect2. */
9769
9770 string = alloca (len + 6);
9771 sprintf (string, "code@%s", name);
9772 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9773 }
9774 else if (TREE_PUBLIC (decl))
9775 DECL_SECTION_NAME (decl) = build_string (len, name);
9776 else
9777 {
9778 char *string;
9779
9780 string = alloca (len + 6);
9781 sprintf (string, "data@%s", name);
9782 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9783 }
9784 }
9785
9786 /* Switch to an arbitrary section NAME with attributes as specified
9787 by FLAGS. ALIGN specifies any known alignment requirements for
9788 the section; 0 if the default should be used. */
9789
9790 static void
9791 unicosmk_asm_named_section (name, flags)
9792 const char *name;
9793 unsigned int flags;
9794 {
9795 const char *kind;
9796
9797 /* Close the previous section. */
9798
9799 fputs ("\t.endp\n\n", asm_out_file);
9800
9801 /* Find out what kind of section we are opening. */
9802
9803 if (flags & SECTION_MAIN)
9804 fputs ("\t.start\tmain\n", asm_out_file);
9805
9806 if (flags & SECTION_CODE)
9807 kind = "code";
9808 else if (flags & SECTION_PUBLIC)
9809 kind = "common";
9810 else
9811 kind = "data";
9812
9813 if (current_section_align != 0)
9814 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9815 current_section_align, kind);
9816 else
9817 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9818 }
9819
9820 static void
9821 unicosmk_insert_attributes (decl, attr_ptr)
9822 tree decl;
9823 tree *attr_ptr ATTRIBUTE_UNUSED;
9824 {
9825 if (DECL_P (decl)
9826 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9827 unicosmk_unique_section (decl, 0);
9828 }
9829
9830 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9831 in code sections because .align fill unused space with zeroes. */
9832
9833 void
9834 unicosmk_output_align (file, align)
9835 FILE *file;
9836 int align;
9837 {
9838 if (inside_function)
9839 fprintf (file, "\tgcc@code@align\t%d\n", align);
9840 else
9841 fprintf (file, "\t.align\t%d\n", align);
9842 }
9843
9844 /* Add a case vector to the current function's list of deferred case
9845 vectors. Case vectors have to be put into a separate section because CAM
9846 does not allow data definitions in code sections. */
9847
9848 void
9849 unicosmk_defer_case_vector (lab, vec)
9850 rtx lab;
9851 rtx vec;
9852 {
9853 struct machine_function *machine = cfun->machine;
9854
9855 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9856 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9857 machine->addr_list);
9858 }
9859
9860 /* Output a case vector. */
9861
9862 static void
9863 unicosmk_output_addr_vec (file, vec)
9864 FILE *file;
9865 rtx vec;
9866 {
9867 rtx lab = XEXP (vec, 0);
9868 rtx body = XEXP (vec, 1);
9869 int vlen = XVECLEN (body, 0);
9870 int idx;
9871
9872 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9873
9874 for (idx = 0; idx < vlen; idx++)
9875 {
9876 ASM_OUTPUT_ADDR_VEC_ELT
9877 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9878 }
9879 }
9880
9881 /* Output current function's deferred case vectors. */
9882
9883 static void
9884 unicosmk_output_deferred_case_vectors (file)
9885 FILE *file;
9886 {
9887 struct machine_function *machine = cfun->machine;
9888 rtx t;
9889
9890 if (machine->addr_list == NULL_RTX)
9891 return;
9892
9893 data_section ();
9894 for (t = machine->addr_list; t; t = XEXP (t, 1))
9895 unicosmk_output_addr_vec (file, XEXP (t, 0));
9896 }
9897
9898 /* Set up the dynamic subprogram information block (DSIB) and update the
9899 frame pointer register ($15) for subroutines which have a frame. If the
9900 subroutine doesn't have a frame, simply increment $15. */
9901
9902 static void
9903 unicosmk_gen_dsib (imaskP)
9904 unsigned long * imaskP;
9905 {
9906 if (alpha_procedure_type == PT_STACK)
9907 {
9908 const char *ssib_name;
9909 rtx mem;
9910
9911 /* Allocate 64 bytes for the DSIB. */
9912
9913 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9914 GEN_INT (-64))));
9915 emit_insn (gen_blockage ());
9916
9917 /* Save the return address. */
9918
9919 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
9920 set_mem_alias_set (mem, alpha_sr_alias_set);
9921 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
9922 (*imaskP) &= ~(1UL << REG_RA);
9923
9924 /* Save the old frame pointer. */
9925
9926 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
9927 set_mem_alias_set (mem, alpha_sr_alias_set);
9928 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
9929 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9930
9931 emit_insn (gen_blockage ());
9932
9933 /* Store the SSIB pointer. */
9934
9935 ssib_name = ggc_strdup (unicosmk_ssib_name ());
9936 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
9937 set_mem_alias_set (mem, alpha_sr_alias_set);
9938
9939 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
9940 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
9941 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
9942
9943 /* Save the CIW index. */
9944
9945 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
9946 set_mem_alias_set (mem, alpha_sr_alias_set);
9947 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
9948
9949 emit_insn (gen_blockage ());
9950
9951 /* Set the new frame pointer. */
9952
9953 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9954 stack_pointer_rtx, GEN_INT (64))));
9955
9956 }
9957 else
9958 {
9959 /* Increment the frame pointer register to indicate that we do not
9960 have a frame. */
9961
9962 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9963 hard_frame_pointer_rtx, GEN_INT (1))));
9964 }
9965 }
9966
9967 #define SSIB_PREFIX "__SSIB_"
9968 #define SSIB_PREFIX_LEN 7
9969
9970 /* Generate the name of the SSIB section for the current function. */
9971
9972 static const char *
9973 unicosmk_ssib_name ()
9974 {
9975 /* This is ok since CAM won't be able to deal with names longer than that
9976 anyway. */
9977
9978 static char name[256];
9979
9980 rtx x;
9981 const char *fnname;
9982 int len;
9983
9984 x = DECL_RTL (cfun->decl);
9985 if (GET_CODE (x) != MEM)
9986 abort ();
9987 x = XEXP (x, 0);
9988 if (GET_CODE (x) != SYMBOL_REF)
9989 abort ();
9990 fnname = XSTR (x, 0);
9991
9992 len = strlen (fnname);
9993 if (len + SSIB_PREFIX_LEN > 255)
9994 len = 255 - SSIB_PREFIX_LEN;
9995
9996 strcpy (name, SSIB_PREFIX);
9997 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
9998 name[len + SSIB_PREFIX_LEN] = 0;
9999
10000 return name;
10001 }
10002
10003 /* Output the static subroutine information block for the current
10004 function. */
10005
10006 static void
10007 unicosmk_output_ssib (file, fnname)
10008 FILE *file;
10009 const char *fnname;
10010 {
10011 int len;
10012 int i;
10013 rtx x;
10014 rtx ciw;
10015 struct machine_function *machine = cfun->machine;
10016
10017 ssib_section ();
10018 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10019 unicosmk_ssib_name ());
10020
10021 /* Some required stuff and the function name length. */
10022
10023 len = strlen (fnname);
10024 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10025
10026 /* Saved registers
10027 ??? We don't do that yet. */
10028
10029 fputs ("\t.quad\t0\n", file);
10030
10031 /* Function address. */
10032
10033 fputs ("\t.quad\t", file);
10034 assemble_name (file, fnname);
10035 putc ('\n', file);
10036
10037 fputs ("\t.quad\t0\n", file);
10038 fputs ("\t.quad\t0\n", file);
10039
10040 /* Function name.
10041 ??? We do it the same way Cray CC does it but this could be
10042 simplified. */
10043
10044 for( i = 0; i < len; i++ )
10045 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10046 if( (len % 8) == 0 )
10047 fputs ("\t.quad\t0\n", file);
10048 else
10049 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10050
10051 /* All call information words used in the function. */
10052
10053 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10054 {
10055 ciw = XEXP (x, 0);
10056 #if HOST_BITS_PER_WIDE_INT == 32
10057 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10058 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10059 #else
10060 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10061 #endif
10062 }
10063 }
10064
10065 /* Add a call information word (CIW) to the list of the current function's
10066 CIWs and return its index.
10067
10068 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10069
10070 rtx
10071 unicosmk_add_call_info_word (x)
10072 rtx x;
10073 {
10074 rtx node;
10075 struct machine_function *machine = cfun->machine;
10076
10077 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10078 if (machine->first_ciw == NULL_RTX)
10079 machine->first_ciw = node;
10080 else
10081 XEXP (machine->last_ciw, 1) = node;
10082
10083 machine->last_ciw = node;
10084 ++machine->ciw_count;
10085
10086 return GEN_INT (machine->ciw_count
10087 + strlen (current_function_name)/8 + 5);
10088 }
10089
10090 static char unicosmk_section_buf[100];
10091
10092 char *
10093 unicosmk_text_section ()
10094 {
10095 static int count = 0;
10096 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10097 count++);
10098 return unicosmk_section_buf;
10099 }
10100
10101 char *
10102 unicosmk_data_section ()
10103 {
10104 static int count = 1;
10105 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10106 count++);
10107 return unicosmk_section_buf;
10108 }
10109
10110 /* The Cray assembler doesn't accept extern declarations for symbols which
10111 are defined in the same file. We have to keep track of all global
10112 symbols which are referenced and/or defined in a source file and output
10113 extern declarations for those which are referenced but not defined at
10114 the end of file. */
10115
10116 /* List of identifiers for which an extern declaration might have to be
10117 emitted. */
10118 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10119
10120 struct unicosmk_extern_list
10121 {
10122 struct unicosmk_extern_list *next;
10123 const char *name;
10124 };
10125
10126 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10127
10128 /* Output extern declarations which are required for every asm file. */
10129
10130 static void
10131 unicosmk_output_default_externs (file)
10132 FILE *file;
10133 {
10134 static const char *const externs[] =
10135 { "__T3E_MISMATCH" };
10136
10137 int i;
10138 int n;
10139
10140 n = ARRAY_SIZE (externs);
10141
10142 for (i = 0; i < n; i++)
10143 fprintf (file, "\t.extern\t%s\n", externs[i]);
10144 }
10145
10146 /* Output extern declarations for global symbols which are have been
10147 referenced but not defined. */
10148
10149 static void
10150 unicosmk_output_externs (file)
10151 FILE *file;
10152 {
10153 struct unicosmk_extern_list *p;
10154 const char *real_name;
10155 int len;
10156 tree name_tree;
10157
10158 len = strlen (user_label_prefix);
10159 for (p = unicosmk_extern_head; p != 0; p = p->next)
10160 {
10161 /* We have to strip the encoding and possibly remove user_label_prefix
10162 from the identifier in order to handle -fleading-underscore and
10163 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10164 real_name = default_strip_name_encoding (p->name);
10165 if (len && p->name[0] == '*'
10166 && !memcmp (real_name, user_label_prefix, len))
10167 real_name += len;
10168
10169 name_tree = get_identifier (real_name);
10170 if (! TREE_ASM_WRITTEN (name_tree))
10171 {
10172 TREE_ASM_WRITTEN (name_tree) = 1;
10173 fputs ("\t.extern\t", file);
10174 assemble_name (file, p->name);
10175 putc ('\n', file);
10176 }
10177 }
10178 }
10179
10180 /* Record an extern. */
10181
10182 void
10183 unicosmk_add_extern (name)
10184 const char *name;
10185 {
10186 struct unicosmk_extern_list *p;
10187
10188 p = (struct unicosmk_extern_list *)
10189 xmalloc (sizeof (struct unicosmk_extern_list));
10190 p->next = unicosmk_extern_head;
10191 p->name = name;
10192 unicosmk_extern_head = p;
10193 }
10194
10195 /* The Cray assembler generates incorrect code if identifiers which
10196 conflict with register names are used as instruction operands. We have
10197 to replace such identifiers with DEX expressions. */
10198
10199 /* Structure to collect identifiers which have been replaced by DEX
10200 expressions. */
10201 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10202
10203 struct unicosmk_dex {
10204 struct unicosmk_dex *next;
10205 const char *name;
10206 };
10207
10208 /* List of identifiers which have been replaced by DEX expressions. The DEX
10209 number is determined by the position in the list. */
10210
10211 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10212
10213 /* The number of elements in the DEX list. */
10214
10215 static int unicosmk_dex_count = 0;
10216
10217 /* Check if NAME must be replaced by a DEX expression. */
10218
10219 static int
10220 unicosmk_special_name (name)
10221 const char *name;
10222 {
10223 if (name[0] == '*')
10224 ++name;
10225
10226 if (name[0] == '$')
10227 ++name;
10228
10229 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10230 return 0;
10231
10232 switch (name[1])
10233 {
10234 case '1': case '2':
10235 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10236
10237 case '3':
10238 return (name[2] == '\0'
10239 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10240
10241 default:
10242 return (ISDIGIT (name[1]) && name[2] == '\0');
10243 }
10244 }
10245
10246 /* Return the DEX number if X must be replaced by a DEX expression and 0
10247 otherwise. */
10248
10249 static int
10250 unicosmk_need_dex (x)
10251 rtx x;
10252 {
10253 struct unicosmk_dex *dex;
10254 const char *name;
10255 int i;
10256
10257 if (GET_CODE (x) != SYMBOL_REF)
10258 return 0;
10259
10260 name = XSTR (x,0);
10261 if (! unicosmk_special_name (name))
10262 return 0;
10263
10264 i = unicosmk_dex_count;
10265 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10266 {
10267 if (! strcmp (name, dex->name))
10268 return i;
10269 --i;
10270 }
10271
10272 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10273 dex->name = name;
10274 dex->next = unicosmk_dex_list;
10275 unicosmk_dex_list = dex;
10276
10277 ++unicosmk_dex_count;
10278 return unicosmk_dex_count;
10279 }
10280
10281 /* Output the DEX definitions for this file. */
10282
10283 static void
10284 unicosmk_output_dex (file)
10285 FILE *file;
10286 {
10287 struct unicosmk_dex *dex;
10288 int i;
10289
10290 if (unicosmk_dex_list == NULL)
10291 return;
10292
10293 fprintf (file, "\t.dexstart\n");
10294
10295 i = unicosmk_dex_count;
10296 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10297 {
10298 fprintf (file, "\tDEX (%d) = ", i);
10299 assemble_name (file, dex->name);
10300 putc ('\n', file);
10301 --i;
10302 }
10303
10304 fprintf (file, "\t.dexend\n");
10305 }
10306
10307 #else
10308
10309 static void
10310 unicosmk_output_deferred_case_vectors (file)
10311 FILE *file ATTRIBUTE_UNUSED;
10312 {}
10313
10314 static void
10315 unicosmk_gen_dsib (imaskP)
10316 unsigned long * imaskP ATTRIBUTE_UNUSED;
10317 {}
10318
10319 static void
10320 unicosmk_output_ssib (file, fnname)
10321 FILE * file ATTRIBUTE_UNUSED;
10322 const char * fnname ATTRIBUTE_UNUSED;
10323 {}
10324
10325 rtx
10326 unicosmk_add_call_info_word (x)
10327 rtx x ATTRIBUTE_UNUSED;
10328 {
10329 return NULL_RTX;
10330 }
10331
10332 static int
10333 unicosmk_need_dex (x)
10334 rtx x ATTRIBUTE_UNUSED;
10335 {
10336 return 0;
10337 }
10338
10339 #endif /* TARGET_ABI_UNICOSMK */
10340
10341 #include "gt-alpha.h"
10342