1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "common/common-target.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include "splay-tree.h"
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60 #include "libfuncs.h"
61 #include "opts.h"
62
63 /* Specify which cpu to schedule for. */
64 enum processor_type alpha_tune;
65
66 /* Which cpu we're generating code for. */
67 enum processor_type alpha_cpu;
68
69 static const char * const alpha_cpu_name[] =
70 {
71 "ev4", "ev5", "ev6"
72 };
73
74 /* Specify how accurate floating-point traps need to be. */
75
76 enum alpha_trap_precision alpha_tp;
77
78 /* Specify the floating-point rounding mode. */
79
80 enum alpha_fp_rounding_mode alpha_fprm;
81
82 /* Specify which things cause traps. */
83
84 enum alpha_fp_trap_mode alpha_fptm;
85
86 /* Nonzero if inside of a function, because the Alpha asm can't
87 handle .files inside of functions. */
88
89 static int inside_function = FALSE;
90
91 /* The number of cycles of latency we should assume on memory reads. */
92
93 int alpha_memory_latency = 3;
94
95 /* Whether the function needs the GP. */
96
97 static int alpha_function_needs_gp;
98
99 /* The assembler name of the current function. */
100
101 static const char *alpha_fnname;
102
103 /* The next explicit relocation sequence number. */
104 extern GTY(()) int alpha_next_sequence_number;
105 int alpha_next_sequence_number = 1;
106
107 /* The literal and gpdisp sequence numbers for this insn, as printed
108 by %# and %* respectively. */
109 extern GTY(()) int alpha_this_literal_sequence_number;
110 extern GTY(()) int alpha_this_gpdisp_sequence_number;
111 int alpha_this_literal_sequence_number;
112 int alpha_this_gpdisp_sequence_number;
113
114 /* Costs of various operations on the different architectures. */
115
116 struct alpha_rtx_cost_data
117 {
118 unsigned char fp_add;
119 unsigned char fp_mult;
120 unsigned char fp_div_sf;
121 unsigned char fp_div_df;
122 unsigned char int_mult_si;
123 unsigned char int_mult_di;
124 unsigned char int_shift;
125 unsigned char int_cmov;
126 unsigned short int_div;
127 };
128
129 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
130 {
131 { /* EV4 */
132 COSTS_N_INSNS (6), /* fp_add */
133 COSTS_N_INSNS (6), /* fp_mult */
134 COSTS_N_INSNS (34), /* fp_div_sf */
135 COSTS_N_INSNS (63), /* fp_div_df */
136 COSTS_N_INSNS (23), /* int_mult_si */
137 COSTS_N_INSNS (23), /* int_mult_di */
138 COSTS_N_INSNS (2), /* int_shift */
139 COSTS_N_INSNS (2), /* int_cmov */
140 COSTS_N_INSNS (97), /* int_div */
141 },
142 { /* EV5 */
143 COSTS_N_INSNS (4), /* fp_add */
144 COSTS_N_INSNS (4), /* fp_mult */
145 COSTS_N_INSNS (15), /* fp_div_sf */
146 COSTS_N_INSNS (22), /* fp_div_df */
147 COSTS_N_INSNS (8), /* int_mult_si */
148 COSTS_N_INSNS (12), /* int_mult_di */
149 COSTS_N_INSNS (1) + 1, /* int_shift */
150 COSTS_N_INSNS (1), /* int_cmov */
151 COSTS_N_INSNS (83), /* int_div */
152 },
153 { /* EV6 */
154 COSTS_N_INSNS (4), /* fp_add */
155 COSTS_N_INSNS (4), /* fp_mult */
156 COSTS_N_INSNS (12), /* fp_div_sf */
157 COSTS_N_INSNS (15), /* fp_div_df */
158 COSTS_N_INSNS (7), /* int_mult_si */
159 COSTS_N_INSNS (7), /* int_mult_di */
160 COSTS_N_INSNS (1), /* int_shift */
161 COSTS_N_INSNS (2), /* int_cmov */
162 COSTS_N_INSNS (86), /* int_div */
163 },
164 };
165
166 /* Similar but tuned for code size instead of execution latency. The
167 extra +N is fractional cost tuning based on latency. It's used to
168 encourage use of cheaper insns like shift, but only if there's just
169 one of them. */
170
171 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
172 {
173 COSTS_N_INSNS (1), /* fp_add */
174 COSTS_N_INSNS (1), /* fp_mult */
175 COSTS_N_INSNS (1), /* fp_div_sf */
176 COSTS_N_INSNS (1) + 1, /* fp_div_df */
177 COSTS_N_INSNS (1) + 1, /* int_mult_si */
178 COSTS_N_INSNS (1) + 2, /* int_mult_di */
179 COSTS_N_INSNS (1), /* int_shift */
180 COSTS_N_INSNS (1), /* int_cmov */
181 COSTS_N_INSNS (6), /* int_div */
182 };
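
/* For reference, COSTS_N_INSNS (N) expands to N * 4 (see rtl.h), so the
   "+ 1" and "+ 2" adjustments above add only a fraction of one insn's
   cost, enough to break ties toward the cheaper sequence without
   distorting the size-based metric. */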
183
184 /* Get the number of args of a function in one of two ways. */
185 #if TARGET_ABI_OPEN_VMS
186 #define NUM_ARGS crtl->args.info.num_args
187 #else
188 #define NUM_ARGS crtl->args.info
189 #endif
190
191 #define REG_PV 27
192 #define REG_RA 26
193
194 /* Declarations of static functions. */
195 static struct machine_function *alpha_init_machine_status (void);
196 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
197
198 #if TARGET_ABI_OPEN_VMS
199 static void alpha_write_linkage (FILE *, const char *, tree);
200 static bool vms_valid_pointer_mode (enum machine_mode);
201 #endif
202 \f
203 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
204 /* Implement TARGET_MANGLE_TYPE. */
205
206 static const char *
207 alpha_mangle_type (const_tree type)
208 {
209 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
210 && TARGET_LONG_DOUBLE_128)
211 return "g";
212
213 /* For all other types, use normal C++ mangling. */
214 return NULL;
215 }
216 #endif
217
218 /* Parse target option strings. */
219
220 static void
221 alpha_option_override (void)
222 {
223 static const struct cpu_table {
224 const char *const name;
225 const enum processor_type processor;
226 const int flags;
227 } cpu_table[] = {
228 { "ev4", PROCESSOR_EV4, 0 },
229 { "ev45", PROCESSOR_EV4, 0 },
230 { "21064", PROCESSOR_EV4, 0 },
231 { "ev5", PROCESSOR_EV5, 0 },
232 { "21164", PROCESSOR_EV5, 0 },
233 { "ev56", PROCESSOR_EV5, MASK_BWX },
234 { "21164a", PROCESSOR_EV5, MASK_BWX },
235 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
236 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
237 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
238 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
239 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
240 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
241 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
242 };
243
244 int const ct_size = ARRAY_SIZE (cpu_table);
245 int i;
246
247 #ifdef SUBTARGET_OVERRIDE_OPTIONS
248 SUBTARGET_OVERRIDE_OPTIONS;
249 #endif
250
251 alpha_fprm = ALPHA_FPRM_NORM;
252 alpha_tp = ALPHA_TP_PROG;
253 alpha_fptm = ALPHA_FPTM_N;
254
255 if (TARGET_IEEE)
256 {
257 alpha_tp = ALPHA_TP_INSN;
258 alpha_fptm = ALPHA_FPTM_SU;
259 }
260 if (TARGET_IEEE_WITH_INEXACT)
261 {
262 alpha_tp = ALPHA_TP_INSN;
263 alpha_fptm = ALPHA_FPTM_SUI;
264 }
265
266 if (alpha_tp_string)
267 {
268 if (! strcmp (alpha_tp_string, "p"))
269 alpha_tp = ALPHA_TP_PROG;
270 else if (! strcmp (alpha_tp_string, "f"))
271 alpha_tp = ALPHA_TP_FUNC;
272 else if (! strcmp (alpha_tp_string, "i"))
273 alpha_tp = ALPHA_TP_INSN;
274 else
275 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
276 }
277
278 if (alpha_fprm_string)
279 {
280 if (! strcmp (alpha_fprm_string, "n"))
281 alpha_fprm = ALPHA_FPRM_NORM;
282 else if (! strcmp (alpha_fprm_string, "m"))
283 alpha_fprm = ALPHA_FPRM_MINF;
284 else if (! strcmp (alpha_fprm_string, "c"))
285 alpha_fprm = ALPHA_FPRM_CHOP;
286 else if (! strcmp (alpha_fprm_string,"d"))
287 alpha_fprm = ALPHA_FPRM_DYN;
288 else
289 error ("bad value %qs for -mfp-rounding-mode switch",
290 alpha_fprm_string);
291 }
292
293 if (alpha_fptm_string)
294 {
295 if (strcmp (alpha_fptm_string, "n") == 0)
296 alpha_fptm = ALPHA_FPTM_N;
297 else if (strcmp (alpha_fptm_string, "u") == 0)
298 alpha_fptm = ALPHA_FPTM_U;
299 else if (strcmp (alpha_fptm_string, "su") == 0)
300 alpha_fptm = ALPHA_FPTM_SU;
301 else if (strcmp (alpha_fptm_string, "sui") == 0)
302 alpha_fptm = ALPHA_FPTM_SUI;
303 else
304 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
305 }
306
307 if (alpha_cpu_string)
308 {
309 for (i = 0; i < ct_size; i++)
310 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
311 {
312 alpha_tune = alpha_cpu = cpu_table [i].processor;
313 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
314 target_flags |= cpu_table [i].flags;
315 break;
316 }
317 if (i == ct_size)
318 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
319 }
320
321 if (alpha_tune_string)
322 {
323 for (i = 0; i < ct_size; i++)
324 if (! strcmp (alpha_tune_string, cpu_table [i].name))
325 {
326 alpha_tune = cpu_table [i].processor;
327 break;
328 }
329 if (i == ct_size)
330 error ("bad value %qs for -mtune switch", alpha_tune_string);
331 }
332
333 /* Do some sanity checks on the above options. */
334
335 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
336 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
337 {
338 warning (0, "fp software completion requires -mtrap-precision=i");
339 alpha_tp = ALPHA_TP_INSN;
340 }
341
342 if (alpha_cpu == PROCESSOR_EV6)
343 {
344 /* Except for EV6 pass 1 (not released), we always have precise
345 arithmetic traps. Which means we can do software completion
346 without minding trap shadows. */
347 alpha_tp = ALPHA_TP_PROG;
348 }
349
350 if (TARGET_FLOAT_VAX)
351 {
352 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
353 {
354 warning (0, "rounding mode not supported for VAX floats");
355 alpha_fprm = ALPHA_FPRM_NORM;
356 }
357 if (alpha_fptm == ALPHA_FPTM_SUI)
358 {
359 warning (0, "trap mode not supported for VAX floats");
360 alpha_fptm = ALPHA_FPTM_SU;
361 }
362 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
363 warning (0, "128-bit long double not supported for VAX floats");
364 target_flags &= ~MASK_LONG_DOUBLE_128;
365 }
366
367 {
368 char *end;
369 int lat;
370
371 if (!alpha_mlat_string)
372 alpha_mlat_string = "L1";
373
374 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
375 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
376 ;
377 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
378 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
379 && alpha_mlat_string[2] == '\0')
380 {
381 static int const cache_latency[][4] =
382 {
383 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
384 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
385 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
386 };
387
388 lat = alpha_mlat_string[1] - '0';
389 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
390 {
391 warning (0, "L%d cache latency unknown for %s",
392 lat, alpha_cpu_name[alpha_tune]);
393 lat = 3;
394 }
395 else
396 lat = cache_latency[alpha_tune][lat-1];
397 }
398 else if (! strcmp (alpha_mlat_string, "main"))
399 {
400 /* Most current memories have about 370ns latency. This is
401 a reasonable guess for a fast cpu. */
402 lat = 150;
403 }
404 else
405 {
406 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
407 lat = 3;
408 }
409
410 alpha_memory_latency = lat;
411 }
412
413 /* Default the definition of "small data" to 8 bytes. */
414 if (!global_options_set.x_g_switch_value)
415 g_switch_value = 8;
416
417 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
418 if (flag_pic == 1)
419 target_flags |= MASK_SMALL_DATA;
420 else if (flag_pic == 2)
421 target_flags &= ~MASK_SMALL_DATA;
422
423 /* Align labels and loops for optimal branching. */
424 /* ??? Kludge these by not doing anything if we don't optimize and also if
425 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
426 if (optimize > 0 && write_symbols != SDB_DEBUG)
427 {
428 if (align_loops <= 0)
429 align_loops = 16;
430 if (align_jumps <= 0)
431 align_jumps = 16;
432 }
433 if (align_functions <= 0)
434 align_functions = 16;
435
436 /* Register variables and functions with the garbage collector. */
437
438 /* Set up function hooks. */
439 init_machine_status = alpha_init_machine_status;
440
441 /* Tell the compiler when we're using VAX floating point. */
442 if (TARGET_FLOAT_VAX)
443 {
444 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
445 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
446 REAL_MODE_FORMAT (TFmode) = NULL;
447 }
448
449 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
450 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
451 target_flags |= MASK_LONG_DOUBLE_128;
452 #endif
453 }
454 \f
455 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
456
457 int
458 zap_mask (HOST_WIDE_INT value)
459 {
460 int i;
461
462 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
463 i++, value >>= 8)
464 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
465 return 0;
466
467 return 1;
468 }
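
/* Illustration (64-bit HOST_WIDE_INT): 0xffff0000ffffffff and 0xff are
   zap masks, since each of their bytes is 0x00 or 0xff and so can be
   described by a single ZAP/ZAPNOT byte mask; 0x0100 is not, because
   its second byte is 0x01. */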
469
470 /* Return true if OP is valid for a particular TLS relocation.
471 We are already guaranteed that OP is a CONST. */
472
473 int
474 tls_symbolic_operand_1 (rtx op, int size, int unspec)
475 {
476 op = XEXP (op, 0);
477
478 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
479 return 0;
480 op = XVECEXP (op, 0, 0);
481
482 if (GET_CODE (op) != SYMBOL_REF)
483 return 0;
484
485 switch (SYMBOL_REF_TLS_MODEL (op))
486 {
487 case TLS_MODEL_LOCAL_DYNAMIC:
488 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
489 case TLS_MODEL_INITIAL_EXEC:
490 return unspec == UNSPEC_TPREL && size == 64;
491 case TLS_MODEL_LOCAL_EXEC:
492 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
493 default:
494 gcc_unreachable ();
495 }
496 }
497
498 /* Used by aligned_memory_operand and unaligned_memory_operand to
499 resolve what reload is going to do with OP if it's a register. */
500
501 rtx
502 resolve_reload_operand (rtx op)
503 {
504 if (reload_in_progress)
505 {
506 rtx tmp = op;
507 if (GET_CODE (tmp) == SUBREG)
508 tmp = SUBREG_REG (tmp);
509 if (REG_P (tmp)
510 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
511 {
512 op = reg_equiv_memory_loc (REGNO (tmp));
513 if (op == 0)
514 return 0;
515 }
516 }
517 return op;
518 }
519
520 /* The scalar modes supported differ from the default check-what-c-supports
521 version in that sometimes TFmode is available even when long double
522 indicates only DFmode. */
523
524 static bool
525 alpha_scalar_mode_supported_p (enum machine_mode mode)
526 {
527 switch (mode)
528 {
529 case QImode:
530 case HImode:
531 case SImode:
532 case DImode:
533 case TImode: /* via optabs.c */
534 return true;
535
536 case SFmode:
537 case DFmode:
538 return true;
539
540 case TFmode:
541 return TARGET_HAS_XFLOATING_LIBS;
542
543 default:
544 return false;
545 }
546 }
547
548 /* Alpha implements a couple of integer vector mode operations when
549 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
550 which allows the vectorizer to operate on e.g. move instructions,
551 or when expand_vector_operations can do something useful. */
552
553 static bool
554 alpha_vector_mode_supported_p (enum machine_mode mode)
555 {
556 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
557 }
558
559 /* Return 1 if this function can directly return via $26. */
560
561 int
562 direct_return (void)
563 {
564 return (TARGET_ABI_OSF
565 && reload_completed
566 && alpha_sa_size () == 0
567 && get_frame_size () == 0
568 && crtl->outgoing_args_size == 0
569 && crtl->args.pretend_args_size == 0);
570 }
571
572 /* Return the ADDR_VEC associated with a tablejump insn. */
573
574 rtx
575 alpha_tablejump_addr_vec (rtx insn)
576 {
577 rtx tmp;
578
579 tmp = JUMP_LABEL (insn);
580 if (!tmp)
581 return NULL_RTX;
582 tmp = NEXT_INSN (tmp);
583 if (!tmp)
584 return NULL_RTX;
585 if (JUMP_P (tmp)
586 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
587 return PATTERN (tmp);
588 return NULL_RTX;
589 }
590
591 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
592
593 rtx
594 alpha_tablejump_best_label (rtx insn)
595 {
596 rtx jump_table = alpha_tablejump_addr_vec (insn);
597 rtx best_label = NULL_RTX;
598
599 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
600 there for edge frequency counts from profile data. */
601
602 if (jump_table)
603 {
604 int n_labels = XVECLEN (jump_table, 1);
605 int best_count = -1;
606 int i, j;
607
608 for (i = 0; i < n_labels; i++)
609 {
610 int count = 1;
611
612 for (j = i + 1; j < n_labels; j++)
613 if (XEXP (XVECEXP (jump_table, 1, i), 0)
614 == XEXP (XVECEXP (jump_table, 1, j), 0))
615 count++;
616
617 if (count > best_count)
618 best_count = count, best_label = XVECEXP (jump_table, 1, i);
619 }
620 }
621
622 return best_label ? best_label : const0_rtx;
623 }
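
/* For example, given a 6-entry table whose targets are L1, L2, L1, L3,
   L1, L2, the duplicate counts are 3, 2 and 1, so the first L1 entry is
   returned; if the table cannot be found, const0_rtx is returned. */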
624
625 /* Return the TLS model to use for SYMBOL. */
626
627 static enum tls_model
628 tls_symbolic_operand_type (rtx symbol)
629 {
630 enum tls_model model;
631
632 if (GET_CODE (symbol) != SYMBOL_REF)
633 return TLS_MODEL_NONE;
634 model = SYMBOL_REF_TLS_MODEL (symbol);
635
636 /* Local-exec with a 64-bit size is the same code as initial-exec. */
637 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
638 model = TLS_MODEL_INITIAL_EXEC;
639
640 return model;
641 }
642 \f
643 /* Return true if the function DECL will share the same GP as any
644 function in the current unit of translation. */
645
646 static bool
647 decl_has_samegp (const_tree decl)
648 {
649 /* Functions that are not local can be overridden, and thus may
650 not share the same gp. */
651 if (!(*targetm.binds_local_p) (decl))
652 return false;
653
654 /* If -msmall-data is in effect, assume that there is only one GP
655 for the module, and so any local symbol has this property. We
656 need explicit relocations to be able to enforce this for symbols
657 not defined in this unit of translation, however. */
658 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
659 return true;
660
661 /* Functions that are not external are defined in this UoT. */
662 /* ??? Irritatingly, static functions not yet emitted are still
663 marked "external". Apply this to non-static functions only. */
664 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
665 }
666
667 /* Return true if EXP should be placed in the small data section. */
668
669 static bool
670 alpha_in_small_data_p (const_tree exp)
671 {
672 /* We want to merge strings, so we never consider them small data. */
673 if (TREE_CODE (exp) == STRING_CST)
674 return false;
675
676 /* Functions are never in the small data area. Duh. */
677 if (TREE_CODE (exp) == FUNCTION_DECL)
678 return false;
679
680 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
681 {
682 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
683 if (strcmp (section, ".sdata") == 0
684 || strcmp (section, ".sbss") == 0)
685 return true;
686 }
687 else
688 {
689 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
690
691 /* If this is an incomplete type with size 0, then we can't put it
692 in sdata because it might be too big when completed. */
693 if (size > 0 && size <= g_switch_value)
694 return true;
695 }
696
697 return false;
698 }
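
/* With the default -G 8 this admits objects of up to eight bytes, e.g. a
   single "long" or "int[2]", while a 12-byte struct stays out of .sdata;
   an explicit "sdata"/"sbss" section attribute is honored regardless of
   size. */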
699
700 #if TARGET_ABI_OPEN_VMS
701 static bool
702 vms_valid_pointer_mode (enum machine_mode mode)
703 {
704 return (mode == SImode || mode == DImode);
705 }
706
707 static bool
708 alpha_linkage_symbol_p (const char *symname)
709 {
710 int symlen = strlen (symname);
711
712 if (symlen > 4)
713 return strcmp (&symname [symlen - 4], "..lk") == 0;
714
715 return false;
716 }
717
718 #define LINKAGE_SYMBOL_REF_P(X) \
719 ((GET_CODE (X) == SYMBOL_REF \
720 && alpha_linkage_symbol_p (XSTR (X, 0))) \
721 || (GET_CODE (X) == CONST \
722 && GET_CODE (XEXP (X, 0)) == PLUS \
723 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
724 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
725 #endif
726
727 /* legitimate_address_p recognizes an RTL expression that is a valid
728 memory address for an instruction. The MODE argument is the
729 machine mode for the MEM expression that wants to use this address.
730
731 For Alpha, we have either a constant address or the sum of a
732 register and a constant address, or just a register. For DImode,
733 any of those forms can be surrounded with an AND that clears the
734 low-order three bits; this is an "unaligned" access. */
735
736 static bool
737 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
738 {
739 /* If this is an ldq_u type address, discard the outer AND. */
740 if (mode == DImode
741 && GET_CODE (x) == AND
742 && CONST_INT_P (XEXP (x, 1))
743 && INTVAL (XEXP (x, 1)) == -8)
744 x = XEXP (x, 0);
745
746 /* Discard non-paradoxical subregs. */
747 if (GET_CODE (x) == SUBREG
748 && (GET_MODE_SIZE (GET_MODE (x))
749 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
750 x = SUBREG_REG (x);
751
752 /* Unadorned general registers are valid. */
753 if (REG_P (x)
754 && (strict
755 ? STRICT_REG_OK_FOR_BASE_P (x)
756 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
757 return true;
758
759 /* Constant addresses (i.e. +/- 32k) are valid. */
760 if (CONSTANT_ADDRESS_P (x))
761 return true;
762
763 #if TARGET_ABI_OPEN_VMS
764 if (LINKAGE_SYMBOL_REF_P (x))
765 return true;
766 #endif
767
768 /* Register plus a small constant offset is valid. */
769 if (GET_CODE (x) == PLUS)
770 {
771 rtx ofs = XEXP (x, 1);
772 x = XEXP (x, 0);
773
774 /* Discard non-paradoxical subregs. */
775 if (GET_CODE (x) == SUBREG
776 && (GET_MODE_SIZE (GET_MODE (x))
777 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
778 x = SUBREG_REG (x);
779
780 if (REG_P (x))
781 {
782 if (! strict
783 && NONSTRICT_REG_OK_FP_BASE_P (x)
784 && CONST_INT_P (ofs))
785 return true;
786 if ((strict
787 ? STRICT_REG_OK_FOR_BASE_P (x)
788 : NONSTRICT_REG_OK_FOR_BASE_P (x))
789 && CONSTANT_ADDRESS_P (ofs))
790 return true;
791 }
792 }
793
794 /* If we're managing explicit relocations, LO_SUM is valid, as are small
795 data symbols. Avoid explicit relocations of modes larger than word
796 mode since e.g. $LC0+8($1) can fold around +/- 32k offset. */
797 else if (TARGET_EXPLICIT_RELOCS
798 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
799 {
800 if (small_symbolic_operand (x, Pmode))
801 return true;
802
803 if (GET_CODE (x) == LO_SUM)
804 {
805 rtx ofs = XEXP (x, 1);
806 x = XEXP (x, 0);
807
808 /* Discard non-paradoxical subregs. */
809 if (GET_CODE (x) == SUBREG
810 && (GET_MODE_SIZE (GET_MODE (x))
811 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
812 x = SUBREG_REG (x);
813
814 /* Must have a valid base register. */
815 if (! (REG_P (x)
816 && (strict
817 ? STRICT_REG_OK_FOR_BASE_P (x)
818 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
819 return false;
820
821 /* The symbol must be local. */
822 if (local_symbolic_operand (ofs, Pmode)
823 || dtp32_symbolic_operand (ofs, Pmode)
824 || tp32_symbolic_operand (ofs, Pmode))
825 return true;
826 }
827 }
828
829 return false;
830 }
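
/* Concretely, ($1), 120($1), and the ldq_u form (and (plus $1 -16) -8)
   for DImode are all accepted above, while (plus $1 $2) is not: Alpha
   has no register+register address mode. */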
831
832 /* Build the SYMBOL_REF for __tls_get_addr. */
833
834 static GTY(()) rtx tls_get_addr_libfunc;
835
836 static rtx
837 get_tls_get_addr (void)
838 {
839 if (!tls_get_addr_libfunc)
840 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
841 return tls_get_addr_libfunc;
842 }
843
844 /* Try machine-dependent ways of modifying an illegitimate address
845 to be legitimate. If we find one, return the new, valid address. */
846
847 static rtx
848 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
849 {
850 HOST_WIDE_INT addend;
851
852 /* If the address is (plus reg const_int) and the CONST_INT is not a
853 valid offset, compute the high part of the constant and add it to
854 the register. Then our address is (plus temp low-part-const). */
855 if (GET_CODE (x) == PLUS
856 && REG_P (XEXP (x, 0))
857 && CONST_INT_P (XEXP (x, 1))
858 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
859 {
860 addend = INTVAL (XEXP (x, 1));
861 x = XEXP (x, 0);
862 goto split_addend;
863 }
864
865 /* If the address is (const (plus FOO const_int)), find the low-order
866 part of the CONST_INT. Then load FOO plus any high-order part of the
867 CONST_INT into a register. Our address is (plus reg low-part-const).
868 This is done to reduce the number of GOT entries. */
869 if (can_create_pseudo_p ()
870 && GET_CODE (x) == CONST
871 && GET_CODE (XEXP (x, 0)) == PLUS
872 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
873 {
874 addend = INTVAL (XEXP (XEXP (x, 0), 1));
875 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
876 goto split_addend;
877 }
878
879 /* If we have a (plus reg const), emit the load as in (2), then add
880 the two registers, and finally generate (plus reg low-part-const) as
881 our address. */
882 if (can_create_pseudo_p ()
883 && GET_CODE (x) == PLUS
884 && REG_P (XEXP (x, 0))
885 && GET_CODE (XEXP (x, 1)) == CONST
886 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
887 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
888 {
889 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
890 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
891 XEXP (XEXP (XEXP (x, 1), 0), 0),
892 NULL_RTX, 1, OPTAB_LIB_WIDEN);
893 goto split_addend;
894 }
895
896 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
897 Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
898 around +/- 32k offset. */
899 if (TARGET_EXPLICIT_RELOCS
900 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
901 && symbolic_operand (x, Pmode))
902 {
903 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
904
905 switch (tls_symbolic_operand_type (x))
906 {
907 case TLS_MODEL_NONE:
908 break;
909
910 case TLS_MODEL_GLOBAL_DYNAMIC:
911 start_sequence ();
912
913 r0 = gen_rtx_REG (Pmode, 0);
914 r16 = gen_rtx_REG (Pmode, 16);
915 tga = get_tls_get_addr ();
916 dest = gen_reg_rtx (Pmode);
917 seq = GEN_INT (alpha_next_sequence_number++);
918
919 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
920 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
921 insn = emit_call_insn (insn);
922 RTL_CONST_CALL_P (insn) = 1;
923 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
924
925 insn = get_insns ();
926 end_sequence ();
927
928 emit_libcall_block (insn, dest, r0, x);
929 return dest;
930
931 case TLS_MODEL_LOCAL_DYNAMIC:
932 start_sequence ();
933
934 r0 = gen_rtx_REG (Pmode, 0);
935 r16 = gen_rtx_REG (Pmode, 16);
936 tga = get_tls_get_addr ();
937 scratch = gen_reg_rtx (Pmode);
938 seq = GEN_INT (alpha_next_sequence_number++);
939
940 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
941 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
942 insn = emit_call_insn (insn);
943 RTL_CONST_CALL_P (insn) = 1;
944 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
945
946 insn = get_insns ();
947 end_sequence ();
948
949 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
950 UNSPEC_TLSLDM_CALL);
951 emit_libcall_block (insn, scratch, r0, eqv);
952
953 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
954 eqv = gen_rtx_CONST (Pmode, eqv);
955
956 if (alpha_tls_size == 64)
957 {
958 dest = gen_reg_rtx (Pmode);
959 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
960 emit_insn (gen_adddi3 (dest, dest, scratch));
961 return dest;
962 }
963 if (alpha_tls_size == 32)
964 {
965 insn = gen_rtx_HIGH (Pmode, eqv);
966 insn = gen_rtx_PLUS (Pmode, scratch, insn);
967 scratch = gen_reg_rtx (Pmode);
968 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
969 }
970 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
971
972 case TLS_MODEL_INITIAL_EXEC:
973 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
974 eqv = gen_rtx_CONST (Pmode, eqv);
975 tp = gen_reg_rtx (Pmode);
976 scratch = gen_reg_rtx (Pmode);
977 dest = gen_reg_rtx (Pmode);
978
979 emit_insn (gen_load_tp (tp));
980 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
981 emit_insn (gen_adddi3 (dest, tp, scratch));
982 return dest;
983
984 case TLS_MODEL_LOCAL_EXEC:
985 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
986 eqv = gen_rtx_CONST (Pmode, eqv);
987 tp = gen_reg_rtx (Pmode);
988
989 emit_insn (gen_load_tp (tp));
990 if (alpha_tls_size == 32)
991 {
992 insn = gen_rtx_HIGH (Pmode, eqv);
993 insn = gen_rtx_PLUS (Pmode, tp, insn);
994 tp = gen_reg_rtx (Pmode);
995 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
996 }
997 return gen_rtx_LO_SUM (Pmode, tp, eqv);
998
999 default:
1000 gcc_unreachable ();
1001 }
1002
1003 if (local_symbolic_operand (x, Pmode))
1004 {
1005 if (small_symbolic_operand (x, Pmode))
1006 return x;
1007 else
1008 {
1009 if (can_create_pseudo_p ())
1010 scratch = gen_reg_rtx (Pmode);
1011 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1012 gen_rtx_HIGH (Pmode, x)));
1013 return gen_rtx_LO_SUM (Pmode, scratch, x);
1014 }
1015 }
1016 }
1017
1018 return NULL;
1019
1020 split_addend:
1021 {
1022 HOST_WIDE_INT low, high;
1023
1024 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1025 addend -= low;
1026 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1027 addend -= high;
1028
1029 if (addend)
1030 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1031 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1032 1, OPTAB_LIB_WIDEN);
1033 if (high)
1034 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1035 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1036 1, OPTAB_LIB_WIDEN);
1037
1038 return plus_constant (x, low);
1039 }
1040 }
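
/* split_addend example: for (plus $1 0x18000) the displacement does not
   fit in 16 signed bits, so low = -0x8000 and high = 0x20000; we add the
   high part (an ldah of 2) into a temporary and return (plus $tmp -32768),
   and indeed 0x20000 - 0x8000 == 0x18000. */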
1041
1042
1043 /* Try machine-dependent ways of modifying an illegitimate address
1044 to be legitimate. Return X or the new, valid address. */
1045
1046 static rtx
1047 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1048 enum machine_mode mode)
1049 {
1050 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1051 return new_x ? new_x : x;
1052 }
1053
1054 /* Primarily this is required for TLS symbols, but given that our move
1055 patterns *ought* to be able to handle any symbol at any time, we
1056 should never be spilling symbolic operands to the constant pool, ever. */
1057
1058 static bool
1059 alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1060 {
1061 enum rtx_code code = GET_CODE (x);
1062 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1063 }
1064
1065 /* We do not allow indirect calls to be optimized into sibling calls, nor
1066 can we allow a call to a function with a different GP to be optimized
1067 into a sibcall. */
1068
1069 static bool
1070 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1071 {
1072 /* Can't do indirect tail calls, since we don't know if the target
1073 uses the same GP. */
1074 if (!decl)
1075 return false;
1076
1077 /* Otherwise, we can make a tail call if the target function shares
1078 the same GP. */
1079 return decl_has_samegp (decl);
1080 }
1081
1082 int
1083 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1084 {
1085 rtx x = *px;
1086
1087 /* Don't re-split. */
1088 if (GET_CODE (x) == LO_SUM)
1089 return -1;
1090
1091 return small_symbolic_operand (x, Pmode) != 0;
1092 }
1093
1094 static int
1095 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1096 {
1097 rtx x = *px;
1098
1099 /* Don't re-split. */
1100 if (GET_CODE (x) == LO_SUM)
1101 return -1;
1102
1103 if (small_symbolic_operand (x, Pmode))
1104 {
1105 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1106 *px = x;
1107 return -1;
1108 }
1109
1110 return 0;
1111 }
1112
1113 rtx
1114 split_small_symbolic_operand (rtx x)
1115 {
1116 x = copy_insn (x);
1117 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1118 return x;
1119 }
1120
1121 /* Indicate that INSN cannot be duplicated. This is true for any insn
1122 that we've marked with gpdisp relocs, since those have to stay in
1123 1-1 correspondence with one another.
1124
1125 Technically we could copy them if we could set up a mapping from one
1126 sequence number to another, across the set of insns to be duplicated.
1127 This seems overly complicated and error-prone since interblock motion
1128 from sched-ebb could move one of the pair of insns to a different block.
1129
1130 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1131 then they'll be in a different block from their ldgp. Which could lead
1132 the bb reorder code to think that it would be ok to copy just the block
1133 containing the call and branch to the block containing the ldgp. */
1134
1135 static bool
1136 alpha_cannot_copy_insn_p (rtx insn)
1137 {
1138 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1139 return false;
1140 if (recog_memoized (insn) >= 0)
1141 return get_attr_cannot_copy (insn);
1142 else
1143 return false;
1144 }
1145
1146
1147 /* Try a machine-dependent way of reloading an illegitimate address
1148 operand. If we find one, push the reload and return the new rtx. */
1149
1150 rtx
1151 alpha_legitimize_reload_address (rtx x,
1152 enum machine_mode mode ATTRIBUTE_UNUSED,
1153 int opnum, int type,
1154 int ind_levels ATTRIBUTE_UNUSED)
1155 {
1156 /* We must recognize output that we have already generated ourselves. */
1157 if (GET_CODE (x) == PLUS
1158 && GET_CODE (XEXP (x, 0)) == PLUS
1159 && REG_P (XEXP (XEXP (x, 0), 0))
1160 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1161 && CONST_INT_P (XEXP (x, 1)))
1162 {
1163 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1164 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1165 opnum, (enum reload_type) type);
1166 return x;
1167 }
1168
1169 /* We wish to handle large displacements off a base register by
1170 splitting the addend across an ldah and the mem insn. This
1171 cuts the number of extra insns needed from 3 to 1. */
1172 if (GET_CODE (x) == PLUS
1173 && REG_P (XEXP (x, 0))
1174 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1175 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1176 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1177 {
1178 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1179 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1180 HOST_WIDE_INT high
1181 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1182
1183 /* Check for 32-bit overflow. */
1184 if (high + low != val)
1185 return NULL_RTX;
1186
1187 /* Reload the high part into a base reg; leave the low part
1188 in the mem directly. */
1189 x = gen_rtx_PLUS (GET_MODE (x),
1190 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1191 GEN_INT (high)),
1192 GEN_INT (low));
1193
1194 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1195 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1196 opnum, (enum reload_type) type);
1197 return x;
1198 }
1199
1200 return NULL_RTX;
1201 }
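
/* The 32-bit overflow check above matters for values such as 0x7fff8000:
   there low = -0x8000 and high = -0x80000000, so high + low != val and
   we return NULL_RTX instead of emitting a wrapping ldah. */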
1202 \f
1203 /* Compute a (partial) cost for rtx X. Return true if the complete
1204 cost has been computed, and false if subexpressions should be
1205 scanned. In either case, *TOTAL contains the cost result. */
1206
1207 static bool
1208 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1209 bool speed)
1210 {
1211 enum machine_mode mode = GET_MODE (x);
1212 bool float_mode_p = FLOAT_MODE_P (mode);
1213 const struct alpha_rtx_cost_data *cost_data;
1214
1215 if (!speed)
1216 cost_data = &alpha_rtx_cost_size;
1217 else
1218 cost_data = &alpha_rtx_cost_data[alpha_tune];
1219
1220 switch (code)
1221 {
1222 case CONST_INT:
1223 /* If this is an 8-bit constant, return zero since it can be used
1224 nearly anywhere with no cost. If it is a valid operand for an
1225 ADD or AND, likewise return 0 if we know it will be used in that
1226 context. Otherwise, return 2 since it might be used there later.
1227 All other constants take at least two insns. */
1228 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1229 {
1230 *total = 0;
1231 return true;
1232 }
1233 /* FALLTHRU */
1234
1235 case CONST_DOUBLE:
1236 if (x == CONST0_RTX (mode))
1237 *total = 0;
1238 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1239 || (outer_code == AND && and_operand (x, VOIDmode)))
1240 *total = 0;
1241 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1242 *total = 2;
1243 else
1244 *total = COSTS_N_INSNS (2);
1245 return true;
1246
1247 case CONST:
1248 case SYMBOL_REF:
1249 case LABEL_REF:
1250 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1251 *total = COSTS_N_INSNS (outer_code != MEM);
1252 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1253 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1254 else if (tls_symbolic_operand_type (x))
1255 /* Estimate of cost for call_pal rduniq. */
1256 /* ??? How many insns do we emit here? More than one... */
1257 *total = COSTS_N_INSNS (15);
1258 else
1259 /* Otherwise we do a load from the GOT. */
1260 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1261 return true;
1262
1263 case HIGH:
1264 /* This is effectively an add_operand. */
1265 *total = 2;
1266 return true;
1267
1268 case PLUS:
1269 case MINUS:
1270 if (float_mode_p)
1271 *total = cost_data->fp_add;
1272 else if (GET_CODE (XEXP (x, 0)) == MULT
1273 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1274 {
1275 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1276 (enum rtx_code) outer_code, speed)
1277 + rtx_cost (XEXP (x, 1),
1278 (enum rtx_code) outer_code, speed)
1279 + COSTS_N_INSNS (1));
1280 return true;
1281 }
1282 return false;
1283
1284 case MULT:
1285 if (float_mode_p)
1286 *total = cost_data->fp_mult;
1287 else if (mode == DImode)
1288 *total = cost_data->int_mult_di;
1289 else
1290 *total = cost_data->int_mult_si;
1291 return false;
1292
1293 case ASHIFT:
1294 if (CONST_INT_P (XEXP (x, 1))
1295 && INTVAL (XEXP (x, 1)) <= 3)
1296 {
1297 *total = COSTS_N_INSNS (1);
1298 return false;
1299 }
1300 /* FALLTHRU */
1301
1302 case ASHIFTRT:
1303 case LSHIFTRT:
1304 *total = cost_data->int_shift;
1305 return false;
1306
1307 case IF_THEN_ELSE:
1308 if (float_mode_p)
1309 *total = cost_data->fp_add;
1310 else
1311 *total = cost_data->int_cmov;
1312 return false;
1313
1314 case DIV:
1315 case UDIV:
1316 case MOD:
1317 case UMOD:
1318 if (!float_mode_p)
1319 *total = cost_data->int_div;
1320 else if (mode == SFmode)
1321 *total = cost_data->fp_div_sf;
1322 else
1323 *total = cost_data->fp_div_df;
1324 return false;
1325
1326 case MEM:
1327 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1328 return true;
1329
1330 case NEG:
1331 if (! float_mode_p)
1332 {
1333 *total = COSTS_N_INSNS (1);
1334 return false;
1335 }
1336 /* FALLTHRU */
1337
1338 case ABS:
1339 if (! float_mode_p)
1340 {
1341 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1342 return false;
1343 }
1344 /* FALLTHRU */
1345
1346 case FLOAT:
1347 case UNSIGNED_FLOAT:
1348 case FIX:
1349 case UNSIGNED_FIX:
1350 case FLOAT_TRUNCATE:
1351 *total = cost_data->fp_add;
1352 return false;
1353
1354 case FLOAT_EXTEND:
1355 if (MEM_P (XEXP (x, 0)))
1356 *total = 0;
1357 else
1358 *total = cost_data->fp_add;
1359 return false;
1360
1361 default:
1362 return false;
1363 }
1364 }
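
/* Note the PLUS special case above: a scaled add such as
   (plus (mult r1 8) r2), which matches the s4addq/s8addq patterns, is
   costed as one insn plus the costs of its register operands, since the
   multiply folds into the add. */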
1365 \f
1366 /* REF is an alignable memory location. Place an aligned SImode
1367 reference into *PALIGNED_MEM and the number of bits to shift into
1368 *PBITNUM. SCRATCH is a free register for use in reloading out
1369 of range stack slots. */
1370
1371 void
1372 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1373 {
1374 rtx base;
1375 HOST_WIDE_INT disp, offset;
1376
1377 gcc_assert (MEM_P (ref));
1378
1379 if (reload_in_progress
1380 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1381 {
1382 base = find_replacement (&XEXP (ref, 0));
1383 gcc_assert (memory_address_p (GET_MODE (ref), base));
1384 }
1385 else
1386 base = XEXP (ref, 0);
1387
1388 if (GET_CODE (base) == PLUS)
1389 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1390 else
1391 disp = 0;
1392
1393 /* Find the byte offset within an aligned word. If the memory itself is
1394 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1395 will have examined the base register and determined it is aligned, and
1396 thus displacements from it are naturally alignable. */
1397 if (MEM_ALIGN (ref) >= 32)
1398 offset = 0;
1399 else
1400 offset = disp & 3;
1401
1402 /* The location should not cross aligned word boundary. */
1403 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1404 <= GET_MODE_SIZE (SImode));
1405
1406 /* Access the entire aligned word. */
1407 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1408
1409 /* Convert the byte offset within the word to a bit offset. */
1410 offset *= BITS_PER_UNIT;
1411 *pbitnum = GEN_INT (offset);
1412 }
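
/* Example: a QImode reference at 7($15) that is not known to be 32-bit
   aligned gives offset = 7 & 3 = 3, so *PALIGNED_MEM becomes the SImode
   word at 4($15) and *PBITNUM is 24, i.e. the byte occupies bits 24..31
   of that word. */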
1413
1414 /* Similar, but just get the address. Handle the two reload cases. */
1416
1417 rtx
1418 get_unaligned_address (rtx ref)
1419 {
1420 rtx base;
1421 HOST_WIDE_INT offset = 0;
1422
1423 gcc_assert (MEM_P (ref));
1424
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1427 {
1428 base = find_replacement (&XEXP (ref, 0));
1429
1430 gcc_assert (memory_address_p (GET_MODE (ref), base));
1431 }
1432 else
1433 base = XEXP (ref, 0);
1434
1435 if (GET_CODE (base) == PLUS)
1436 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1437
1438 return plus_constant (base, offset);
1439 }
1440
1441 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1442 X is always returned in a register. */
1443
1444 rtx
1445 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1446 {
1447 if (GET_CODE (addr) == PLUS)
1448 {
1449 ofs += INTVAL (XEXP (addr, 1));
1450 addr = XEXP (addr, 0);
1451 }
1452
1453 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1454 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1455 }
1456
1457 /* On the Alpha, all (non-symbolic) constants except zero go into
1458 a floating-point register via memory. Note that we cannot
1459 return anything that is not a subset of RCLASS, and that some
1460 symbolic constants cannot be dropped to memory. */
1461
1462 enum reg_class
1463 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1464 {
1465 /* Zero is present in any register class. */
1466 if (x == CONST0_RTX (GET_MODE (x)))
1467 return rclass;
1468
1469 /* These sorts of constants we can easily drop to memory. */
1470 if (CONST_INT_P (x)
1471 || GET_CODE (x) == CONST_DOUBLE
1472 || GET_CODE (x) == CONST_VECTOR)
1473 {
1474 if (rclass == FLOAT_REGS)
1475 return NO_REGS;
1476 if (rclass == ALL_REGS)
1477 return GENERAL_REGS;
1478 return rclass;
1479 }
1480
1481 /* All other kinds of constants should not (and in the case of HIGH
1482 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1483 secondary reload. */
1484 if (CONSTANT_P (x))
1485 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1486
1487 return rclass;
1488 }
1489
1490 /* Inform reload about cases where moving X with a mode MODE to a register in
1491 RCLASS requires an extra scratch or immediate register. Return the class
1492 needed for the immediate register. */
1493
1494 static reg_class_t
1495 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1496 enum machine_mode mode, secondary_reload_info *sri)
1497 {
1498 enum reg_class rclass = (enum reg_class) rclass_i;
1499
1500 /* Loading and storing HImode or QImode values to and from memory
1501 usually requires a scratch register. */
1502 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1503 {
1504 if (any_memory_operand (x, mode))
1505 {
1506 if (in_p)
1507 {
1508 if (!aligned_memory_operand (x, mode))
1509 sri->icode = direct_optab_handler (reload_in_optab, mode);
1510 }
1511 else
1512 sri->icode = direct_optab_handler (reload_out_optab, mode);
1513 return NO_REGS;
1514 }
1515 }
1516
1517 /* We also cannot do integral arithmetic into FP regs, as might result
1518 from register elimination into a DImode fp register. */
1519 if (rclass == FLOAT_REGS)
1520 {
1521 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1522 return GENERAL_REGS;
1523 if (in_p && INTEGRAL_MODE_P (mode)
1524 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1525 return GENERAL_REGS;
1526 }
1527
1528 return NO_REGS;
1529 }
1530 \f
1531 /* Subfunction of the following function. Update the flags of any MEM
1532 found in part of X. */
1533
1534 static int
1535 alpha_set_memflags_1 (rtx *xp, void *data)
1536 {
1537 rtx x = *xp, orig = (rtx) data;
1538
1539 if (!MEM_P (x))
1540 return 0;
1541
1542 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1543 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1544 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1545 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1546 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1547
1548 /* Sadly, we cannot use alias sets because the extra aliasing
1549 produced by the AND interferes. Given that two-byte quantities
1550 are the only thing we would be able to differentiate anyway,
1551 there does not seem to be any point in convoluting the early
1552 out of the alias check. */
1553
1554 return -1;
1555 }
1556
1557 /* Given SEQ, which is an INSN list, look for any MEMs in either
1558 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1559 volatile flags from REF into each of the MEMs found. If REF is not
1560 a MEM, don't do anything. */
1561
1562 void
1563 alpha_set_memflags (rtx seq, rtx ref)
1564 {
1565 rtx insn;
1566
1567 if (!MEM_P (ref))
1568 return;
1569
1570 /* This is only called from alpha.md, after having had something
1571 generated from one of the insn patterns. So if everything is
1572 zero, the pattern is already up-to-date. */
1573 if (!MEM_VOLATILE_P (ref)
1574 && !MEM_IN_STRUCT_P (ref)
1575 && !MEM_SCALAR_P (ref)
1576 && !MEM_NOTRAP_P (ref)
1577 && !MEM_READONLY_P (ref))
1578 return;
1579
1580 for (insn = seq; insn; insn = NEXT_INSN (insn))
1581 if (INSN_P (insn))
1582 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1583 else
1584 gcc_unreachable ();
1585 }
1586 \f
1587 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1588 int, bool);
1589
1590 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1591 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1592 and return pc_rtx if successful. */
1593
1594 static rtx
1595 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1596 HOST_WIDE_INT c, int n, bool no_output)
1597 {
1598 HOST_WIDE_INT new_const;
1599 int i, bits;
1600 /* Use a pseudo if highly optimizing and still generating RTL. */
1601 rtx subtarget
1602 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1603 rtx temp, insn;
1604
1605 /* If this is a sign-extended 32-bit constant, we can do this in at most
1606 three insns, so do it if we have enough insns left. We always have
1607 a sign-extended 32-bit constant when compiling on a narrow machine. */
1608
1609 if (HOST_BITS_PER_WIDE_INT != 64
1610 || c >> 31 == -1 || c >> 31 == 0)
1611 {
1612 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1613 HOST_WIDE_INT tmp1 = c - low;
1614 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1615 HOST_WIDE_INT extra = 0;
1616
1617 /* If HIGH will be interpreted as negative but the constant is
1618 positive, we must adjust it to do two ldha insns. */
1619
1620 if ((high & 0x8000) != 0 && c >= 0)
1621 {
1622 extra = 0x4000;
1623 tmp1 -= 0x40000000;
1624 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1625 }
1626
1627 if (c == low || (low == 0 && extra == 0))
1628 {
1629 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1630 but that meant that we can't handle INT_MIN on 32-bit machines
1631 (like NT/Alpha), because we recurse indefinitely through
1632 emit_move_insn to gen_movdi. So instead, since we know exactly
1633 what we want, create it explicitly. */
1634
1635 if (no_output)
1636 return pc_rtx;
1637 if (target == NULL)
1638 target = gen_reg_rtx (mode);
1639 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1640 return target;
1641 }
1642 else if (n >= 2 + (extra != 0))
1643 {
1644 if (no_output)
1645 return pc_rtx;
1646 if (!can_create_pseudo_p ())
1647 {
1648 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1649 temp = target;
1650 }
1651 else
1652 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1653 subtarget, mode);
1654
1655 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1656 This means that if we go through expand_binop, we'll try to
1657 generate extensions, etc, which will require new pseudos, which
1658 will fail during some split phases. The SImode add patterns
1659 still exist, but are not named. So build the insns by hand. */
1660
1661 if (extra != 0)
1662 {
1663 if (! subtarget)
1664 subtarget = gen_reg_rtx (mode);
1665 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1666 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1667 emit_insn (insn);
1668 temp = subtarget;
1669 }
1670
1671 if (target == NULL)
1672 target = gen_reg_rtx (mode);
1673 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1674 insn = gen_rtx_SET (VOIDmode, target, insn);
1675 emit_insn (insn);
1676 return target;
1677 }
1678 }
1679
1680 /* If we couldn't do it that way, try some other methods. But if we have
1681 no instructions left, don't bother. Likewise, if this is SImode and
1682 we can't make pseudos, we can't do anything since the expand_binop
1683 and expand_unop calls will widen and try to make pseudos. */
1684
1685 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1686 return 0;
1687
1688 /* Next, see if we can load a related constant and then shift and possibly
1689 negate it to get the constant we want. Try this once each increasing
1690 numbers of insns. */
1691
1692 for (i = 1; i < n; i++)
1693 {
1694 /* First, see if, minus some low bits, we have an easy load of the
1695 high bits. */
1696
1697 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1698 if (new_const != 0)
1699 {
1700 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1701 if (temp)
1702 {
1703 if (no_output)
1704 return temp;
1705 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1706 target, 0, OPTAB_WIDEN);
1707 }
1708 }
1709
1710 /* Next try complementing. */
1711 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1712 if (temp)
1713 {
1714 if (no_output)
1715 return temp;
1716 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1717 }
1718
1719 /* Next try to form a constant and do a left shift. We can do this
1720 if some low-order bits are zero; the exact_log2 call below tells
1721 us that information. The bits we are shifting out could be any
1722 value, but here we'll just try the 0- and sign-extended forms of
1723 the constant. To try to increase the chance of having the same
1724 constant in more than one insn, start at the highest number of
1725 bits to shift, but try all possibilities in case a ZAPNOT will
1726 be useful. */
1727
1728 bits = exact_log2 (c & -c);
1729 if (bits > 0)
1730 for (; bits > 0; bits--)
1731 {
1732 new_const = c >> bits;
1733 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1734 if (!temp && c < 0)
1735 {
1736 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1737 temp = alpha_emit_set_const (subtarget, mode, new_const,
1738 i, no_output);
1739 }
1740 if (temp)
1741 {
1742 if (no_output)
1743 return temp;
1744 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1745 target, 0, OPTAB_WIDEN);
1746 }
1747 }
1748
1749 /* Now try high-order zero bits. Here we try the shifted-in bits as
1750 all zero and all ones. Be careful to avoid shifting outside the
1751 mode and to avoid shifting outside the host wide int size. */
1752 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1753 confuse the recursive call and set all of the high 32 bits. */
1754
1755 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1756 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1757 if (bits > 0)
1758 for (; bits > 0; bits--)
1759 {
1760 new_const = c << bits;
1761 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1762 if (!temp)
1763 {
1764 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1765 temp = alpha_emit_set_const (subtarget, mode, new_const,
1766 i, no_output);
1767 }
1768 if (temp)
1769 {
1770 if (no_output)
1771 return temp;
1772 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1773 target, 1, OPTAB_WIDEN);
1774 }
1775 }
1776
1777 /* Now try high-order 1 bits. We get that with a sign-extension.
1778 But one bit isn't enough here. Be careful to avoid shifting outside
1779 the mode and to avoid shifting outside the host wide int size. */
1780
1781 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1782 - floor_log2 (~ c) - 2);
1783 if (bits > 0)
1784 for (; bits > 0; bits--)
1785 {
1786 new_const = c << bits;
1787 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1788 if (!temp)
1789 {
1790 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1791 temp = alpha_emit_set_const (subtarget, mode, new_const,
1792 i, no_output);
1793 }
1794 if (temp)
1795 {
1796 if (no_output)
1797 return temp;
1798 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1799 target, 0, OPTAB_WIDEN);
1800 }
1801 }
1802 }
1803
1804 #if HOST_BITS_PER_WIDE_INT == 64
1805 /* Finally, see if we can load a value into the target that is the same as the
1806 constant except that all bytes that are 0 are changed to be 0xff. If we
1807 can, then we can do a ZAPNOT to obtain the desired constant. */
1808
1809 new_const = c;
1810 for (i = 0; i < 64; i += 8)
1811 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1812 new_const |= (HOST_WIDE_INT) 0xff << i;
1813
1814 /* We are only called for SImode and DImode. If this is SImode, ensure that
1815 we are sign extended to a full word. */
1816
1817 if (mode == SImode)
1818 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1819
1820 if (new_const != c)
1821 {
1822 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1823 if (temp)
1824 {
1825 if (no_output)
1826 return temp;
1827 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1828 target, 0, OPTAB_WIDEN);
1829 }
1830 }
1831 #endif
1832
1833 return 0;
1834 }
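
/* Worked example of the "extra" adjustment above: for c = 0x7fff8000 we
   get low = -0x8000 and an initial high of -0x8000, which would
   sign-extend the wrong way; with extra = 0x4000 the three-insn sequence
   is ldah 0x4000, ldah 0x4000, lda -0x8000, and
   0x40000000 + 0x40000000 - 0x8000 == 0x7fff8000. */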
1835
1836 /* Try to output insns to set TARGET equal to the constant C if it can be
1837 done in less than N insns. Do all computations in MODE. Returns the place
1838 where the output has been placed if it can be done and the insns have been
1839 emitted. If it would take more than N insns, zero is returned and no
1840 insns are emitted. */
1841
1842 static rtx
1843 alpha_emit_set_const (rtx target, enum machine_mode mode,
1844 HOST_WIDE_INT c, int n, bool no_output)
1845 {
1846 enum machine_mode orig_mode = mode;
1847 rtx orig_target = target;
1848 rtx result = 0;
1849 int i;
1850
1851 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1852 can't load this constant in one insn, do this in DImode. */
1853 if (!can_create_pseudo_p () && mode == SImode
1854 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1855 {
1856 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1857 if (result)
1858 return result;
1859
1860 target = no_output ? NULL : gen_lowpart (DImode, target);
1861 mode = DImode;
1862 }
1863 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1864 {
1865 target = no_output ? NULL : gen_lowpart (DImode, target);
1866 mode = DImode;
1867 }
1868
1869 /* Try 1 insn, then 2, then up to N. */
1870 for (i = 1; i <= n; i++)
1871 {
1872 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1873 if (result)
1874 {
1875 rtx insn, set;
1876
1877 if (no_output)
1878 return result;
1879
1880 insn = get_last_insn ();
1881 set = single_set (insn);
1882 if (! CONSTANT_P (SET_SRC (set)))
1883 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1884 break;
1885 }
1886 }
1887
1888 /* Allow for the case where we changed the mode of TARGET. */
1889 if (result)
1890 {
1891 if (result == target)
1892 result = orig_target;
1893 else if (mode != orig_mode)
1894 result = gen_lowpart (orig_mode, result);
1895 }
1896
1897 return result;
1898 }
1899
1900 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1901 fall back to a straightforward decomposition. We do this to avoid
1902 exponential run times encountered when looking for longer sequences
1903 with alpha_emit_set_const. */
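/* As a rough sketch of the worst case (shape only, not real compiler
output): the four sign-extended 16-bit pieces d4..d1 computed below
rebuild an arbitrary 64-bit constant as ldah/lda for the high 32
bits, an sll by 32, then ldah/lda for the low 32 bits -- at most
five instructions. */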
1904
1905 static rtx
1906 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1907 {
1908 HOST_WIDE_INT d1, d2, d3, d4;
1909
1910 /* Decompose the entire word. */
1911 #if HOST_BITS_PER_WIDE_INT >= 64
1912 gcc_assert (c2 == -(c1 < 0));
1913 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1914 c1 -= d1;
1915 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1916 c1 = (c1 - d2) >> 32;
1917 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1918 c1 -= d3;
1919 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1920 gcc_assert (c1 == d4);
1921 #else
1922 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1923 c1 -= d1;
1924 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1925 gcc_assert (c1 == d2);
1926 c2 += (d2 < 0);
1927 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1928 c2 -= d3;
1929 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1930 gcc_assert (c2 == d4);
1931 #endif
1932
1933 /* Construct the high word. */
1934 if (d4)
1935 {
1936 emit_move_insn (target, GEN_INT (d4));
1937 if (d3)
1938 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1939 }
1940 else
1941 emit_move_insn (target, GEN_INT (d3));
1942
1943 /* Shift it into place. */
1944 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1945
1946 /* Add in the low bits. */
1947 if (d2)
1948 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1949 if (d1)
1950 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1951
1952 return target;
1953 }
1954
1955 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
1956 the low 64 bits. */
1957
1958 static void
1959 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
1960 {
1961 HOST_WIDE_INT i0, i1;
1962
1963 if (GET_CODE (x) == CONST_VECTOR)
1964 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
1965
1966
1967 if (CONST_INT_P (x))
1968 {
1969 i0 = INTVAL (x);
1970 i1 = -(i0 < 0);
1971 }
1972 else if (HOST_BITS_PER_WIDE_INT >= 64)
1973 {
1974 i0 = CONST_DOUBLE_LOW (x);
1975 i1 = -(i0 < 0);
1976 }
1977 else
1978 {
1979 i0 = CONST_DOUBLE_LOW (x);
1980 i1 = CONST_DOUBLE_HIGH (x);
1981 }
1982
1983 *p0 = i0;
1984 *p1 = i1;
1985 }
1986
1987 /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
1988 we are willing to load the value into a register via a move pattern.
1989 Normally this is all symbolic constants, integral constants that
1990 take three or fewer instructions, and floating-point zero. */
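/* For instance (illustrative values): 0x7fff0000 is a single ldah and
0x12345678 is ldah+lda, so both are accepted below, while a general
64-bit bit pattern is rejected and ends up in the constant pool
unless TARGET_BUILD_CONSTANTS is in effect. */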
1991
1992 bool
1993 alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
1994 {
1995 HOST_WIDE_INT i0, i1;
1996
1997 switch (GET_CODE (x))
1998 {
1999 case LABEL_REF:
2000 case HIGH:
2001 return true;
2002
2003 case CONST:
2004 if (GET_CODE (XEXP (x, 0)) == PLUS
2005 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2006 x = XEXP (XEXP (x, 0), 0);
2007 else
2008 return true;
2009
2010 if (GET_CODE (x) != SYMBOL_REF)
2011 return true;
2012
2013 /* FALLTHRU */
2014
2015 case SYMBOL_REF:
2016 /* TLS symbols are never valid. */
2017 return SYMBOL_REF_TLS_MODEL (x) == 0;
2018
2019 case CONST_DOUBLE:
2020 if (x == CONST0_RTX (mode))
2021 return true;
2022 if (FLOAT_MODE_P (mode))
2023 return false;
2024 goto do_integer;
2025
2026 case CONST_VECTOR:
2027 if (x == CONST0_RTX (mode))
2028 return true;
2029 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2030 return false;
2031 if (GET_MODE_SIZE (mode) != 8)
2032 return false;
2033 goto do_integer;
2034
2035 case CONST_INT:
2036 do_integer:
2037 if (TARGET_BUILD_CONSTANTS)
2038 return true;
2039 alpha_extract_integer (x, &i0, &i1);
2040 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2041 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2042 return false;
2043
2044 default:
2045 return false;
2046 }
2047 }
2048
2049 /* Operand 1 is known to be a constant, and should require more than one
2050 instruction to load. Emit that multi-part load. */
2051
2052 bool
2053 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2054 {
2055 HOST_WIDE_INT i0, i1;
2056 rtx temp = NULL_RTX;
2057
2058 alpha_extract_integer (operands[1], &i0, &i1);
2059
2060 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2061 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2062
2063 if (!temp && TARGET_BUILD_CONSTANTS)
2064 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2065
2066 if (temp)
2067 {
2068 if (!rtx_equal_p (operands[0], temp))
2069 emit_move_insn (operands[0], temp);
2070 return true;
2071 }
2072
2073 return false;
2074 }
2075
2076 /* Expand a move instruction; return true if all work is done.
2077 We don't handle non-bwx subword loads here. */
2078
2079 bool
2080 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2081 {
2082 rtx tmp;
2083
2084 /* If the output is not a register, the input must be. */
2085 if (MEM_P (operands[0])
2086 && ! reg_or_0_operand (operands[1], mode))
2087 operands[1] = force_reg (mode, operands[1]);
2088
2089 /* Allow legitimize_address to perform some simplifications. */
2090 if (mode == Pmode && symbolic_operand (operands[1], mode))
2091 {
2092 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2093 if (tmp)
2094 {
2095 if (tmp == operands[0])
2096 return true;
2097 operands[1] = tmp;
2098 return false;
2099 }
2100 }
2101
2102 /* Early out for non-constants and valid constants. */
2103 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2104 return false;
2105
2106 /* Split large integers. */
2107 if (CONST_INT_P (operands[1])
2108 || GET_CODE (operands[1]) == CONST_DOUBLE
2109 || GET_CODE (operands[1]) == CONST_VECTOR)
2110 {
2111 if (alpha_split_const_mov (mode, operands))
2112 return true;
2113 }
2114
2115 /* Otherwise we've nothing left but to drop the thing to memory. */
2116 tmp = force_const_mem (mode, operands[1]);
2117
2118 if (tmp == NULL_RTX)
2119 return false;
2120
2121 if (reload_in_progress)
2122 {
2123 emit_move_insn (operands[0], XEXP (tmp, 0));
2124 operands[1] = replace_equiv_address (tmp, operands[0]);
2125 }
2126 else
2127 operands[1] = validize_mem (tmp);
2128 return false;
2129 }
2130
2131 /* Expand a non-bwx QImode or HImode move instruction;
2132 return true if all work is done. */
2133
2134 bool
2135 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2136 {
2137 rtx seq;
2138
2139 /* If the output is not a register, the input must be. */
2140 if (MEM_P (operands[0]))
2141 operands[1] = force_reg (mode, operands[1]);
2142
2143 /* Handle four memory cases, unaligned and aligned for either the input
2144 or the output. The only case where we can be called during reload is
2145 for aligned loads; all other cases require temporaries. */
2146
2147 if (any_memory_operand (operands[1], mode))
2148 {
2149 if (aligned_memory_operand (operands[1], mode))
2150 {
2151 if (reload_in_progress)
2152 {
2153 if (mode == QImode)
2154 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2155 else
2156 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2157 emit_insn (seq);
2158 }
2159 else
2160 {
2161 rtx aligned_mem, bitnum;
2162 rtx scratch = gen_reg_rtx (SImode);
2163 rtx subtarget;
2164 bool copyout;
2165
2166 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2167
2168 subtarget = operands[0];
2169 if (REG_P (subtarget))
2170 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2171 else
2172 subtarget = gen_reg_rtx (DImode), copyout = true;
2173
2174 if (mode == QImode)
2175 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2176 bitnum, scratch);
2177 else
2178 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2179 bitnum, scratch);
2180 emit_insn (seq);
2181
2182 if (copyout)
2183 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2184 }
2185 }
2186 else
2187 {
2188 /* Don't pass these as parameters since that makes the generated
2189 code depend on parameter evaluation order which will cause
2190 bootstrap failures. */
2191
2192 rtx temp1, temp2, subtarget, ua;
2193 bool copyout;
2194
2195 temp1 = gen_reg_rtx (DImode);
2196 temp2 = gen_reg_rtx (DImode);
2197
2198 subtarget = operands[0];
2199 if (REG_P (subtarget))
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2203
2204 ua = get_unaligned_address (operands[1]);
2205 if (mode == QImode)
2206 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2207 else
2208 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2209
2210 alpha_set_memflags (seq, operands[1]);
2211 emit_insn (seq);
2212
2213 if (copyout)
2214 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2215 }
2216 return true;
2217 }
2218
2219 if (any_memory_operand (operands[0], mode))
2220 {
2221 if (aligned_memory_operand (operands[0], mode))
2222 {
2223 rtx aligned_mem, bitnum;
2224 rtx temp1 = gen_reg_rtx (SImode);
2225 rtx temp2 = gen_reg_rtx (SImode);
2226
2227 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2228
2229 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2230 temp1, temp2));
2231 }
2232 else
2233 {
2234 rtx temp1 = gen_reg_rtx (DImode);
2235 rtx temp2 = gen_reg_rtx (DImode);
2236 rtx temp3 = gen_reg_rtx (DImode);
2237 rtx ua = get_unaligned_address (operands[0]);
2238
2239 if (mode == QImode)
2240 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2241 else
2242 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2243
2244 alpha_set_memflags (seq, operands[0]);
2245 emit_insn (seq);
2246 }
2247 return true;
2248 }
2249
2250 return false;
2251 }
2252
2253 /* Implement the movmisalign patterns. One of the operands is a memory
2254 that is not naturally aligned. Emit instructions to load it. */
2255
2256 void
2257 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2258 {
2259 /* Honor misaligned loads, since we promised to handle them. */
2260 if (MEM_P (operands[1]))
2261 {
2262 rtx tmp;
2263
2264 if (register_operand (operands[0], mode))
2265 tmp = operands[0];
2266 else
2267 tmp = gen_reg_rtx (mode);
2268
2269 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2270 if (tmp != operands[0])
2271 emit_move_insn (operands[0], tmp);
2272 }
2273 else if (MEM_P (operands[0]))
2274 {
2275 if (!reg_or_0_operand (operands[1], mode))
2276 operands[1] = force_reg (mode, operands[1]);
2277 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2278 }
2279 else
2280 gcc_unreachable ();
2281 }
2282
2283 /* Generate an unsigned DImode to FP conversion. This is the same code
2284 optabs would emit if we didn't have TFmode patterns.
2285
2286 For SFmode, this is the only construction I've found that can pass
2287 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2288 intermediates will work, because you'll get intermediate rounding
2289 that ruins the end result. Some of this could be fixed by turning
2290 on round-to-positive-infinity, but that requires diddling the fpsr,
2291 which kills performance. I tried turning this around and converting
2292 to a negative number, so that I could turn on /m, but either I did
2293 it wrong or there's something else, because I wound up with the exact
2294 same single-bit error. There is a branch-less form of this same code:
2295
2296 srl $16,1,$1
2297 and $16,1,$2
2298 cmplt $16,0,$3
2299 or $1,$2,$2
2300 cmovge $16,$16,$2
2301 itoft $3,$f10
2302 itoft $2,$f11
2303 cvtqs $f11,$f11
2304 adds $f11,$f11,$f0
2305 fcmoveq $f10,$f11,$f0
2306
2307 I'm not using it because it's the same number of instructions as
2308 this branch-full form, and it has more serialized long latency
2309 instructions on the critical path.
2310
2311 For DFmode, we can avoid rounding errors by breaking up the word
2312 into two pieces, converting them separately, and adding them back:
2313
2314 LC0: .long 0,0x5f800000
2315
2316 itoft $16,$f11
2317 lda $2,LC0
2318 cmplt $16,0,$1
2319 cpyse $f11,$f31,$f10
2320 cpyse $f31,$f11,$f11
2321 s4addq $1,$2,$1
2322 lds $f12,0($1)
2323 cvtqt $f10,$f10
2324 cvtqt $f11,$f11
2325 addt $f12,$f10,$f0
2326 addt $f0,$f11,$f0
2327
2328 This doesn't seem to be a clear-cut win over the optabs form.
2329 It probably all depends on the distribution of numbers being
2330 converted -- in the optabs form, all but high-bit-set has a
2331 much lower minimum execution time. */
2332
2333 void
2334 alpha_emit_floatuns (rtx operands[2])
2335 {
2336 rtx neglab, donelab, i0, i1, f0, in, out;
2337 enum machine_mode mode;
2338
2339 out = operands[0];
2340 in = force_reg (DImode, operands[1]);
2341 mode = GET_MODE (out);
2342 neglab = gen_label_rtx ();
2343 donelab = gen_label_rtx ();
2344 i0 = gen_reg_rtx (DImode);
2345 i1 = gen_reg_rtx (DImode);
2346 f0 = gen_reg_rtx (mode);
2347
2348 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2349
2350 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2351 emit_jump_insn (gen_jump (donelab));
2352 emit_barrier ();
2353
2354 emit_label (neglab);
2355
2356 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2357 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2358 emit_insn (gen_iordi3 (i0, i0, i1));
2359 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2360 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2361
2362 emit_label (donelab);
2363 }
2364
2365 /* Generate the comparison for a conditional branch. */
2366
2367 void
2368 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2369 {
2370 enum rtx_code cmp_code, branch_code;
2371 enum machine_mode branch_mode = VOIDmode;
2372 enum rtx_code code = GET_CODE (operands[0]);
2373 rtx op0 = operands[1], op1 = operands[2];
2374 rtx tem;
2375
2376 if (cmp_mode == TFmode)
2377 {
2378 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2379 op1 = const0_rtx;
2380 cmp_mode = DImode;
2381 }
2382
2383 /* The general case: fold the comparison code to the types of compares
2384 that we have, choosing the branch as necessary. */
2385 switch (code)
2386 {
2387 case EQ: case LE: case LT: case LEU: case LTU:
2388 case UNORDERED:
2389 /* We have these compares: */
2390 cmp_code = code, branch_code = NE;
2391 break;
2392
2393 case NE:
2394 case ORDERED:
2395 /* These must be reversed. */
2396 cmp_code = reverse_condition (code), branch_code = EQ;
2397 break;
2398
2399 case GE: case GT: case GEU: case GTU:
2400 /* For FP, we swap them, for INT, we reverse them. */
2401 if (cmp_mode == DFmode)
2402 {
2403 cmp_code = swap_condition (code);
2404 branch_code = NE;
2405 tem = op0, op0 = op1, op1 = tem;
2406 }
2407 else
2408 {
2409 cmp_code = reverse_condition (code);
2410 branch_code = EQ;
2411 }
2412 break;
2413
2414 default:
2415 gcc_unreachable ();
2416 }
2417
2418 if (cmp_mode == DFmode)
2419 {
2420 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2421 {
2422 /* When we are not as concerned about non-finite values, and we
2423 are comparing against zero, we can branch directly. */
2424 if (op1 == CONST0_RTX (DFmode))
2425 cmp_code = UNKNOWN, branch_code = code;
2426 else if (op0 == CONST0_RTX (DFmode))
2427 {
2428 /* Undo the swap we probably did just above. */
2429 tem = op0, op0 = op1, op1 = tem;
2430 branch_code = swap_condition (cmp_code);
2431 cmp_code = UNKNOWN;
2432 }
2433 }
2434 else
2435 {
2436 /* ??? We mark the branch mode to be CCmode to prevent the
2437 compare and branch from being combined, since the compare
2438 insn follows IEEE rules that the branch does not. */
2439 branch_mode = CCmode;
2440 }
2441 }
2442 else
2443 {
2444 /* The following optimizations are only for signed compares. */
2445 if (code != LEU && code != LTU && code != GEU && code != GTU)
2446 {
2447 /* Whee. Compare and branch against 0 directly. */
2448 if (op1 == const0_rtx)
2449 cmp_code = UNKNOWN, branch_code = code;
2450
2451 /* If the constant doesn't fit into an immediate, but can
2452 be generated by lda/ldah, we adjust the argument and
2453 compare against zero, so we can use beq/bne directly. */
2454 /* ??? Don't do this when comparing against symbols, otherwise
2455 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2456 be declared false out of hand (at least for non-weak). */
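/* E.g. (purely illustrative) for x == 0x8000: the constant is too big
for an 8-bit literal, but -0x8000 fits lda's 16-bit field, so we
emit lda t,-32768(x) and branch with beq/bne on t. */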
2457 else if (CONST_INT_P (op1)
2458 && (code == EQ || code == NE)
2459 && !(symbolic_operand (op0, VOIDmode)
2460 || (REG_P (op0) && REG_POINTER (op0))))
2461 {
2462 rtx n_op1 = GEN_INT (-INTVAL (op1));
2463
2464 if (! satisfies_constraint_I (op1)
2465 && (satisfies_constraint_K (n_op1)
2466 || satisfies_constraint_L (n_op1)))
2467 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2468 }
2469 }
2470
2471 if (!reg_or_0_operand (op0, DImode))
2472 op0 = force_reg (DImode, op0);
2473 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2474 op1 = force_reg (DImode, op1);
2475 }
2476
2477 /* Emit an initial compare instruction, if necessary. */
2478 tem = op0;
2479 if (cmp_code != UNKNOWN)
2480 {
2481 tem = gen_reg_rtx (cmp_mode);
2482 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2483 }
2484
2485 /* Emit the branch instruction. */
2486 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2487 gen_rtx_IF_THEN_ELSE (VOIDmode,
2488 gen_rtx_fmt_ee (branch_code,
2489 branch_mode, tem,
2490 CONST0_RTX (cmp_mode)),
2491 gen_rtx_LABEL_REF (VOIDmode,
2492 operands[3]),
2493 pc_rtx));
2494 emit_jump_insn (tem);
2495 }
2496
2497 /* Certain simplifications can be done to make invalid setcc operations
2498 valid. Return true if we emit the setcc, false if we can't handle it. */
2499
2500 bool
2501 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2502 {
2503 enum rtx_code cmp_code;
2504 enum rtx_code code = GET_CODE (operands[1]);
2505 rtx op0 = operands[2], op1 = operands[3];
2506 rtx tmp;
2507
2508 if (cmp_mode == TFmode)
2509 {
2510 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2511 op1 = const0_rtx;
2512 cmp_mode = DImode;
2513 }
2514
2515 if (cmp_mode == DFmode && !TARGET_FIX)
2516 return 0;
2517
2518 /* The general case: fold the comparison code to the types of compares
2519 that we have, choosing the branch as necessary. */
2520
2521 cmp_code = UNKNOWN;
2522 switch (code)
2523 {
2524 case EQ: case LE: case LT: case LEU: case LTU:
2525 case UNORDERED:
2526 /* We have these compares. */
2527 if (cmp_mode == DFmode)
2528 cmp_code = code, code = NE;
2529 break;
2530
2531 case NE:
2532 if (cmp_mode == DImode && op1 == const0_rtx)
2533 break;
2534 /* FALLTHRU */
2535
2536 case ORDERED:
2537 cmp_code = reverse_condition (code);
2538 code = EQ;
2539 break;
2540
2541 case GE: case GT: case GEU: case GTU:
2542 /* These normally need swapping, but for integer zero we have
2543 special patterns that recognize swapped operands. */
2544 if (cmp_mode == DImode && op1 == const0_rtx)
2545 break;
2546 code = swap_condition (code);
2547 if (cmp_mode == DFmode)
2548 cmp_code = code, code = NE;
2549 tmp = op0, op0 = op1, op1 = tmp;
2550 break;
2551
2552 default:
2553 gcc_unreachable ();
2554 }
2555
2556 if (cmp_mode == DImode)
2557 {
2558 if (!register_operand (op0, DImode))
2559 op0 = force_reg (DImode, op0);
2560 if (!reg_or_8bit_operand (op1, DImode))
2561 op1 = force_reg (DImode, op1);
2562 }
2563
2564 /* Emit an initial compare instruction, if necessary. */
2565 if (cmp_code != UNKNOWN)
2566 {
2567 tmp = gen_reg_rtx (cmp_mode);
2568 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2569 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2570
2571 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2572 op1 = const0_rtx;
2573 }
2574
2575 /* Emit the setcc instruction. */
2576 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2577 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2578 return true;
2579 }
2580
2581
2582 /* Rewrite a comparison against zero CMP of the form
2583 (CODE (cc0) (const_int 0)) so it can be written validly in
2584 a conditional move (if_then_else CMP ...).
2585 If both of the operands that set cc0 are nonzero we must emit
2586 an insn to perform the compare (it can't be done within
2587 the conditional move). */
2588
2589 rtx
2590 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2591 {
2592 enum rtx_code code = GET_CODE (cmp);
2593 enum rtx_code cmov_code = NE;
2594 rtx op0 = XEXP (cmp, 0);
2595 rtx op1 = XEXP (cmp, 1);
2596 enum machine_mode cmp_mode
2597 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2598 enum machine_mode cmov_mode = VOIDmode;
2599 int local_fast_math = flag_unsafe_math_optimizations;
2600 rtx tem;
2601
2602 if (cmp_mode == TFmode)
2603 {
2604 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2605 op1 = const0_rtx;
2606 cmp_mode = DImode;
2607 }
2608
2609 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2610
2611 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2612 {
2613 enum rtx_code cmp_code;
2614
2615 if (! TARGET_FIX)
2616 return 0;
2617
2618 /* If we have fp<->int register move instructions, do a cmov by
2619 performing the comparison in fp registers, and move the
2620 zero/nonzero value to integer registers, where we can then
2621 use a normal cmov, or vice-versa. */
2622
2623 switch (code)
2624 {
2625 case EQ: case LE: case LT: case LEU: case LTU:
2626 /* We have these compares. */
2627 cmp_code = code, code = NE;
2628 break;
2629
2630 case NE:
2631 /* This must be reversed. */
2632 cmp_code = EQ, code = EQ;
2633 break;
2634
2635 case GE: case GT: case GEU: case GTU:
2636 /* These normally need swapping, but for integer zero we have
2637 special patterns that recognize swapped operands. */
2638 if (cmp_mode == DImode && op1 == const0_rtx)
2639 cmp_code = code, code = NE;
2640 else
2641 {
2642 cmp_code = swap_condition (code);
2643 code = NE;
2644 tem = op0, op0 = op1, op1 = tem;
2645 }
2646 break;
2647
2648 default:
2649 gcc_unreachable ();
2650 }
2651
2652 tem = gen_reg_rtx (cmp_mode);
2653 emit_insn (gen_rtx_SET (VOIDmode, tem,
2654 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2655 op0, op1)));
2656
2657 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2658 op0 = gen_lowpart (cmp_mode, tem);
2659 op1 = CONST0_RTX (cmp_mode);
2660 local_fast_math = 1;
2661 }
2662
2663 /* We may be able to use a conditional move directly.
2664 This avoids emitting spurious compares. */
2665 if (signed_comparison_operator (cmp, VOIDmode)
2666 && (cmp_mode == DImode || local_fast_math)
2667 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2668 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2669
2670 /* We can't put the comparison inside the conditional move;
2671 emit a compare instruction and put that inside the
2672 conditional move. Make sure we emit only comparisons we have;
2673 swap or reverse as necessary. */
2674
2675 if (!can_create_pseudo_p ())
2676 return NULL_RTX;
2677
2678 switch (code)
2679 {
2680 case EQ: case LE: case LT: case LEU: case LTU:
2681 /* We have these compares: */
2682 break;
2683
2684 case NE:
2685 /* This must be reversed. */
2686 code = reverse_condition (code);
2687 cmov_code = EQ;
2688 break;
2689
2690 case GE: case GT: case GEU: case GTU:
2691 /* These must be swapped. */
2692 if (op1 != CONST0_RTX (cmp_mode))
2693 {
2694 code = swap_condition (code);
2695 tem = op0, op0 = op1, op1 = tem;
2696 }
2697 break;
2698
2699 default:
2700 gcc_unreachable ();
2701 }
2702
2703 if (cmp_mode == DImode)
2704 {
2705 if (!reg_or_0_operand (op0, DImode))
2706 op0 = force_reg (DImode, op0);
2707 if (!reg_or_8bit_operand (op1, DImode))
2708 op1 = force_reg (DImode, op1);
2709 }
2710
2711 /* ??? We mark the branch mode to be CCmode to prevent the compare
2712 and cmov from being combined, since the compare insn follows IEEE
2713 rules that the cmov does not. */
2714 if (cmp_mode == DFmode && !local_fast_math)
2715 cmov_mode = CCmode;
2716
2717 tem = gen_reg_rtx (cmp_mode);
2718 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2719 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2720 }
2721
2722 /* Simplify a conditional move of two constants into a setcc with
2723 arithmetic. This is done with a splitter since combine would
2724 just undo the work if done during code generation. It also catches
2725 cases we wouldn't have before cse. */
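/* For illustration (hypothetical operands): choosing 8 vs. 0 on
(eq cond 0) becomes cmpeq cond,0,t followed by sll t,3,dest, and
constant pairs that differ by 4 or 8 use s4addq/s8addq on the
comparison result instead. */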
2726
2727 int
2728 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2729 rtx t_rtx, rtx f_rtx)
2730 {
2731 HOST_WIDE_INT t, f, diff;
2732 enum machine_mode mode;
2733 rtx target, subtarget, tmp;
2734
2735 mode = GET_MODE (dest);
2736 t = INTVAL (t_rtx);
2737 f = INTVAL (f_rtx);
2738 diff = t - f;
2739
2740 if (((code == NE || code == EQ) && diff < 0)
2741 || (code == GE || code == GT))
2742 {
2743 code = reverse_condition (code);
2744 diff = t, t = f, f = diff;
2745 diff = t - f;
2746 }
2747
2748 subtarget = target = dest;
2749 if (mode != DImode)
2750 {
2751 target = gen_lowpart (DImode, dest);
2752 if (can_create_pseudo_p ())
2753 subtarget = gen_reg_rtx (DImode);
2754 else
2755 subtarget = target;
2756 }
2757 /* Below, we must be careful to use copy_rtx on target and subtarget
2758 in intermediate insns, as they may be subreg rtxes, which may not
2759 be shared. */
2760
2761 if (f == 0 && exact_log2 (diff) > 0
2762 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2763 viable over a longer latency cmove. On EV5, the E0 slot is a
2764 scarce resource, and on EV4 shift has the same latency as a cmove. */
2765 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2766 {
2767 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2768 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2769
2770 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2771 GEN_INT (exact_log2 (t)));
2772 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2773 }
2774 else if (f == 0 && t == -1)
2775 {
2776 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2777 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2778
2779 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2780 }
2781 else if (diff == 1 || diff == 4 || diff == 8)
2782 {
2783 rtx add_op;
2784
2785 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2786 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2787
2788 if (diff == 1)
2789 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2790 else
2791 {
2792 add_op = GEN_INT (f);
2793 if (sext_add_operand (add_op, mode))
2794 {
2795 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2796 GEN_INT (diff));
2797 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2798 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2799 }
2800 else
2801 return 0;
2802 }
2803 }
2804 else
2805 return 0;
2806
2807 return 1;
2808 }
2809 \f
2810 /* Look up the X_floating library function name for the
2811 given operation. */
2812
2813 struct GTY(()) xfloating_op
2814 {
2815 const enum rtx_code code;
2816 const char *const GTY((skip)) osf_func;
2817 const char *const GTY((skip)) vms_func;
2818 rtx libcall;
2819 };
2820
2821 static GTY(()) struct xfloating_op xfloating_ops[] =
2822 {
2823 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2824 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2825 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2826 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2827 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2828 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2829 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2830 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2831 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2832 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2833 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2834 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2835 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2836 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2837 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2838 };
2839
2840 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2841 {
2842 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2843 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2844 };
2845
2846 static rtx
2847 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2848 {
2849 struct xfloating_op *ops = xfloating_ops;
2850 long n = ARRAY_SIZE (xfloating_ops);
2851 long i;
2852
2853 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2854
2855 /* How irritating. Nothing to key off for the main table. */
2856 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2857 {
2858 ops = vax_cvt_ops;
2859 n = ARRAY_SIZE (vax_cvt_ops);
2860 }
2861
2862 for (i = 0; i < n; ++i, ++ops)
2863 if (ops->code == code)
2864 {
2865 rtx func = ops->libcall;
2866 if (!func)
2867 {
2868 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2869 ? ops->vms_func : ops->osf_func);
2870 ops->libcall = func;
2871 }
2872 return func;
2873 }
2874
2875 gcc_unreachable ();
2876 }
2877
2878 /* Most X_floating operations take the rounding mode as an argument.
2879 Compute that here. */
2880
2881 static int
2882 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2883 enum alpha_fp_rounding_mode round)
2884 {
2885 int mode;
2886
2887 switch (round)
2888 {
2889 case ALPHA_FPRM_NORM:
2890 mode = 2;
2891 break;
2892 case ALPHA_FPRM_MINF:
2893 mode = 1;
2894 break;
2895 case ALPHA_FPRM_CHOP:
2896 mode = 0;
2897 break;
2898 case ALPHA_FPRM_DYN:
2899 mode = 4;
2900 break;
2901 default:
2902 gcc_unreachable ();
2903
2904 /* XXX For reference, round to +inf is mode = 3. */
2905 }
2906
2907 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2908 mode |= 0x10000;
2909
2910 return mode;
2911 }
2912
2913 /* Emit an X_floating library function call.
2914
2915 Note that these functions do not follow normal calling conventions:
2916 TFmode arguments are passed in two integer registers (as opposed to
2917 indirect); TFmode return values appear in R16+R17.
2918
2919 FUNC is the function to call.
2920 TARGET is where the output belongs.
2921 OPERANDS are the inputs.
2922 NOPERANDS is the count of inputs.
2923 EQUIV is the expression equivalent for the function.
2924 */
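/* As a concrete sketch of that convention for the code below: a
two-operand TFmode arithmetic call such as _OtsAddX passes the
first argument in R16/R17, the second in R18/R19, the rounding-mode
argument in R20, and receives the TFmode result back in R16/R17. */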
2925
2926 static void
2927 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2928 int noperands, rtx equiv)
2929 {
2930 rtx usage = NULL_RTX, tmp, reg;
2931 int regno = 16, i;
2932
2933 start_sequence ();
2934
2935 for (i = 0; i < noperands; ++i)
2936 {
2937 switch (GET_MODE (operands[i]))
2938 {
2939 case TFmode:
2940 reg = gen_rtx_REG (TFmode, regno);
2941 regno += 2;
2942 break;
2943
2944 case DFmode:
2945 reg = gen_rtx_REG (DFmode, regno + 32);
2946 regno += 1;
2947 break;
2948
2949 case VOIDmode:
2950 gcc_assert (CONST_INT_P (operands[i]));
2951 /* FALLTHRU */
2952 case DImode:
2953 reg = gen_rtx_REG (DImode, regno);
2954 regno += 1;
2955 break;
2956
2957 default:
2958 gcc_unreachable ();
2959 }
2960
2961 emit_move_insn (reg, operands[i]);
2962 use_reg (&usage, reg);
2963 }
2964
2965 switch (GET_MODE (target))
2966 {
2967 case TFmode:
2968 reg = gen_rtx_REG (TFmode, 16);
2969 break;
2970 case DFmode:
2971 reg = gen_rtx_REG (DFmode, 32);
2972 break;
2973 case DImode:
2974 reg = gen_rtx_REG (DImode, 0);
2975 break;
2976 default:
2977 gcc_unreachable ();
2978 }
2979
2980 tmp = gen_rtx_MEM (QImode, func);
2981 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2982 const0_rtx, const0_rtx));
2983 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2984 RTL_CONST_CALL_P (tmp) = 1;
2985
2986 tmp = get_insns ();
2987 end_sequence ();
2988
2989 emit_libcall_block (tmp, target, reg, equiv);
2990 }
2991
2992 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
2993
2994 void
2995 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
2996 {
2997 rtx func;
2998 int mode;
2999 rtx out_operands[3];
3000
3001 func = alpha_lookup_xfloating_lib_func (code);
3002 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3003
3004 out_operands[0] = operands[1];
3005 out_operands[1] = operands[2];
3006 out_operands[2] = GEN_INT (mode);
3007 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3008 gen_rtx_fmt_ee (code, TFmode, operands[1],
3009 operands[2]));
3010 }
3011
3012 /* Emit an X_floating library function call for a comparison. */
3013
3014 static rtx
3015 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3016 {
3017 enum rtx_code cmp_code, res_code;
3018 rtx func, out, operands[2], note;
3019
3020 /* X_floating library comparison functions return
3021 -1 unordered
3022 0 false
3023 1 true
3024 Convert the comparison into one against the raw return value. */
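/* So, for example, a source-level unordered comparison is implemented
by calling _OtsEqlX and then testing whether the raw result is less
than zero, i.e. equal to -1. */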
3025
3026 cmp_code = *pcode;
3027 switch (cmp_code)
3028 {
3029 case UNORDERED:
3030 cmp_code = EQ;
3031 res_code = LT;
3032 break;
3033 case ORDERED:
3034 cmp_code = EQ;
3035 res_code = GE;
3036 break;
3037 case NE:
3038 res_code = NE;
3039 break;
3040 case EQ:
3041 case LT:
3042 case GT:
3043 case LE:
3044 case GE:
3045 res_code = GT;
3046 break;
3047 default:
3048 gcc_unreachable ();
3049 }
3050 *pcode = res_code;
3051
3052 func = alpha_lookup_xfloating_lib_func (cmp_code);
3053
3054 operands[0] = op0;
3055 operands[1] = op1;
3056 out = gen_reg_rtx (DImode);
3057
3058 /* What's actually returned is -1,0,1, not a proper boolean value,
3059 so use an EXPR_LIST as with a generic libcall instead of a
3060 comparison type expression. */
3061 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3062 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3063 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3064 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3065
3066 return out;
3067 }
3068
3069 /* Emit an X_floating library function call for a conversion. */
3070
3071 void
3072 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3073 {
3074 int noperands = 1, mode;
3075 rtx out_operands[2];
3076 rtx func;
3077 enum rtx_code code = orig_code;
3078
3079 if (code == UNSIGNED_FIX)
3080 code = FIX;
3081
3082 func = alpha_lookup_xfloating_lib_func (code);
3083
3084 out_operands[0] = operands[1];
3085
3086 switch (code)
3087 {
3088 case FIX:
3089 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3090 out_operands[1] = GEN_INT (mode);
3091 noperands = 2;
3092 break;
3093 case FLOAT_TRUNCATE:
3094 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3095 out_operands[1] = GEN_INT (mode);
3096 noperands = 2;
3097 break;
3098 default:
3099 break;
3100 }
3101
3102 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3103 gen_rtx_fmt_e (orig_code,
3104 GET_MODE (operands[0]),
3105 operands[1]));
3106 }
3107
3108 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3109 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3110 guarantee that the sequence
3111 set (OP[0] OP[2])
3112 set (OP[1] OP[3])
3113 is valid. Naturally, output operand ordering is little-endian.
3114 This is used by *movtf_internal and *movti_internal. */
3115
3116 void
3117 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3118 bool fixup_overlap)
3119 {
3120 switch (GET_CODE (operands[1]))
3121 {
3122 case REG:
3123 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3124 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3125 break;
3126
3127 case MEM:
3128 operands[3] = adjust_address (operands[1], DImode, 8);
3129 operands[2] = adjust_address (operands[1], DImode, 0);
3130 break;
3131
3132 case CONST_INT:
3133 case CONST_DOUBLE:
3134 gcc_assert (operands[1] == CONST0_RTX (mode));
3135 operands[2] = operands[3] = const0_rtx;
3136 break;
3137
3138 default:
3139 gcc_unreachable ();
3140 }
3141
3142 switch (GET_CODE (operands[0]))
3143 {
3144 case REG:
3145 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3146 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3147 break;
3148
3149 case MEM:
3150 operands[1] = adjust_address (operands[0], DImode, 8);
3151 operands[0] = adjust_address (operands[0], DImode, 0);
3152 break;
3153
3154 default:
3155 gcc_unreachable ();
3156 }
3157
3158 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3159 {
3160 rtx tmp;
3161 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3162 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3163 }
3164 }
3165
3166 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3167 op2 is a register containing the sign bit, operation is the
3168 logical operation to be performed. */
3169
3170 void
3171 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3172 {
3173 rtx high_bit = operands[2];
3174 rtx scratch;
3175 int move;
3176
3177 alpha_split_tmode_pair (operands, TFmode, false);
3178
3179 /* Detect three flavors of operand overlap. */
3180 move = 1;
3181 if (rtx_equal_p (operands[0], operands[2]))
3182 move = 0;
3183 else if (rtx_equal_p (operands[1], operands[2]))
3184 {
3185 if (rtx_equal_p (operands[0], high_bit))
3186 move = 2;
3187 else
3188 move = -1;
3189 }
3190
3191 if (move < 0)
3192 emit_move_insn (operands[0], operands[2]);
3193
3194 /* ??? If the destination overlaps both source tf and high_bit, then
3195 assume source tf is dead in its entirety and use the other half
3196 for a scratch register. Otherwise "scratch" is just the proper
3197 destination register. */
3198 scratch = operands[move < 2 ? 1 : 3];
3199
3200 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3201
3202 if (move > 0)
3203 {
3204 emit_move_insn (operands[0], operands[2]);
3205 if (move > 1)
3206 emit_move_insn (operands[1], scratch);
3207 }
3208 }
3209 \f
3210 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3211 unaligned data:
3212
3213 unsigned: signed:
3214 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3215 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3216 lda r3,X(r11) lda r3,X+2(r11)
3217 extwl r1,r3,r1 extql r1,r3,r1
3218 extwh r2,r3,r2 extqh r2,r3,r2
3219 or r1,r2,r1 or r1,r2,r1
3220 sra r1,48,r1
3221
3222 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3223 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3224 lda r3,X(r11) lda r3,X(r11)
3225 extll r1,r3,r1 extll r1,r3,r1
3226 extlh r2,r3,r2 extlh r2,r3,r2
3227 or r1,r2,r1 addl r1,r2,r1
3228
3229 quad: ldq_u r1,X(r11)
3230 ldq_u r2,X+7(r11)
3231 lda r3,X(r11)
3232 extql r1,r3,r1
3233 extqh r2,r3,r2
3234 or r1,r2,r1
3235 */
3236
3237 void
3238 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3239 HOST_WIDE_INT ofs, int sign)
3240 {
3241 rtx meml, memh, addr, extl, exth, tmp, mema;
3242 enum machine_mode mode;
3243
3244 if (TARGET_BWX && size == 2)
3245 {
3246 meml = adjust_address (mem, QImode, ofs);
3247 memh = adjust_address (mem, QImode, ofs+1);
3248 extl = gen_reg_rtx (DImode);
3249 exth = gen_reg_rtx (DImode);
3250 emit_insn (gen_zero_extendqidi2 (extl, meml));
3251 emit_insn (gen_zero_extendqidi2 (exth, memh));
3252 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3253 NULL, 1, OPTAB_LIB_WIDEN);
3254 addr = expand_simple_binop (DImode, IOR, extl, exth,
3255 NULL, 1, OPTAB_LIB_WIDEN);
3256
3257 if (sign && GET_MODE (tgt) != HImode)
3258 {
3259 addr = gen_lowpart (HImode, addr);
3260 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3261 }
3262 else
3263 {
3264 if (GET_MODE (tgt) != DImode)
3265 addr = gen_lowpart (GET_MODE (tgt), addr);
3266 emit_move_insn (tgt, addr);
3267 }
3268 return;
3269 }
3270
3271 meml = gen_reg_rtx (DImode);
3272 memh = gen_reg_rtx (DImode);
3273 addr = gen_reg_rtx (DImode);
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276
3277 mema = XEXP (mem, 0);
3278 if (GET_CODE (mema) == LO_SUM)
3279 mema = force_reg (Pmode, mema);
3280
3281 /* AND addresses cannot be in any alias set, since they may implicitly
3282 alias surrounding code. Ideally we'd have some alias set that
3283 covered all types except those with alignment 8 or higher. */
3284
3285 tmp = change_address (mem, DImode,
3286 gen_rtx_AND (DImode,
3287 plus_constant (mema, ofs),
3288 GEN_INT (-8)));
3289 set_mem_alias_set (tmp, 0);
3290 emit_move_insn (meml, tmp);
3291
3292 tmp = change_address (mem, DImode,
3293 gen_rtx_AND (DImode,
3294 plus_constant (mema, ofs + size - 1),
3295 GEN_INT (-8)));
3296 set_mem_alias_set (tmp, 0);
3297 emit_move_insn (memh, tmp);
3298
3299 if (sign && size == 2)
3300 {
3301 emit_move_insn (addr, plus_constant (mema, ofs+2));
3302
3303 emit_insn (gen_extql (extl, meml, addr));
3304 emit_insn (gen_extqh (exth, memh, addr));
3305
3306 /* We must use tgt here for the target. Alpha-vms port fails if we use
3307 addr for the target, because addr is marked as a pointer and combine
3308 knows that pointers are always sign-extended 32-bit values. */
3309 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3310 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3311 addr, 1, OPTAB_WIDEN);
3312 }
3313 else
3314 {
3315 emit_move_insn (addr, plus_constant (mema, ofs));
3316 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3317 switch ((int) size)
3318 {
3319 case 2:
3320 emit_insn (gen_extwh (exth, memh, addr));
3321 mode = HImode;
3322 break;
3323 case 4:
3324 emit_insn (gen_extlh (exth, memh, addr));
3325 mode = SImode;
3326 break;
3327 case 8:
3328 emit_insn (gen_extqh (exth, memh, addr));
3329 mode = DImode;
3330 break;
3331 default:
3332 gcc_unreachable ();
3333 }
3334
3335 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3336 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3337 sign, OPTAB_WIDEN);
3338 }
3339
3340 if (addr != tgt)
3341 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3342 }
3343
3344 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3345
3346 void
3347 alpha_expand_unaligned_store (rtx dst, rtx src,
3348 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3349 {
3350 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3351
3352 if (TARGET_BWX && size == 2)
3353 {
3354 if (src != const0_rtx)
3355 {
3356 dstl = gen_lowpart (QImode, src);
3357 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3358 NULL, 1, OPTAB_LIB_WIDEN);
3359 dsth = gen_lowpart (QImode, dsth);
3360 }
3361 else
3362 dstl = dsth = const0_rtx;
3363
3364 meml = adjust_address (dst, QImode, ofs);
3365 memh = adjust_address (dst, QImode, ofs+1);
3366
3367 emit_move_insn (meml, dstl);
3368 emit_move_insn (memh, dsth);
3369 return;
3370 }
3371
3372 dstl = gen_reg_rtx (DImode);
3373 dsth = gen_reg_rtx (DImode);
3374 insl = gen_reg_rtx (DImode);
3375 insh = gen_reg_rtx (DImode);
3376
3377 dsta = XEXP (dst, 0);
3378 if (GET_CODE (dsta) == LO_SUM)
3379 dsta = force_reg (Pmode, dsta);
3380
3381 /* AND addresses cannot be in any alias set, since they may implicitly
3382 alias surrounding code. Ideally we'd have some alias set that
3383 covered all types except those with alignment 8 or higher. */
3384
3385 meml = change_address (dst, DImode,
3386 gen_rtx_AND (DImode,
3387 plus_constant (dsta, ofs),
3388 GEN_INT (-8)));
3389 set_mem_alias_set (meml, 0);
3390
3391 memh = change_address (dst, DImode,
3392 gen_rtx_AND (DImode,
3393 plus_constant (dsta, ofs + size - 1),
3394 GEN_INT (-8)));
3395 set_mem_alias_set (memh, 0);
3396
3397 emit_move_insn (dsth, memh);
3398 emit_move_insn (dstl, meml);
3399
3400 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3401
3402 if (src != CONST0_RTX (GET_MODE (src)))
3403 {
3404 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3405 GEN_INT (size*8), addr));
3406
3407 switch ((int) size)
3408 {
3409 case 2:
3410 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3411 break;
3412 case 4:
3413 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3414 break;
3415 case 8:
3416 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3417 break;
3418 default:
3419 gcc_unreachable ();
3420 }
3421 }
3422
3423 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3424
3425 switch ((int) size)
3426 {
3427 case 2:
3428 emit_insn (gen_mskwl (dstl, dstl, addr));
3429 break;
3430 case 4:
3431 emit_insn (gen_mskll (dstl, dstl, addr));
3432 break;
3433 case 8:
3434 emit_insn (gen_mskql (dstl, dstl, addr));
3435 break;
3436 default:
3437 gcc_unreachable ();
3438 }
3439
3440 if (src != CONST0_RTX (GET_MODE (src)))
3441 {
3442 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3443 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3444 }
3445
3446 /* Must store high before low for degenerate case of aligned. */
3447 emit_move_insn (memh, dsth);
3448 emit_move_insn (meml, dstl);
3449 }
3450
3451 /* The block move code tries to maximize speed by separating loads and
3452 stores at the expense of register pressure: we load all of the data
3453 before we store it back out. Two secondary effects are worth
3454 mentioning: this speeds copying to and from both aligned and unaligned
3455 buffers, and it makes the code significantly easier to write. */
3456
3457 #define MAX_MOVE_WORDS 8
3458
3459 /* Load an integral number of consecutive unaligned quadwords. */
3460
3461 static void
3462 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3463 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3464 {
3465 rtx const im8 = GEN_INT (-8);
3466 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3467 rtx sreg, areg, tmp, smema;
3468 HOST_WIDE_INT i;
3469
3470 smema = XEXP (smem, 0);
3471 if (GET_CODE (smema) == LO_SUM)
3472 smema = force_reg (Pmode, smema);
3473
3474 /* Generate all the tmp registers we need. */
3475 for (i = 0; i < words; ++i)
3476 {
3477 data_regs[i] = out_regs[i];
3478 ext_tmps[i] = gen_reg_rtx (DImode);
3479 }
3480 data_regs[words] = gen_reg_rtx (DImode);
3481
3482 if (ofs != 0)
3483 smem = adjust_address (smem, GET_MODE (smem), ofs);
3484
3485 /* Load up all of the source data. */
3486 for (i = 0; i < words; ++i)
3487 {
3488 tmp = change_address (smem, DImode,
3489 gen_rtx_AND (DImode,
3490 plus_constant (smema, 8*i),
3491 im8));
3492 set_mem_alias_set (tmp, 0);
3493 emit_move_insn (data_regs[i], tmp);
3494 }
3495
3496 tmp = change_address (smem, DImode,
3497 gen_rtx_AND (DImode,
3498 plus_constant (smema, 8*words - 1),
3499 im8));
3500 set_mem_alias_set (tmp, 0);
3501 emit_move_insn (data_regs[words], tmp);
3502
3503 /* Extract the half-word fragments. Unfortunately DEC decided to make
3504 extxh with offset zero a noop instead of zeroing the register, so
3505 we must take care of that edge condition ourselves with cmov. */
3506
3507 sreg = copy_addr_to_reg (smema);
3508 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3509 1, OPTAB_WIDEN);
3510 for (i = 0; i < words; ++i)
3511 {
3512 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3513 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3514 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3515 gen_rtx_IF_THEN_ELSE (DImode,
3516 gen_rtx_EQ (DImode, areg,
3517 const0_rtx),
3518 const0_rtx, ext_tmps[i])));
3519 }
3520
3521 /* Merge the half-words into whole words. */
3522 for (i = 0; i < words; ++i)
3523 {
3524 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3525 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3526 }
3527 }
3528
3529 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3530 may be NULL to store zeros. */
3531
3532 static void
3533 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3534 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3535 {
3536 rtx const im8 = GEN_INT (-8);
3537 rtx ins_tmps[MAX_MOVE_WORDS];
3538 rtx st_tmp_1, st_tmp_2, dreg;
3539 rtx st_addr_1, st_addr_2, dmema;
3540 HOST_WIDE_INT i;
3541
3542 dmema = XEXP (dmem, 0);
3543 if (GET_CODE (dmema) == LO_SUM)
3544 dmema = force_reg (Pmode, dmema);
3545
3546 /* Generate all the tmp registers we need. */
3547 if (data_regs != NULL)
3548 for (i = 0; i < words; ++i)
3549 ins_tmps[i] = gen_reg_rtx(DImode);
3550 st_tmp_1 = gen_reg_rtx(DImode);
3551 st_tmp_2 = gen_reg_rtx(DImode);
3552
3553 if (ofs != 0)
3554 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3555
3556 st_addr_2 = change_address (dmem, DImode,
3557 gen_rtx_AND (DImode,
3558 plus_constant (dmema, words*8 - 1),
3559 im8));
3560 set_mem_alias_set (st_addr_2, 0);
3561
3562 st_addr_1 = change_address (dmem, DImode,
3563 gen_rtx_AND (DImode, dmema, im8));
3564 set_mem_alias_set (st_addr_1, 0);
3565
3566 /* Load up the destination end bits. */
3567 emit_move_insn (st_tmp_2, st_addr_2);
3568 emit_move_insn (st_tmp_1, st_addr_1);
3569
3570 /* Shift the input data into place. */
3571 dreg = copy_addr_to_reg (dmema);
3572 if (data_regs != NULL)
3573 {
3574 for (i = words-1; i >= 0; --i)
3575 {
3576 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3577 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3578 }
3579 for (i = words-1; i > 0; --i)
3580 {
3581 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3582 ins_tmps[i-1], ins_tmps[i-1], 1,
3583 OPTAB_WIDEN);
3584 }
3585 }
3586
3587 /* Split and merge the ends with the destination data. */
3588 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3589 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3590
3591 if (data_regs != NULL)
3592 {
3593 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3594 st_tmp_2, 1, OPTAB_WIDEN);
3595 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3596 st_tmp_1, 1, OPTAB_WIDEN);
3597 }
3598
3599 /* Store it all. */
3600 emit_move_insn (st_addr_2, st_tmp_2);
3601 for (i = words-1; i > 0; --i)
3602 {
3603 rtx tmp = change_address (dmem, DImode,
3604 gen_rtx_AND (DImode,
3605 plus_constant (dmema, i*8),
3606 im8));
3607 set_mem_alias_set (tmp, 0);
3608 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3609 }
3610 emit_move_insn (st_addr_1, st_tmp_1);
3611 }
3612
3613
3614 /* Expand string/block move operations.
3615
3616 operands[0] is the pointer to the destination.
3617 operands[1] is the pointer to the source.
3618 operands[2] is the number of bytes to move.
3619 operands[3] is the alignment. */
3620
3621 int
3622 alpha_expand_block_move (rtx operands[])
3623 {
3624 rtx bytes_rtx = operands[2];
3625 rtx align_rtx = operands[3];
3626 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3627 HOST_WIDE_INT bytes = orig_bytes;
3628 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3629 HOST_WIDE_INT dst_align = src_align;
3630 rtx orig_src = operands[1];
3631 rtx orig_dst = operands[0];
3632 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3633 rtx tmp;
3634 unsigned int i, words, ofs, nregs = 0;
3635
3636 if (orig_bytes <= 0)
3637 return 1;
3638 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3639 return 0;
3640
3641 /* Look for additional alignment information from recorded register info. */
3642
3643 tmp = XEXP (orig_src, 0);
3644 if (REG_P (tmp))
3645 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3646 else if (GET_CODE (tmp) == PLUS
3647 && REG_P (XEXP (tmp, 0))
3648 && CONST_INT_P (XEXP (tmp, 1)))
3649 {
3650 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3651 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3652
3653 if (a > src_align)
3654 {
3655 if (a >= 64 && c % 8 == 0)
3656 src_align = 64;
3657 else if (a >= 32 && c % 4 == 0)
3658 src_align = 32;
3659 else if (a >= 16 && c % 2 == 0)
3660 src_align = 16;
3661 }
3662 }
3663
3664 tmp = XEXP (orig_dst, 0);
3665 if (REG_P (tmp))
3666 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3667 else if (GET_CODE (tmp) == PLUS
3668 && REG_P (XEXP (tmp, 0))
3669 && CONST_INT_P (XEXP (tmp, 1)))
3670 {
3671 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3672 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3673
3674 if (a > dst_align)
3675 {
3676 if (a >= 64 && c % 8 == 0)
3677 dst_align = 64;
3678 else if (a >= 32 && c % 4 == 0)
3679 dst_align = 32;
3680 else if (a >= 16 && c % 2 == 0)
3681 dst_align = 16;
3682 }
3683 }
3684
3685 ofs = 0;
3686 if (src_align >= 64 && bytes >= 8)
3687 {
3688 words = bytes / 8;
3689
3690 for (i = 0; i < words; ++i)
3691 data_regs[nregs + i] = gen_reg_rtx (DImode);
3692
3693 for (i = 0; i < words; ++i)
3694 emit_move_insn (data_regs[nregs + i],
3695 adjust_address (orig_src, DImode, ofs + i * 8));
3696
3697 nregs += words;
3698 bytes -= words * 8;
3699 ofs += words * 8;
3700 }
3701
3702 if (src_align >= 32 && bytes >= 4)
3703 {
3704 words = bytes / 4;
3705
3706 for (i = 0; i < words; ++i)
3707 data_regs[nregs + i] = gen_reg_rtx (SImode);
3708
3709 for (i = 0; i < words; ++i)
3710 emit_move_insn (data_regs[nregs + i],
3711 adjust_address (orig_src, SImode, ofs + i * 4));
3712
3713 nregs += words;
3714 bytes -= words * 4;
3715 ofs += words * 4;
3716 }
3717
3718 if (bytes >= 8)
3719 {
3720 words = bytes / 8;
3721
3722 for (i = 0; i < words+1; ++i)
3723 data_regs[nregs + i] = gen_reg_rtx (DImode);
3724
3725 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3726 words, ofs);
3727
3728 nregs += words;
3729 bytes -= words * 8;
3730 ofs += words * 8;
3731 }
3732
3733 if (! TARGET_BWX && bytes >= 4)
3734 {
3735 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3736 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3737 bytes -= 4;
3738 ofs += 4;
3739 }
3740
3741 if (bytes >= 2)
3742 {
3743 if (src_align >= 16)
3744 {
3745 do {
3746 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3747 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3748 bytes -= 2;
3749 ofs += 2;
3750 } while (bytes >= 2);
3751 }
3752 else if (! TARGET_BWX)
3753 {
3754 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3755 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3756 bytes -= 2;
3757 ofs += 2;
3758 }
3759 }
3760
3761 while (bytes > 0)
3762 {
3763 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3764 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3765 bytes -= 1;
3766 ofs += 1;
3767 }
3768
3769 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3770
3771 /* Now save it back out again. */
3772
3773 i = 0, ofs = 0;
3774
3775 /* Write out the data in whatever chunks reading the source allowed. */
3776 if (dst_align >= 64)
3777 {
3778 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3779 {
3780 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3781 data_regs[i]);
3782 ofs += 8;
3783 i++;
3784 }
3785 }
3786
3787 if (dst_align >= 32)
3788 {
3789 /* If the source has remaining DImode regs, write them out in
3790 two pieces. */
3791 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3792 {
3793 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3794 NULL_RTX, 1, OPTAB_WIDEN);
3795
3796 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3797 gen_lowpart (SImode, data_regs[i]));
3798 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3799 gen_lowpart (SImode, tmp));
3800 ofs += 8;
3801 i++;
3802 }
3803
3804 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3805 {
3806 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3807 data_regs[i]);
3808 ofs += 4;
3809 i++;
3810 }
3811 }
3812
3813 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3814 {
3815 /* Write out a remaining block of words using unaligned methods. */
3816
3817 for (words = 1; i + words < nregs; words++)
3818 if (GET_MODE (data_regs[i + words]) != DImode)
3819 break;
3820
3821 if (words == 1)
3822 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3823 else
3824 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3825 words, ofs);
3826
3827 i += words;
3828 ofs += words * 8;
3829 }
3830
3831 /* Due to the above, this won't be aligned. */
3832 /* ??? If we have more than one of these, consider constructing full
3833 words in registers and using alpha_expand_unaligned_store_words. */
3834 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3835 {
3836 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3837 ofs += 4;
3838 i++;
3839 }
3840
3841 if (dst_align >= 16)
3842 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3843 {
3844 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3845 i++;
3846 ofs += 2;
3847 }
3848 else
3849 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3850 {
3851 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3852 i++;
3853 ofs += 2;
3854 }
3855
3856 /* The remainder must be byte copies. */
3857 while (i < nregs)
3858 {
3859 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3860 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3861 i++;
3862 ofs += 1;
3863 }
3864
3865 return 1;
3866 }
3867
3868 int
3869 alpha_expand_block_clear (rtx operands[])
3870 {
3871 rtx bytes_rtx = operands[1];
3872 rtx align_rtx = operands[3];
3873 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3874 HOST_WIDE_INT bytes = orig_bytes;
3875 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3876 HOST_WIDE_INT alignofs = 0;
3877 rtx orig_dst = operands[0];
3878 rtx tmp;
3879 int i, words, ofs = 0;
3880
3881 if (orig_bytes <= 0)
3882 return 1;
3883 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3884 return 0;
3885
3886 /* Look for stricter alignment. */
3887 tmp = XEXP (orig_dst, 0);
3888 if (REG_P (tmp))
3889 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3890 else if (GET_CODE (tmp) == PLUS
3891 && REG_P (XEXP (tmp, 0))
3892 && CONST_INT_P (XEXP (tmp, 1)))
3893 {
3894 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3895 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3896
3897 if (a > align)
3898 {
3899 if (a >= 64)
3900 align = a, alignofs = 8 - c % 8;
3901 else if (a >= 32)
3902 align = a, alignofs = 4 - c % 4;
3903 else if (a >= 16)
3904 align = a, alignofs = 2 - c % 2;
3905 }
3906 }
3907
3908 /* Handle an unaligned prefix first. */
3909
3910 if (alignofs > 0)
3911 {
3912 #if HOST_BITS_PER_WIDE_INT >= 64
3913 /* Given that alignofs is bounded by align, the only time BWX could
3914 generate three stores is for a 7 byte fill. Prefer two individual
3915 stores over a load/mask/store sequence. */
3916 if ((!TARGET_BWX || alignofs == 7)
3917 && align >= 32
3918 && !(alignofs == 4 && bytes >= 4))
3919 {
3920 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3921 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3922 rtx mem, tmp;
3923 HOST_WIDE_INT mask;
3924
3925 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3926 set_mem_alias_set (mem, 0);
3927
3928 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3929 if (bytes < alignofs)
3930 {
3931 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3932 ofs += bytes;
3933 bytes = 0;
3934 }
3935 else
3936 {
3937 bytes -= alignofs;
3938 ofs += alignofs;
3939 }
3940 alignofs = 0;
3941
3942 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3943 NULL_RTX, 1, OPTAB_WIDEN);
3944
3945 emit_move_insn (mem, tmp);
3946 }
3947 #endif
3948
3949 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3950 {
3951 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3952 bytes -= 1;
3953 ofs += 1;
3954 alignofs -= 1;
3955 }
3956 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3957 {
3958 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3959 bytes -= 2;
3960 ofs += 2;
3961 alignofs -= 2;
3962 }
3963 if (alignofs == 4 && bytes >= 4)
3964 {
3965 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3966 bytes -= 4;
3967 ofs += 4;
3968 alignofs = 0;
3969 }
3970
3971 /* If we've not used the extra lead alignment information by now,
3972 we won't be able to. Downgrade align to match what's left over. */
3973 if (alignofs > 0)
3974 {
3975 alignofs = alignofs & -alignofs;
3976 align = MIN (align, alignofs * BITS_PER_UNIT);
3977 }
3978 }
3979
3980 /* Handle a block of contiguous long-words. */
3981
3982 if (align >= 64 && bytes >= 8)
3983 {
3984 words = bytes / 8;
3985
3986 for (i = 0; i < words; ++i)
3987 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
3988 const0_rtx);
3989
3990 bytes -= words * 8;
3991 ofs += words * 8;
3992 }
3993
3994 /* If the block is large and appropriately aligned, emit a single
3995 store followed by a sequence of stq_u insns. */
3996
3997 if (align >= 32 && bytes > 16)
3998 {
3999 rtx orig_dsta;
4000
4001 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4002 bytes -= 4;
4003 ofs += 4;
4004
4005 orig_dsta = XEXP (orig_dst, 0);
4006 if (GET_CODE (orig_dsta) == LO_SUM)
4007 orig_dsta = force_reg (Pmode, orig_dsta);
4008
4009 words = bytes / 8;
4010 for (i = 0; i < words; ++i)
4011 {
4012 rtx mem
4013 = change_address (orig_dst, DImode,
4014 gen_rtx_AND (DImode,
4015 plus_constant (orig_dsta, ofs + i*8),
4016 GEN_INT (-8)));
4017 set_mem_alias_set (mem, 0);
4018 emit_move_insn (mem, const0_rtx);
4019 }
4020
4021 /* Depending on the alignment, the first stq_u may have overlapped
4022 with the initial stl, which means that the last stq_u didn't
4023 write as much as it would appear. Leave those questionable bytes
4024 unaccounted for. */
4025 bytes -= words * 8 - 4;
4026 ofs += words * 8 - 4;
4027 }
4028
4029 /* Handle a smaller block of aligned words. */
4030
4031 if ((align >= 64 && bytes == 4)
4032 || (align == 32 && bytes >= 4))
4033 {
4034 words = bytes / 4;
4035
4036 for (i = 0; i < words; ++i)
4037 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4038 const0_rtx);
4039
4040 bytes -= words * 4;
4041 ofs += words * 4;
4042 }
4043
4044 /* An unaligned block uses stq_u stores for as many as possible. */
4045
4046 if (bytes >= 8)
4047 {
4048 words = bytes / 8;
4049
4050 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4051
4052 bytes -= words * 8;
4053 ofs += words * 8;
4054 }
4055
4056 /* Next clean up any trailing pieces. */
4057
4058 #if HOST_BITS_PER_WIDE_INT >= 64
4059 /* Count the number of bits in BYTES for which aligned stores could
4060 be emitted. */
4061 words = 0;
4062 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4063 if (bytes & i)
4064 words += 1;
4065
4066 /* If we have appropriate alignment (and it wouldn't take too many
4067 instructions otherwise), mask out the bytes we need. */
4068 if (TARGET_BWX ? words > 2 : bytes > 0)
4069 {
4070 if (align >= 64)
4071 {
4072 rtx mem, tmp;
4073 HOST_WIDE_INT mask;
4074
4075 mem = adjust_address (orig_dst, DImode, ofs);
4076 set_mem_alias_set (mem, 0);
4077
4078 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4079
4080 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4081 NULL_RTX, 1, OPTAB_WIDEN);
4082
4083 emit_move_insn (mem, tmp);
4084 return 1;
4085 }
4086 else if (align >= 32 && bytes < 4)
4087 {
4088 rtx mem, tmp;
4089 HOST_WIDE_INT mask;
4090
4091 mem = adjust_address (orig_dst, SImode, ofs);
4092 set_mem_alias_set (mem, 0);
4093
4094 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4095
4096 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4097 NULL_RTX, 1, OPTAB_WIDEN);
4098
4099 emit_move_insn (mem, tmp);
4100 return 1;
4101 }
4102 }
4103 #endif
4104
4105 if (!TARGET_BWX && bytes >= 4)
4106 {
4107 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4108 bytes -= 4;
4109 ofs += 4;
4110 }
4111
4112 if (bytes >= 2)
4113 {
4114 if (align >= 16)
4115 {
4116 do {
4117 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4118 const0_rtx);
4119 bytes -= 2;
4120 ofs += 2;
4121 } while (bytes >= 2);
4122 }
4123 else if (! TARGET_BWX)
4124 {
4125 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4126 bytes -= 2;
4127 ofs += 2;
4128 }
4129 }
4130
4131 while (bytes > 0)
4132 {
4133 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4134 bytes -= 1;
4135 ofs += 1;
4136 }
4137
4138 return 1;
4139 }
4140
4141 /* Returns a mask so that zap(x, value) == x & mask. */
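/* For illustration (a worked example, not taken from the original sources):
   each set bit I of VALUE selects byte I of X to be zeroed, so the
   corresponding byte of the returned mask is 0x00, while clear bits yield
   0xff bytes.  For instance, with VALUE == 0x0f the low four bytes are
   zapped and the mask is 0xffffffff00000000, i.e.
   zap (x, 0x0f) == x & 0xffffffff00000000.  */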
4142
4143 rtx
4144 alpha_expand_zap_mask (HOST_WIDE_INT value)
4145 {
4146 rtx result;
4147 int i;
4148
4149 if (HOST_BITS_PER_WIDE_INT >= 64)
4150 {
4151 HOST_WIDE_INT mask = 0;
4152
4153 for (i = 7; i >= 0; --i)
4154 {
4155 mask <<= 8;
4156 if (!((value >> i) & 1))
4157 mask |= 0xff;
4158 }
4159
4160 result = gen_int_mode (mask, DImode);
4161 }
4162 else
4163 {
4164 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4165
4166 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4167
4168 for (i = 7; i >= 4; --i)
4169 {
4170 mask_hi <<= 8;
4171 if (!((value >> i) & 1))
4172 mask_hi |= 0xff;
4173 }
4174
4175 for (i = 3; i >= 0; --i)
4176 {
4177 mask_lo <<= 8;
4178 if (!((value >> i) & 1))
4179 mask_lo |= 0xff;
4180 }
4181
4182 result = immed_double_const (mask_lo, mask_hi, DImode);
4183 }
4184
4185 return result;
4186 }
4187
4188 void
4189 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4190 enum machine_mode mode,
4191 rtx op0, rtx op1, rtx op2)
4192 {
4193 op0 = gen_lowpart (mode, op0);
4194
4195 if (op1 == const0_rtx)
4196 op1 = CONST0_RTX (mode);
4197 else
4198 op1 = gen_lowpart (mode, op1);
4199
4200 if (op2 == const0_rtx)
4201 op2 = CONST0_RTX (mode);
4202 else
4203 op2 = gen_lowpart (mode, op2);
4204
4205 emit_insn ((*gen) (op0, op1, op2));
4206 }
4207
4208 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4209 COND is true. Mark the jump as unlikely to be taken. */
4210
4211 static void
4212 emit_unlikely_jump (rtx cond, rtx label)
4213 {
4214 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4215 rtx x;
4216
4217 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4218 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4219 add_reg_note (x, REG_BR_PROB, very_unlikely);
4220 }
4221
4222 /* A subroutine of the atomic operation splitters. Emit a load-locked
4223 instruction in MODE. */
4224
4225 static void
4226 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4227 {
4228 rtx (*fn) (rtx, rtx) = NULL;
4229 if (mode == SImode)
4230 fn = gen_load_locked_si;
4231 else if (mode == DImode)
4232 fn = gen_load_locked_di;
4233 emit_insn (fn (reg, mem));
4234 }
4235
4236 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4237 instruction in MODE. */
4238
4239 static void
4240 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4241 {
4242 rtx (*fn) (rtx, rtx, rtx) = NULL;
4243 if (mode == SImode)
4244 fn = gen_store_conditional_si;
4245 else if (mode == DImode)
4246 fn = gen_store_conditional_di;
4247 emit_insn (fn (res, mem, val));
4248 }
4249
4250 /* A subroutine of the atomic operation splitters. Emit an insxl
4251 instruction in MODE. */
4252
4253 static rtx
4254 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4255 {
4256 rtx ret = gen_reg_rtx (DImode);
4257 rtx (*fn) (rtx, rtx, rtx);
4258
4259 switch (mode)
4260 {
4261 case QImode:
4262 fn = gen_insbl;
4263 break;
4264 case HImode:
4265 fn = gen_inswl;
4266 break;
4267 case SImode:
4268 fn = gen_insll;
4269 break;
4270 case DImode:
4271 fn = gen_insql;
4272 break;
4273 default:
4274 gcc_unreachable ();
4275 }
4276
4277 op1 = force_reg (mode, op1);
4278 emit_insn (fn (ret, op1, op2));
4279
4280 return ret;
4281 }
4282
4283 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4284 to perform. MEM is the memory on which to operate. VAL is the second
4285 operand of the binary operator. BEFORE and AFTER are optional locations to
4286 return the value of MEM either before or after the operation. SCRATCH is
4287 a scratch register. */
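/* For reference, the expansion below produces roughly the following Alpha
   sequence (an illustrative sketch only; the exact insns depend on MODE
   and CODE):

	mb
   1:	ld<x>_l  BEFORE,MEM
	<op>     SCRATCH,BEFORE,VAL	# result also copied to AFTER if requested
	st<x>_c  SCRATCH,MEM		# SCRATCH := store-conditional success
	beq      SCRATCH,1b		# lost the reservation: retry (unlikely)
	mb				*/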
4288
4289 void
4290 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4291 rtx before, rtx after, rtx scratch)
4292 {
4293 enum machine_mode mode = GET_MODE (mem);
4294 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4295
4296 emit_insn (gen_memory_barrier ());
4297
4298 label = gen_label_rtx ();
4299 emit_label (label);
4300 label = gen_rtx_LABEL_REF (DImode, label);
4301
4302 if (before == NULL)
4303 before = scratch;
4304 emit_load_locked (mode, before, mem);
4305
4306 if (code == NOT)
4307 {
4308 x = gen_rtx_AND (mode, before, val);
4309 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4310
4311 x = gen_rtx_NOT (mode, val);
4312 }
4313 else
4314 x = gen_rtx_fmt_ee (code, mode, before, val);
4315 if (after)
4316 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4317 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4318
4319 emit_store_conditional (mode, cond, mem, scratch);
4320
4321 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4322 emit_unlikely_jump (x, label);
4323
4324 emit_insn (gen_memory_barrier ());
4325 }
4326
4327 /* Expand a compare and swap operation. */
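/* An illustrative sketch of the sequence emitted below (SCRATCH doubles as
   the condition register):

	mb
   1:	ld<x>_l  RETVAL,MEM
	cmpeq    RETVAL,OLDVAL,SCRATCH
	beq      SCRATCH,2f		# mismatch: give up (unlikely)
	mov      NEWVAL,SCRATCH
	st<x>_c  SCRATCH,MEM		# SCRATCH := store-conditional success
	beq      SCRATCH,1b		# lost the reservation: retry (unlikely)
	mb
   2:					*/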
4328
4329 void
4330 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4331 rtx scratch)
4332 {
4333 enum machine_mode mode = GET_MODE (mem);
4334 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4335
4336 emit_insn (gen_memory_barrier ());
4337
4338 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4339 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4340 emit_label (XEXP (label1, 0));
4341
4342 emit_load_locked (mode, retval, mem);
4343
4344 x = gen_lowpart (DImode, retval);
4345 if (oldval == const0_rtx)
4346 x = gen_rtx_NE (DImode, x, const0_rtx);
4347 else
4348 {
4349 x = gen_rtx_EQ (DImode, x, oldval);
4350 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4351 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4352 }
4353 emit_unlikely_jump (x, label2);
4354
4355 emit_move_insn (scratch, newval);
4356 emit_store_conditional (mode, cond, mem, scratch);
4357
4358 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4359 emit_unlikely_jump (x, label1);
4360
4361 emit_insn (gen_memory_barrier ());
4362 emit_label (XEXP (label2, 0));
4363 }
4364
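/* Expand a QImode or HImode compare and swap.  This is implemented in terms
   of the containing aligned quadword: the address is rounded down to a
   multiple of 8 and the operation is handed off to the
   sync_compare_and_swap{qi,hi}_1 pattern, whose splitter follows below.  */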
4365 void
4366 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4367 {
4368 enum machine_mode mode = GET_MODE (mem);
4369 rtx addr, align, wdst;
4370 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4371
4372 addr = force_reg (DImode, XEXP (mem, 0));
4373 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4374 NULL_RTX, 1, OPTAB_DIRECT);
4375
4376 oldval = convert_modes (DImode, mode, oldval, 1);
4377 newval = emit_insxl (mode, newval, addr);
4378
4379 wdst = gen_reg_rtx (DImode);
4380 if (mode == QImode)
4381 fn5 = gen_sync_compare_and_swapqi_1;
4382 else
4383 fn5 = gen_sync_compare_and_swaphi_1;
4384 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4385
4386 emit_move_insn (dst, gen_lowpart (mode, wdst));
4387 }
4388
4389 void
4390 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4391 rtx oldval, rtx newval, rtx align,
4392 rtx scratch, rtx cond)
4393 {
4394 rtx label1, label2, mem, width, mask, x;
4395
4396 mem = gen_rtx_MEM (DImode, align);
4397 MEM_VOLATILE_P (mem) = 1;
4398
4399 emit_insn (gen_memory_barrier ());
4400 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4401 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4402 emit_label (XEXP (label1, 0));
4403
4404 emit_load_locked (DImode, scratch, mem);
4405
4406 width = GEN_INT (GET_MODE_BITSIZE (mode));
4407 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4408 emit_insn (gen_extxl (dest, scratch, width, addr));
4409
4410 if (oldval == const0_rtx)
4411 x = gen_rtx_NE (DImode, dest, const0_rtx);
4412 else
4413 {
4414 x = gen_rtx_EQ (DImode, dest, oldval);
4415 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4416 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4417 }
4418 emit_unlikely_jump (x, label2);
4419
4420 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4421 emit_insn (gen_iordi3 (scratch, scratch, newval));
4422
4423 emit_store_conditional (DImode, scratch, mem, scratch);
4424
4425 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4426 emit_unlikely_jump (x, label1);
4427
4428 emit_insn (gen_memory_barrier ());
4429 emit_label (XEXP (label2, 0));
4430 }
4431
4432 /* Expand an atomic exchange operation. */
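/* An illustrative sketch of the sequence emitted below:

   1:	ld<x>_l  RETVAL,MEM
	mov      VAL,SCRATCH
	st<x>_c  SCRATCH,MEM		# SCRATCH := store-conditional success
	beq      SCRATCH,1b		# lost the reservation: retry (unlikely)
	mb				*/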
4433
4434 void
4435 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4436 {
4437 enum machine_mode mode = GET_MODE (mem);
4438 rtx label, x, cond = gen_lowpart (DImode, scratch);
4439
4440 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4441 emit_label (XEXP (label, 0));
4442
4443 emit_load_locked (mode, retval, mem);
4444 emit_move_insn (scratch, val);
4445 emit_store_conditional (mode, cond, mem, scratch);
4446
4447 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4448 emit_unlikely_jump (x, label);
4449
4450 emit_insn (gen_memory_barrier ());
4451 }
4452
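/* Expand a QImode or HImode atomic exchange, again by way of the containing
   aligned quadword and the sync_lock_test_and_set{qi,hi}_1 pattern, whose
   splitter follows below.  */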
4453 void
4454 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4455 {
4456 enum machine_mode mode = GET_MODE (mem);
4457 rtx addr, align, wdst;
4458 rtx (*fn4) (rtx, rtx, rtx, rtx);
4459
4460 /* Force the address into a register. */
4461 addr = force_reg (DImode, XEXP (mem, 0));
4462
4463 /* Align it to a multiple of 8. */
4464 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4465 NULL_RTX, 1, OPTAB_DIRECT);
4466
4467 /* Insert val into the correct byte location within the word. */
4468 val = emit_insxl (mode, val, addr);
4469
4470 wdst = gen_reg_rtx (DImode);
4471 if (mode == QImode)
4472 fn4 = gen_sync_lock_test_and_setqi_1;
4473 else
4474 fn4 = gen_sync_lock_test_and_sethi_1;
4475 emit_insn (fn4 (wdst, addr, val, align));
4476
4477 emit_move_insn (dst, gen_lowpart (mode, wdst));
4478 }
4479
4480 void
4481 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4482 rtx val, rtx align, rtx scratch)
4483 {
4484 rtx label, mem, width, mask, x;
4485
4486 mem = gen_rtx_MEM (DImode, align);
4487 MEM_VOLATILE_P (mem) = 1;
4488
4489 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4490 emit_label (XEXP (label, 0));
4491
4492 emit_load_locked (DImode, scratch, mem);
4493
4494 width = GEN_INT (GET_MODE_BITSIZE (mode));
4495 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4496 emit_insn (gen_extxl (dest, scratch, width, addr));
4497 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4498 emit_insn (gen_iordi3 (scratch, scratch, val));
4499
4500 emit_store_conditional (DImode, scratch, mem, scratch);
4501
4502 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4503 emit_unlikely_jump (x, label);
4504
4505 emit_insn (gen_memory_barrier ());
4506 }
4507 \f
4508 /* Adjust the cost of a scheduling dependency. Return the new cost of
4509 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4510
4511 static int
4512 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4513 {
4514 enum attr_type dep_insn_type;
4515
4516 /* If the dependence is an anti-dependence, there is no cost. For an
4517 output dependence, there is sometimes a cost, but it doesn't seem
4518 worth handling those few cases. */
4519 if (REG_NOTE_KIND (link) != 0)
4520 return cost;
4521
4522 /* If we can't recognize the insns, we can't really do anything. */
4523 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4524 return cost;
4525
4526 dep_insn_type = get_attr_type (dep_insn);
4527
4528 /* Bring in the user-defined memory latency. */
4529 if (dep_insn_type == TYPE_ILD
4530 || dep_insn_type == TYPE_FLD
4531 || dep_insn_type == TYPE_LDSYM)
4532 cost += alpha_memory_latency-1;
4533
4534 /* Everything else handled in DFA bypasses now. */
4535
4536 return cost;
4537 }
4538
4539 /* The number of instructions that can be issued per cycle. */
4540
4541 static int
4542 alpha_issue_rate (void)
4543 {
4544 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4545 }
4546
4547 /* How many alternative schedules to try. This should be as wide as the
4548 scheduling freedom in the DFA, but no wider. Making this value too
4549 large results in extra work for the scheduler.
4550
4551 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4552 alternative schedules. For EV5, we can choose between E0/E1 and
4553 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4554
4555 static int
4556 alpha_multipass_dfa_lookahead (void)
4557 {
4558 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4559 }
4560 \f
4561 /* Machine-specific function data. */
4562
4563 struct GTY(()) machine_function
4564 {
4565 /* For OSF. */
4566 const char *some_ld_name;
4567
4568 /* For TARGET_LD_BUGGY_LDGP. */
4569 rtx gp_save_rtx;
4570
4571 /* For VMS condition handlers. */
4572 bool uses_condition_handler;
4573 };
4574
4575 /* How to allocate a 'struct machine_function'. */
4576
4577 static struct machine_function *
4578 alpha_init_machine_status (void)
4579 {
4580 return ggc_alloc_cleared_machine_function ();
4581 }
4582
4583 /* Support for frame based VMS condition handlers. */
4584
4585 /* A VMS condition handler may be established for a function with a call to
4586 __builtin_establish_vms_condition_handler, and cancelled with a call to
4587 __builtin_revert_vms_condition_handler.
4588
4589 The VMS Condition Handling Facility knows about the existence of a handler
4590 from the procedure descriptor .handler field. As the VMS native compilers do,
4591 we store the user specified handler's address at a fixed location in the
4592 stack frame and point the procedure descriptor at a common wrapper which
4593 fetches the real handler's address and issues an indirect call.
4594
4595 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4596
4597 We force the procedure kind to PT_STACK, and the fixed frame location is
4598 fp+8, just before the register save area. We use the handler_data field in
4599 the procedure descriptor to state the fp offset at which the installed
4600 handler address can be found. */
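/* Illustrative usage only (not taken from the original sources, and the
   exact builtin prototypes here are an assumption -- roughly "handler
   pointer in, previous handler pointer out"):

	void *prev = __builtin_establish_vms_condition_handler (my_handler);
	...
	__builtin_revert_vms_condition_handler ();

   where my_handler is a user-supplied VMS condition handler.  Both builtins
   just read and rewrite the fp+8 slot described above, while the procedure
   descriptor's .handler field keeps pointing at the libgcc
   __gcc_shell_handler wrapper.  */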
4601
4602 #define VMS_COND_HANDLER_FP_OFFSET 8
4603
4604 /* Expand code to store the currently installed user VMS condition handler
4605 into TARGET and install HANDLER as the new condition handler. */
4606
4607 void
4608 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4609 {
4610 rtx handler_slot_address
4611 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4612
4613 rtx handler_slot
4614 = gen_rtx_MEM (DImode, handler_slot_address);
4615
4616 emit_move_insn (target, handler_slot);
4617 emit_move_insn (handler_slot, handler);
4618
4619 /* Notify the start/prologue/epilogue emitters that the condition handler
4620 slot is needed. In addition to reserving the slot space, this will force
4621 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4622 use above is correct. */
4623 cfun->machine->uses_condition_handler = true;
4624 }
4625
4626 /* Expand code to store the current VMS condition handler into TARGET and
4627 nullify it. */
4628
4629 void
4630 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4631 {
4632 /* We implement this by establishing a null condition handler, with the tiny
4633 side effect of setting uses_condition_handler. This is a little bit
4634 pessimistic if no actual builtin_establish call is ever issued, but that
4635 is harmless and not expected to happen anyway. */
4636
4637 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4638 }
4639
4640 /* Functions to save and restore alpha_return_addr_rtx. */
4641
4642 /* Start the ball rolling with RETURN_ADDR_RTX. */
4643
4644 rtx
4645 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4646 {
4647 if (count != 0)
4648 return const0_rtx;
4649
4650 return get_hard_reg_initial_val (Pmode, REG_RA);
4651 }
4652
4653 /* Return or create a memory slot containing the gp value for the current
4654 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4655
4656 rtx
4657 alpha_gp_save_rtx (void)
4658 {
4659 rtx seq, m = cfun->machine->gp_save_rtx;
4660
4661 if (m == NULL)
4662 {
4663 start_sequence ();
4664
4665 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4666 m = validize_mem (m);
4667 emit_move_insn (m, pic_offset_table_rtx);
4668
4669 seq = get_insns ();
4670 end_sequence ();
4671
4672 /* We used to simply emit the sequence after entry_of_function.
4673 However, this breaks the CFG if the first instruction in the
4674 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4675 label. Emit the sequence properly on the edge. We are only
4676 invoked from dw2_build_landing_pads and finish_eh_generation
4677 will call commit_edge_insertions thanks to a kludge. */
4678 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4679
4680 cfun->machine->gp_save_rtx = m;
4681 }
4682
4683 return m;
4684 }
4685
4686 static int
4687 alpha_ra_ever_killed (void)
4688 {
4689 rtx top;
4690
4691 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4692 return (int)df_regs_ever_live_p (REG_RA);
4693
4694 push_topmost_sequence ();
4695 top = get_insns ();
4696 pop_topmost_sequence ();
4697
4698 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4699 }
4700
4701 \f
4702 /* Return the trap mode suffix applicable to the current
4703 instruction, or NULL. */
4704
4705 static const char *
4706 get_trap_mode_suffix (void)
4707 {
4708 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4709
4710 switch (s)
4711 {
4712 case TRAP_SUFFIX_NONE:
4713 return NULL;
4714
4715 case TRAP_SUFFIX_SU:
4716 if (alpha_fptm >= ALPHA_FPTM_SU)
4717 return "su";
4718 return NULL;
4719
4720 case TRAP_SUFFIX_SUI:
4721 if (alpha_fptm >= ALPHA_FPTM_SUI)
4722 return "sui";
4723 return NULL;
4724
4725 case TRAP_SUFFIX_V_SV:
4726 switch (alpha_fptm)
4727 {
4728 case ALPHA_FPTM_N:
4729 return NULL;
4730 case ALPHA_FPTM_U:
4731 return "v";
4732 case ALPHA_FPTM_SU:
4733 case ALPHA_FPTM_SUI:
4734 return "sv";
4735 default:
4736 gcc_unreachable ();
4737 }
4738
4739 case TRAP_SUFFIX_V_SV_SVI:
4740 switch (alpha_fptm)
4741 {
4742 case ALPHA_FPTM_N:
4743 return NULL;
4744 case ALPHA_FPTM_U:
4745 return "v";
4746 case ALPHA_FPTM_SU:
4747 return "sv";
4748 case ALPHA_FPTM_SUI:
4749 return "svi";
4750 default:
4751 gcc_unreachable ();
4752 }
4753 break;
4754
4755 case TRAP_SUFFIX_U_SU_SUI:
4756 switch (alpha_fptm)
4757 {
4758 case ALPHA_FPTM_N:
4759 return NULL;
4760 case ALPHA_FPTM_U:
4761 return "u";
4762 case ALPHA_FPTM_SU:
4763 return "su";
4764 case ALPHA_FPTM_SUI:
4765 return "sui";
4766 default:
4767 gcc_unreachable ();
4768 }
4769 break;
4770
4771 default:
4772 gcc_unreachable ();
4773 }
4774 gcc_unreachable ();
4775 }
4776
4777 /* Return the rounding mode suffix applicable to the current
4778 instruction, or NULL. */
4779
4780 static const char *
4781 get_round_mode_suffix (void)
4782 {
4783 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4784
4785 switch (s)
4786 {
4787 case ROUND_SUFFIX_NONE:
4788 return NULL;
4789 case ROUND_SUFFIX_NORMAL:
4790 switch (alpha_fprm)
4791 {
4792 case ALPHA_FPRM_NORM:
4793 return NULL;
4794 case ALPHA_FPRM_MINF:
4795 return "m";
4796 case ALPHA_FPRM_CHOP:
4797 return "c";
4798 case ALPHA_FPRM_DYN:
4799 return "d";
4800 default:
4801 gcc_unreachable ();
4802 }
4803 break;
4804
4805 case ROUND_SUFFIX_C:
4806 return "c";
4807
4808 default:
4809 gcc_unreachable ();
4810 }
4811 gcc_unreachable ();
4812 }
4813
4814 /* Locate some local-dynamic symbol still in use by this function
4815 so that we can print its name in some movdi_er_tlsldm pattern. */
4816
4817 static int
4818 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4819 {
4820 rtx x = *px;
4821
4822 if (GET_CODE (x) == SYMBOL_REF
4823 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4824 {
4825 cfun->machine->some_ld_name = XSTR (x, 0);
4826 return 1;
4827 }
4828
4829 return 0;
4830 }
4831
4832 static const char *
4833 get_some_local_dynamic_name (void)
4834 {
4835 rtx insn;
4836
4837 if (cfun->machine->some_ld_name)
4838 return cfun->machine->some_ld_name;
4839
4840 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4841 if (INSN_P (insn)
4842 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4843 return cfun->machine->some_ld_name;
4844
4845 gcc_unreachable ();
4846 }
4847
4848 /* Print an operand. Recognize special options, documented below. */
4849
4850 void
4851 print_operand (FILE *file, rtx x, int code)
4852 {
4853 int i;
4854
4855 switch (code)
4856 {
4857 case '~':
4858 /* Print the assembler name of the current function. */
4859 assemble_name (file, alpha_fnname);
4860 break;
4861
4862 case '&':
4863 assemble_name (file, get_some_local_dynamic_name ());
4864 break;
4865
4866 case '/':
4867 {
4868 const char *trap = get_trap_mode_suffix ();
4869 const char *round = get_round_mode_suffix ();
4870
4871 if (trap || round)
4872 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4873 (trap ? trap : ""), (round ? round : ""));
4874 break;
4875 }
4876
4877 case ',':
4878 /* Generates single precision instruction suffix. */
4879 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4880 break;
4881
4882 case '-':
4883 /* Generates double precision instruction suffix. */
4884 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4885 break;
4886
4887 case '#':
4888 if (alpha_this_literal_sequence_number == 0)
4889 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4890 fprintf (file, "%d", alpha_this_literal_sequence_number);
4891 break;
4892
4893 case '*':
4894 if (alpha_this_gpdisp_sequence_number == 0)
4895 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4896 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4897 break;
4898
4899 case 'H':
4900 if (GET_CODE (x) == HIGH)
4901 output_addr_const (file, XEXP (x, 0));
4902 else
4903 output_operand_lossage ("invalid %%H value");
4904 break;
4905
4906 case 'J':
4907 {
4908 const char *lituse;
4909
4910 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4911 {
4912 x = XVECEXP (x, 0, 0);
4913 lituse = "lituse_tlsgd";
4914 }
4915 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4916 {
4917 x = XVECEXP (x, 0, 0);
4918 lituse = "lituse_tlsldm";
4919 }
4920 else if (CONST_INT_P (x))
4921 lituse = "lituse_jsr";
4922 else
4923 {
4924 output_operand_lossage ("invalid %%J value");
4925 break;
4926 }
4927
4928 if (x != const0_rtx)
4929 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4930 }
4931 break;
4932
4933 case 'j':
4934 {
4935 const char *lituse;
4936
4937 #ifdef HAVE_AS_JSRDIRECT_RELOCS
4938 lituse = "lituse_jsrdirect";
4939 #else
4940 lituse = "lituse_jsr";
4941 #endif
4942
4943 gcc_assert (INTVAL (x) != 0);
4944 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4945 }
4946 break;
4947 case 'r':
4948 /* If this operand is the constant zero, write it as "$31". */
4949 if (REG_P (x))
4950 fprintf (file, "%s", reg_names[REGNO (x)]);
4951 else if (x == CONST0_RTX (GET_MODE (x)))
4952 fprintf (file, "$31");
4953 else
4954 output_operand_lossage ("invalid %%r value");
4955 break;
4956
4957 case 'R':
4958 /* Similar, but for floating-point. */
4959 if (REG_P (x))
4960 fprintf (file, "%s", reg_names[REGNO (x)]);
4961 else if (x == CONST0_RTX (GET_MODE (x)))
4962 fprintf (file, "$f31");
4963 else
4964 output_operand_lossage ("invalid %%R value");
4965 break;
4966
4967 case 'N':
4968 /* Write the 1's complement of a constant. */
4969 if (!CONST_INT_P (x))
4970 output_operand_lossage ("invalid %%N value");
4971
4972 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
4973 break;
4974
4975 case 'P':
4976 /* Write 1 << C, for a constant C. */
4977 if (!CONST_INT_P (x))
4978 output_operand_lossage ("invalid %%P value");
4979
4980 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
4981 break;
4982
4983 case 'h':
4984 /* Write the high-order 16 bits of a constant, sign-extended. */
4985 if (!CONST_INT_P (x))
4986 output_operand_lossage ("invalid %%h value");
4987
4988 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
4989 break;
4990
4991 case 'L':
4992 /* Write the low-order 16 bits of a constant, sign-extended. */
4993 if (!CONST_INT_P (x))
4994 output_operand_lossage ("invalid %%L value");
4995
4996 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4997 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
4998 break;
4999
5000 case 'm':
5001 /* Write mask for ZAP insn. */
5002 if (GET_CODE (x) == CONST_DOUBLE)
5003 {
5004 HOST_WIDE_INT mask = 0;
5005 HOST_WIDE_INT value;
5006
5007 value = CONST_DOUBLE_LOW (x);
5008 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5009 i++, value >>= 8)
5010 if (value & 0xff)
5011 mask |= (1 << i);
5012
5013 value = CONST_DOUBLE_HIGH (x);
5014 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5015 i++, value >>= 8)
5016 if (value & 0xff)
5017 mask |= (1 << (i + sizeof (int)));
5018
5019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5020 }
5021
5022 else if (CONST_INT_P (x))
5023 {
5024 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5025
5026 for (i = 0; i < 8; i++, value >>= 8)
5027 if (value & 0xff)
5028 mask |= (1 << i);
5029
5030 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5031 }
5032 else
5033 output_operand_lossage ("invalid %%m value");
5034 break;
5035
5036 case 'M':
5037 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5038 if (!CONST_INT_P (x)
5039 || (INTVAL (x) != 8 && INTVAL (x) != 16
5040 && INTVAL (x) != 32 && INTVAL (x) != 64))
5041 output_operand_lossage ("invalid %%M value");
5042
5043 fprintf (file, "%s",
5044 (INTVAL (x) == 8 ? "b"
5045 : INTVAL (x) == 16 ? "w"
5046 : INTVAL (x) == 32 ? "l"
5047 : "q"));
5048 break;
5049
5050 case 'U':
5051 /* Similar, except do it from the mask. */
5052 if (CONST_INT_P (x))
5053 {
5054 HOST_WIDE_INT value = INTVAL (x);
5055
5056 if (value == 0xff)
5057 {
5058 fputc ('b', file);
5059 break;
5060 }
5061 if (value == 0xffff)
5062 {
5063 fputc ('w', file);
5064 break;
5065 }
5066 if (value == 0xffffffff)
5067 {
5068 fputc ('l', file);
5069 break;
5070 }
5071 if (value == -1)
5072 {
5073 fputc ('q', file);
5074 break;
5075 }
5076 }
5077 else if (HOST_BITS_PER_WIDE_INT == 32
5078 && GET_CODE (x) == CONST_DOUBLE
5079 && CONST_DOUBLE_LOW (x) == 0xffffffff
5080 && CONST_DOUBLE_HIGH (x) == 0)
5081 {
5082 fputc ('l', file);
5083 break;
5084 }
5085 output_operand_lossage ("invalid %%U value");
5086 break;
5087
5088 case 's':
5089 /* Write the constant value divided by 8. */
5090 if (!CONST_INT_P (x)
5091 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5092 || (INTVAL (x) & 7) != 0)
5093 output_operand_lossage ("invalid %%s value");
5094
5095 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5096 break;
5097
5098 case 'S':
5099 /* Same, except compute (64 - c) / 8. */
5100
5101 if (!CONST_INT_P (x)
5102 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5103 || (INTVAL (x) & 7) != 0)
5104 output_operand_lossage ("invalid %%S value");
5105
5106 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5107 break;
5108
5109 case 'C': case 'D': case 'c': case 'd':
5110 /* Write out comparison name. */
5111 {
5112 enum rtx_code c = GET_CODE (x);
5113
5114 if (!COMPARISON_P (x))
5115 output_operand_lossage ("invalid %%C value");
5116
5117 else if (code == 'D')
5118 c = reverse_condition (c);
5119 else if (code == 'c')
5120 c = swap_condition (c);
5121 else if (code == 'd')
5122 c = swap_condition (reverse_condition (c));
5123
5124 if (c == LEU)
5125 fprintf (file, "ule");
5126 else if (c == LTU)
5127 fprintf (file, "ult");
5128 else if (c == UNORDERED)
5129 fprintf (file, "un");
5130 else
5131 fprintf (file, "%s", GET_RTX_NAME (c));
5132 }
5133 break;
5134
5135 case 'E':
5136 /* Write the divide or modulus operator. */
5137 switch (GET_CODE (x))
5138 {
5139 case DIV:
5140 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5141 break;
5142 case UDIV:
5143 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5144 break;
5145 case MOD:
5146 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5147 break;
5148 case UMOD:
5149 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5150 break;
5151 default:
5152 output_operand_lossage ("invalid %%E value");
5153 break;
5154 }
5155 break;
5156
5157 case 'A':
5158 /* Write "_u" for unaligned access. */
5159 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5160 fprintf (file, "_u");
5161 break;
5162
5163 case 0:
5164 if (REG_P (x))
5165 fprintf (file, "%s", reg_names[REGNO (x)]);
5166 else if (MEM_P (x))
5167 output_address (XEXP (x, 0));
5168 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5169 {
5170 switch (XINT (XEXP (x, 0), 1))
5171 {
5172 case UNSPEC_DTPREL:
5173 case UNSPEC_TPREL:
5174 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5175 break;
5176 default:
5177 output_operand_lossage ("unknown relocation unspec");
5178 break;
5179 }
5180 }
5181 else
5182 output_addr_const (file, x);
5183 break;
5184
5185 default:
5186 output_operand_lossage ("invalid %%xn code");
5187 }
5188 }
5189
5190 void
5191 print_operand_address (FILE *file, rtx addr)
5192 {
5193 int basereg = 31;
5194 HOST_WIDE_INT offset = 0;
5195
5196 if (GET_CODE (addr) == AND)
5197 addr = XEXP (addr, 0);
5198
5199 if (GET_CODE (addr) == PLUS
5200 && CONST_INT_P (XEXP (addr, 1)))
5201 {
5202 offset = INTVAL (XEXP (addr, 1));
5203 addr = XEXP (addr, 0);
5204 }
5205
5206 if (GET_CODE (addr) == LO_SUM)
5207 {
5208 const char *reloc16, *reloclo;
5209 rtx op1 = XEXP (addr, 1);
5210
5211 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5212 {
5213 op1 = XEXP (op1, 0);
5214 switch (XINT (op1, 1))
5215 {
5216 case UNSPEC_DTPREL:
5217 reloc16 = NULL;
5218 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5219 break;
5220 case UNSPEC_TPREL:
5221 reloc16 = NULL;
5222 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5223 break;
5224 default:
5225 output_operand_lossage ("unknown relocation unspec");
5226 return;
5227 }
5228
5229 output_addr_const (file, XVECEXP (op1, 0, 0));
5230 }
5231 else
5232 {
5233 reloc16 = "gprel";
5234 reloclo = "gprellow";
5235 output_addr_const (file, op1);
5236 }
5237
5238 if (offset)
5239 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5240
5241 addr = XEXP (addr, 0);
5242 switch (GET_CODE (addr))
5243 {
5244 case REG:
5245 basereg = REGNO (addr);
5246 break;
5247
5248 case SUBREG:
5249 basereg = subreg_regno (addr);
5250 break;
5251
5252 default:
5253 gcc_unreachable ();
5254 }
5255
5256 fprintf (file, "($%d)\t\t!%s", basereg,
5257 (basereg == 29 ? reloc16 : reloclo));
5258 return;
5259 }
5260
5261 switch (GET_CODE (addr))
5262 {
5263 case REG:
5264 basereg = REGNO (addr);
5265 break;
5266
5267 case SUBREG:
5268 basereg = subreg_regno (addr);
5269 break;
5270
5271 case CONST_INT:
5272 offset = INTVAL (addr);
5273 break;
5274
5275 #if TARGET_ABI_OPEN_VMS
5276 case SYMBOL_REF:
5277 fprintf (file, "%s", XSTR (addr, 0));
5278 return;
5279
5280 case CONST:
5281 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5282 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5283 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5284 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5285 INTVAL (XEXP (XEXP (addr, 0), 1)));
5286 return;
5287
5288 #endif
5289 default:
5290 gcc_unreachable ();
5291 }
5292
5293 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5294 }
5295 \f
5296 /* Emit RTL insns to initialize the variable parts of a trampoline at
5297 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5298 for the static chain value for the function. */
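/* For reference, the OSF trampoline built below is laid out as follows (a
   sketch derived from the stores further down, not an independent
   specification):

	offset  0: four insns: ldq $1,24($27); ldq $27,16($27);
	           jmp $31,($27),0; nop -- loaded via $27, which points
	           at the trampoline itself when it is called
	offset 16: FNADDR
	offset 24: CHAIN_VALUE

   On VMS, offset 0 instead holds the masked copy of the callee's procedure
   descriptor and offset 8 the ..tr transfer address.  */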
5299
5300 static void
5301 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5302 {
5303 rtx fnaddr, mem, word1, word2;
5304
5305 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5306
5307 #ifdef POINTERS_EXTEND_UNSIGNED
5308 fnaddr = convert_memory_address (Pmode, fnaddr);
5309 chain_value = convert_memory_address (Pmode, chain_value);
5310 #endif
5311
5312 if (TARGET_ABI_OPEN_VMS)
5313 {
5314 const char *fnname;
5315 char *trname;
5316
5317 /* Construct the name of the trampoline entry point. */
5318 fnname = XSTR (fnaddr, 0);
5319 trname = (char *) alloca (strlen (fnname) + 5);
5320 strcpy (trname, fnname);
5321 strcat (trname, "..tr");
5322 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5323 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5324
5325 /* Trampoline (or "bounded") procedure descriptor is constructed from
5326 the function's procedure descriptor with certain fields zeroed IAW
5327 the VMS calling standard. This is stored in the first quadword. */
5328 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5329 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5330 }
5331 else
5332 {
5333 /* These 4 instructions are:
5334 ldq $1,24($27)
5335 ldq $27,16($27)
5336 jmp $31,($27),0
5337 nop
5338 We don't bother setting the HINT field of the jump; the nop
5339 is merely there for padding. */
5340 word1 = GEN_INT (0xa77b0010a43b0018);
5341 word2 = GEN_INT (0x47ff041f6bfb0000);
5342 }
5343
5344 /* Store the first two words, as computed above. */
5345 mem = adjust_address (m_tramp, DImode, 0);
5346 emit_move_insn (mem, word1);
5347 mem = adjust_address (m_tramp, DImode, 8);
5348 emit_move_insn (mem, word2);
5349
5350 /* Store function address and static chain value. */
5351 mem = adjust_address (m_tramp, Pmode, 16);
5352 emit_move_insn (mem, fnaddr);
5353 mem = adjust_address (m_tramp, Pmode, 24);
5354 emit_move_insn (mem, chain_value);
5355
5356 if (TARGET_ABI_OSF)
5357 {
5358 emit_insn (gen_imb ());
5359 #ifdef HAVE_ENABLE_EXECUTE_STACK
5360 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5361 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5362 #endif
5363 }
5364 }
5365 \f
5366 /* Determine where to put an argument to a function.
5367 Value is zero to push the argument on the stack,
5368 or a hard register in which to store the argument.
5369
5370 MODE is the argument's machine mode.
5371 TYPE is the data type of the argument (as a tree).
5372 This is null for libcalls where that information may
5373 not be available.
5374 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5375 the preceding args and about the function being called.
5376 NAMED is nonzero if this argument is a named parameter
5377 (otherwise it is an extra parameter matching an ellipsis).
5378
5379 On Alpha the first 6 words of args are normally in registers
5380 and the rest are pushed. */
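/* A worked example (illustrative, OSF ABI): for a call such as
   f (int a, double b, int c), argument slots 0..2 are used, so A is passed
   in $16, B in $f17 (slot 1 of the FP bank), and C in $18.  Aggregates
   always use the integer bank, and anything beyond the sixth slot goes to
   the stack.  */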
5381
5382 static rtx
5383 alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5384 const_tree type, bool named ATTRIBUTE_UNUSED)
5385 {
5386 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5387 int basereg;
5388 int num_args;
5389
5390 /* Don't get confused and pass small structures in FP registers. */
5391 if (type && AGGREGATE_TYPE_P (type))
5392 basereg = 16;
5393 else
5394 {
5395 #ifdef ENABLE_CHECKING
5396 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5397 values here. */
5398 gcc_assert (!COMPLEX_MODE_P (mode));
5399 #endif
5400
5401 /* Set up defaults for FP operands passed in FP registers, and
5402 integral operands passed in integer registers. */
5403 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5404 basereg = 32 + 16;
5405 else
5406 basereg = 16;
5407 }
5408
5409 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5410 the two platforms, so we can't avoid conditional compilation. */
5411 #if TARGET_ABI_OPEN_VMS
5412 {
5413 if (mode == VOIDmode)
5414 return alpha_arg_info_reg_val (*cum);
5415
5416 num_args = cum->num_args;
5417 if (num_args >= 6
5418 || targetm.calls.must_pass_in_stack (mode, type))
5419 return NULL_RTX;
5420 }
5421 #elif TARGET_ABI_OSF
5422 {
5423 if (*cum >= 6)
5424 return NULL_RTX;
5425 num_args = *cum;
5426
5427 /* VOID is passed as a special flag for "last argument". */
5428 if (type == void_type_node)
5429 basereg = 16;
5430 else if (targetm.calls.must_pass_in_stack (mode, type))
5431 return NULL_RTX;
5432 }
5433 #else
5434 #error Unhandled ABI
5435 #endif
5436
5437 return gen_rtx_REG (mode, num_args + basereg);
5438 }
5439
5440 /* Update the data in CUM to advance over an argument
5441 of mode MODE and data type TYPE.
5442 (TYPE is null for libcalls where that information may not be available.) */
5443
5444 static void
5445 alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5446 const_tree type, bool named ATTRIBUTE_UNUSED)
5447 {
5448 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5449 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5450 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5451
5452 #if TARGET_ABI_OSF
5453 *cum += increment;
5454 #else
5455 if (!onstack && cum->num_args < 6)
5456 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5457 cum->num_args += increment;
5458 #endif
5459 }
5460
5461 static int
5462 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5463 enum machine_mode mode ATTRIBUTE_UNUSED,
5464 tree type ATTRIBUTE_UNUSED,
5465 bool named ATTRIBUTE_UNUSED)
5466 {
5467 int words = 0;
5468 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5469
5470 #if TARGET_ABI_OPEN_VMS
5471 if (cum->num_args < 6
5472 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5473 words = 6 - cum->num_args;
5474 #elif TARGET_ABI_OSF
5475 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5476 words = 6 - *cum;
5477 #else
5478 #error Unhandled ABI
5479 #endif
5480
5481 return words * UNITS_PER_WORD;
5482 }
5483
5484
5485 /* Return true if TYPE must be returned in memory, instead of in registers. */
5486
5487 static bool
5488 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5489 {
5490 enum machine_mode mode = VOIDmode;
5491 int size;
5492
5493 if (type)
5494 {
5495 mode = TYPE_MODE (type);
5496
5497 /* All aggregates are returned in memory, except on OpenVMS where
5498 records that fit in 64 bits should be returned by immediate value
5499 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5500 if (TARGET_ABI_OPEN_VMS
5501 && TREE_CODE (type) != ARRAY_TYPE
5502 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5503 return false;
5504
5505 if (AGGREGATE_TYPE_P (type))
5506 return true;
5507 }
5508
5509 size = GET_MODE_SIZE (mode);
5510 switch (GET_MODE_CLASS (mode))
5511 {
5512 case MODE_VECTOR_FLOAT:
5513 /* Pass all float vectors in memory, like an aggregate. */
5514 return true;
5515
5516 case MODE_COMPLEX_FLOAT:
5517 /* We judge complex floats on the size of their element,
5518 not the size of the whole type. */
5519 size = GET_MODE_UNIT_SIZE (mode);
5520 break;
5521
5522 case MODE_INT:
5523 case MODE_FLOAT:
5524 case MODE_COMPLEX_INT:
5525 case MODE_VECTOR_INT:
5526 break;
5527
5528 default:
5529 /* ??? We get called on all sorts of random stuff from
5530 aggregate_value_p. We must return something, but it's not
5531 clear what's safe to return. Pretend it's a struct I
5532 guess. */
5533 return true;
5534 }
5535
5536 /* Otherwise types must fit in one register. */
5537 return size > UNITS_PER_WORD;
5538 }
5539
5540 /* Return true if TYPE should be passed by invisible reference. */
5541
5542 static bool
5543 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5544 enum machine_mode mode,
5545 const_tree type ATTRIBUTE_UNUSED,
5546 bool named ATTRIBUTE_UNUSED)
5547 {
5548 return mode == TFmode || mode == TCmode;
5549 }
5550
5551 /* Define how to find the value returned by a function. VALTYPE is the
5552 data type of the value (as a tree). If the precise function being
5553 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5554 MODE is set instead of VALTYPE for libcalls.
5555
5556 On Alpha the value is found in $0 for integer functions and
5557 $f0 for floating-point functions. */
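/* For example (an observation about the code below, not new policy): an
   integer or pointer result comes back in $0, a float or double in $f0,
   and a complex float/double as the pair $f0/$f1 via the PARALLEL
   constructed for MODE_COMPLEX_FLOAT.  */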
5558
5559 rtx
5560 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5561 enum machine_mode mode)
5562 {
5563 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5564 enum mode_class mclass;
5565
5566 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5567
5568 if (valtype)
5569 mode = TYPE_MODE (valtype);
5570
5571 mclass = GET_MODE_CLASS (mode);
5572 switch (mclass)
5573 {
5574 case MODE_INT:
5575 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5576 where we have them returning both SImode and DImode. */
5577 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5578 PROMOTE_MODE (mode, dummy, valtype);
5579 /* FALLTHRU */
5580
5581 case MODE_COMPLEX_INT:
5582 case MODE_VECTOR_INT:
5583 regnum = 0;
5584 break;
5585
5586 case MODE_FLOAT:
5587 regnum = 32;
5588 break;
5589
5590 case MODE_COMPLEX_FLOAT:
5591 {
5592 enum machine_mode cmode = GET_MODE_INNER (mode);
5593
5594 return gen_rtx_PARALLEL
5595 (VOIDmode,
5596 gen_rtvec (2,
5597 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5598 const0_rtx),
5599 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5600 GEN_INT (GET_MODE_SIZE (cmode)))));
5601 }
5602
5603 case MODE_RANDOM:
5604 /* We should only reach here for BLKmode on VMS. */
5605 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5606 regnum = 0;
5607 break;
5608
5609 default:
5610 gcc_unreachable ();
5611 }
5612
5613 return gen_rtx_REG (mode, regnum);
5614 }
5615
5616 /* TCmode complex values are passed by invisible reference. We
5617 should not split these values. */
5618
5619 static bool
5620 alpha_split_complex_arg (const_tree type)
5621 {
5622 return TYPE_MODE (type) != TCmode;
5623 }
5624
5625 static tree
5626 alpha_build_builtin_va_list (void)
5627 {
5628 tree base, ofs, space, record, type_decl;
5629
5630 if (TARGET_ABI_OPEN_VMS)
5631 return ptr_type_node;
5632
5633 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5634 type_decl = build_decl (BUILTINS_LOCATION,
5635 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5636 TYPE_STUB_DECL (record) = type_decl;
5637 TYPE_NAME (record) = type_decl;
5638
5639 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5640
5641 /* Dummy field to prevent alignment warnings. */
5642 space = build_decl (BUILTINS_LOCATION,
5643 FIELD_DECL, NULL_TREE, integer_type_node);
5644 DECL_FIELD_CONTEXT (space) = record;
5645 DECL_ARTIFICIAL (space) = 1;
5646 DECL_IGNORED_P (space) = 1;
5647
5648 ofs = build_decl (BUILTINS_LOCATION,
5649 FIELD_DECL, get_identifier ("__offset"),
5650 integer_type_node);
5651 DECL_FIELD_CONTEXT (ofs) = record;
5652 DECL_CHAIN (ofs) = space;
5653 /* ??? This is a hack, __offset is marked volatile to prevent
5654 DCE that confuses stdarg optimization and results in
5655 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5656 TREE_THIS_VOLATILE (ofs) = 1;
5657
5658 base = build_decl (BUILTINS_LOCATION,
5659 FIELD_DECL, get_identifier ("__base"),
5660 ptr_type_node);
5661 DECL_FIELD_CONTEXT (base) = record;
5662 DECL_CHAIN (base) = ofs;
5663
5664 TYPE_FIELDS (record) = base;
5665 layout_type (record);
5666
5667 va_list_gpr_counter_field = ofs;
5668 return record;
5669 }
5670
5671 #if TARGET_ABI_OSF
5672 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5673 and constant additions. */
5674
5675 static gimple
5676 va_list_skip_additions (tree lhs)
5677 {
5678 gimple stmt;
5679
5680 for (;;)
5681 {
5682 enum tree_code code;
5683
5684 stmt = SSA_NAME_DEF_STMT (lhs);
5685
5686 if (gimple_code (stmt) == GIMPLE_PHI)
5687 return stmt;
5688
5689 if (!is_gimple_assign (stmt)
5690 || gimple_assign_lhs (stmt) != lhs)
5691 return NULL;
5692
5693 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5694 return stmt;
5695 code = gimple_assign_rhs_code (stmt);
5696 if (!CONVERT_EXPR_CODE_P (code)
5697 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5698 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5699 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5700 return stmt;
5701
5702 lhs = gimple_assign_rhs1 (stmt);
5703 }
5704 }
5705
5706 /* Check if LHS = RHS statement is
5707 LHS = *(ap.__base + ap.__offset + cst)
5708 or
5709 LHS = *(ap.__base
5710 + ((ap.__offset + cst <= 47)
5711 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5712 If the former, indicate that GPR registers are needed,
5713 if the latter, indicate that FPR registers are needed.
5714
5715 Also look for LHS = (*ptr).field, where ptr is one of the forms
5716 listed above.
5717
5718 On alpha, cfun->va_list_gpr_size is used as size of the needed
5719 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5720 registers are needed and bit 1 set if FPR registers are needed.
5721 Return true if va_list references should not be scanned for the
5722 current statement. */
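/* Illustrative mapping (an inference from the patterns above, not an
   addition to them): a load lowered from va_arg (ap, long) takes the first
   form and ends up setting bit 0 below (GPR save area needed), while
   va_arg (ap, double) takes the conditional second form, since FP arguments
   live 48 bytes below __base while the offset is still within the register
   save area, and sets bit 1 (FPR save area needed).  */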
5723
5724 static bool
5725 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5726 {
5727 tree base, offset, rhs;
5728 int offset_arg = 1;
5729 gimple base_stmt;
5730
5731 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5732 != GIMPLE_SINGLE_RHS)
5733 return false;
5734
5735 rhs = gimple_assign_rhs1 (stmt);
5736 while (handled_component_p (rhs))
5737 rhs = TREE_OPERAND (rhs, 0);
5738 if (TREE_CODE (rhs) != MEM_REF
5739 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5740 return false;
5741
5742 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5743 if (stmt == NULL
5744 || !is_gimple_assign (stmt)
5745 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5746 return false;
5747
5748 base = gimple_assign_rhs1 (stmt);
5749 if (TREE_CODE (base) == SSA_NAME)
5750 {
5751 base_stmt = va_list_skip_additions (base);
5752 if (base_stmt
5753 && is_gimple_assign (base_stmt)
5754 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5755 base = gimple_assign_rhs1 (base_stmt);
5756 }
5757
5758 if (TREE_CODE (base) != COMPONENT_REF
5759 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5760 {
5761 base = gimple_assign_rhs2 (stmt);
5762 if (TREE_CODE (base) == SSA_NAME)
5763 {
5764 base_stmt = va_list_skip_additions (base);
5765 if (base_stmt
5766 && is_gimple_assign (base_stmt)
5767 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5768 base = gimple_assign_rhs1 (base_stmt);
5769 }
5770
5771 if (TREE_CODE (base) != COMPONENT_REF
5772 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5773 return false;
5774
5775 offset_arg = 0;
5776 }
5777
5778 base = get_base_address (base);
5779 if (TREE_CODE (base) != VAR_DECL
5780 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5781 return false;
5782
5783 offset = gimple_op (stmt, 1 + offset_arg);
5784 if (TREE_CODE (offset) == SSA_NAME)
5785 {
5786 gimple offset_stmt = va_list_skip_additions (offset);
5787
5788 if (offset_stmt
5789 && gimple_code (offset_stmt) == GIMPLE_PHI)
5790 {
5791 HOST_WIDE_INT sub;
5792 gimple arg1_stmt, arg2_stmt;
5793 tree arg1, arg2;
5794 enum tree_code code1, code2;
5795
5796 if (gimple_phi_num_args (offset_stmt) != 2)
5797 goto escapes;
5798
5799 arg1_stmt
5800 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5801 arg2_stmt
5802 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5803 if (arg1_stmt == NULL
5804 || !is_gimple_assign (arg1_stmt)
5805 || arg2_stmt == NULL
5806 || !is_gimple_assign (arg2_stmt))
5807 goto escapes;
5808
5809 code1 = gimple_assign_rhs_code (arg1_stmt);
5810 code2 = gimple_assign_rhs_code (arg2_stmt);
5811 if (code1 == COMPONENT_REF
5812 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5813 /* Do nothing. */;
5814 else if (code2 == COMPONENT_REF
5815 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5816 {
5817 gimple tem = arg1_stmt;
5818 code2 = code1;
5819 arg1_stmt = arg2_stmt;
5820 arg2_stmt = tem;
5821 }
5822 else
5823 goto escapes;
5824
5825 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5826 goto escapes;
5827
5828 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5829 if (code2 == MINUS_EXPR)
5830 sub = -sub;
5831 if (sub < -48 || sub > -32)
5832 goto escapes;
5833
5834 arg1 = gimple_assign_rhs1 (arg1_stmt);
5835 arg2 = gimple_assign_rhs1 (arg2_stmt);
5836 if (TREE_CODE (arg2) == SSA_NAME)
5837 {
5838 arg2_stmt = va_list_skip_additions (arg2);
5839 if (arg2_stmt == NULL
5840 || !is_gimple_assign (arg2_stmt)
5841 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5842 goto escapes;
5843 arg2 = gimple_assign_rhs1 (arg2_stmt);
5844 }
5845 if (arg1 != arg2)
5846 goto escapes;
5847
5848 if (TREE_CODE (arg1) != COMPONENT_REF
5849 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5850 || get_base_address (arg1) != base)
5851 goto escapes;
5852
5853 /* Need floating point regs. */
5854 cfun->va_list_fpr_size |= 2;
5855 return false;
5856 }
5857 if (offset_stmt
5858 && is_gimple_assign (offset_stmt)
5859 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
5860 offset = gimple_assign_rhs1 (offset_stmt);
5861 }
5862 if (TREE_CODE (offset) != COMPONENT_REF
5863 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5864 || get_base_address (offset) != base)
5865 goto escapes;
5866 else
5867 /* Need general regs. */
5868 cfun->va_list_fpr_size |= 1;
5869 return false;
5870
5871 escapes:
5872 si->va_list_escapes = true;
5873 return false;
5874 }
5875 #endif
5876
5877 /* Perform any actions needed for a function that is receiving a
5878 variable number of arguments. */
5879
5880 static void
5881 alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
5882 tree type, int *pretend_size, int no_rtl)
5883 {
5884 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
5885
5886 /* Skip the current argument. */
5887 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
5888 true);
5889
5890 #if TARGET_ABI_OPEN_VMS
5891 /* For VMS, we allocate space for all 6 arg registers plus a count.
5892
5893 However, if NO registers need to be saved, don't allocate any space.
5894 This is not only because we won't need the space, but because AP
5895 includes the current_pretend_args_size and we don't want to mess up
5896 any ap-relative addresses already made. */
5897 if (cum.num_args < 6)
5898 {
5899 if (!no_rtl)
5900 {
5901 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5902 emit_insn (gen_arg_home ());
5903 }
5904 *pretend_size = 7 * UNITS_PER_WORD;
5905 }
5906 #else
5907 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5908 only push those that are remaining. However, if NO registers need to
5909 be saved, don't allocate any space. This is not only because we won't
5910 need the space, but because AP includes the current_pretend_args_size
5911 and we don't want to mess up any ap-relative addresses already made.
5912
5913 If we are not to use the floating-point registers, save the integer
5914 registers where we would put the floating-point registers. This is
5915 not the most efficient way to implement varargs with just one register
5916 class, but it isn't worth doing anything more efficient in this rare
5917 case. */
5918 if (cum >= 6)
5919 return;
5920
5921 if (!no_rtl)
5922 {
5923 int count;
5924 alias_set_type set = get_varargs_alias_set ();
5925 rtx tmp;
5926
5927 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
5928 if (count > 6 - cum)
5929 count = 6 - cum;
5930
5931 /* Detect whether integer registers or floating-point registers
5932 are needed by the detected va_arg statements. See above for
5933 how these values are computed. Note that the "escape" value
5934 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
5935 these bits set. */
5936 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
5937
5938 if (cfun->va_list_fpr_size & 1)
5939 {
5940 tmp = gen_rtx_MEM (BLKmode,
5941 plus_constant (virtual_incoming_args_rtx,
5942 (cum + 6) * UNITS_PER_WORD));
5943 MEM_NOTRAP_P (tmp) = 1;
5944 set_mem_alias_set (tmp, set);
5945 move_block_from_reg (16 + cum, tmp, count);
5946 }
5947
5948 if (cfun->va_list_fpr_size & 2)
5949 {
5950 tmp = gen_rtx_MEM (BLKmode,
5951 plus_constant (virtual_incoming_args_rtx,
5952 cum * UNITS_PER_WORD));
5953 MEM_NOTRAP_P (tmp) = 1;
5954 set_mem_alias_set (tmp, set);
5955 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
5956 }
5957 }
5958 *pretend_size = 12 * UNITS_PER_WORD;
5959 #endif
5960 }
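
/* To illustrate the OSF layout created above, consider a varargs function
   with two named argument words, so CUM is 2 after skipping the current
   argument: the pretend-args block is 12 * 8 = 96 bytes.  When the FP save
   area is live (and TARGET_FPREGS is set), registers from $f18 up are
   dumped starting at offset 2 * 8 = 16 from the incoming-args pointer;
   when the GP area is live, $18 and up go at offset (2 + 6) * 8 = 64.
   Each block stores at most COUNT registers, as limited by
   va_list_gpr_size.  */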
5961
5962 static void
5963 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
5964 {
5965 HOST_WIDE_INT offset;
5966 tree t, offset_field, base_field;
5967
5968 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
5969 return;
5970
5971 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
5972 up by 48, storing fp arg registers in the first 48 bytes, and the
5973 integer arg registers in the next 48 bytes. This is only done,
5974 however, if any integer registers need to be stored.
5975
5976 If no integer registers need be stored, then we must subtract 48
5977 in order to account for the integer arg registers which are counted
5978 in argsize above, but which are not actually stored on the stack.
5979 Must further be careful here about structures straddling the last
5980 integer argument register; that futzes with pretend_args_size,
5981 which changes the meaning of AP. */
5982
5983 if (NUM_ARGS < 6)
5984 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
5985 else
5986 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
5987
5988 if (TARGET_ABI_OPEN_VMS)
5989 {
5990 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5991 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
5992 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
5993 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
5994 TREE_SIDE_EFFECTS (t) = 1;
5995 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5996 }
5997 else
5998 {
5999 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6000 offset_field = DECL_CHAIN (base_field);
6001
6002 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6003 valist, base_field, NULL_TREE);
6004 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6005 valist, offset_field, NULL_TREE);
6006
6007 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6008 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6009 size_int (offset));
6010 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6011 TREE_SIDE_EFFECTS (t) = 1;
6012 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6013
6014 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6015 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6016 TREE_SIDE_EFFECTS (t) = 1;
6017 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6018 }
6019 }
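
/* For example, on OSF a function declared "int f (int a, ...)" has one
   named argument word, so the code above sets __base to the incoming
   argument pointer plus 48 and __offset to 8.  The first integer va_arg
   is then read from __base + 8, which is where $17 was dumped by the
   varargs prologue, and __offset values of 48 and up walk the arguments
   the caller pushed on the stack.  */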
6020
6021 static tree
6022 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6023 gimple_seq *pre_p)
6024 {
6025 tree type_size, ptr_type, addend, t, addr;
6026 gimple_seq internal_post;
6027
6028 /* If the type could not be passed in registers, skip the block
6029 reserved for the registers. */
6030 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6031 {
6032 t = build_int_cst (TREE_TYPE (offset), 6*8);
6033 gimplify_assign (offset,
6034 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6035 pre_p);
6036 }
6037
6038 addend = offset;
6039 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6040
6041 if (TREE_CODE (type) == COMPLEX_TYPE)
6042 {
6043 tree real_part, imag_part, real_temp;
6044
6045 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6046 offset, pre_p);
6047
6048 /* Copy the value into a new temporary, lest the formal temporary
6049 be reused out from under us. */
6050 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6051
6052 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6053 offset, pre_p);
6054
6055 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6056 }
6057 else if (TREE_CODE (type) == REAL_TYPE)
6058 {
6059 tree fpaddend, cond, fourtyeight;
6060
6061 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6062 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6063 addend, fourtyeight);
6064 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6065 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6066 fpaddend, addend);
6067 }
6068
6069 /* Build the final address and force that value into a temporary. */
6070 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6071 fold_convert (sizetype, addend));
6072 internal_post = NULL;
6073 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6074 gimple_seq_add_seq (pre_p, internal_post);
6075
6076 /* Update the offset field. */
6077 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6078 if (type_size == NULL || TREE_OVERFLOW (type_size))
6079 t = size_zero_node;
6080 else
6081 {
6082 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6083 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6084 t = size_binop (MULT_EXPR, t, size_int (8));
6085 }
6086 t = fold_convert (TREE_TYPE (offset), t);
6087 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6088 pre_p);
6089
6090 return build_va_arg_indirect_ref (addr);
6091 }
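
/* Two small consequences of the arithmetic above: a 12-byte structure
   advances __offset by 16, since sizes are rounded up to a multiple of 8;
   and a "double" fetched while __offset is still below 48 has 48
   subtracted from its address, redirecting the load into the
   floating-point save area that sits 48 bytes below __base.  */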
6092
6093 static tree
6094 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6095 gimple_seq *post_p)
6096 {
6097 tree offset_field, base_field, offset, base, t, r;
6098 bool indirect;
6099
6100 if (TARGET_ABI_OPEN_VMS)
6101 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6102
6103 base_field = TYPE_FIELDS (va_list_type_node);
6104 offset_field = DECL_CHAIN (base_field);
6105 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6106 valist, base_field, NULL_TREE);
6107 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6108 valist, offset_field, NULL_TREE);
6109
6110 /* Pull the fields of the structure out into temporaries. Since we never
6111 modify the base field, we can use a formal temporary. Sign-extend the
6112 offset field so that it's the proper width for pointer arithmetic. */
6113 base = get_formal_tmp_var (base_field, pre_p);
6114
6115 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6116 offset = get_initialized_tmp_var (t, pre_p, NULL);
6117
6118 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6119 if (indirect)
6120 type = build_pointer_type_for_mode (type, ptr_mode, true);
6121
6122 /* Find the value. Note that this will be a stable indirection, or
6123 a composite of stable indirections in the case of complex. */
6124 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6125
6126 /* Stuff the offset temporary back into its field. */
6127 gimplify_assign (unshare_expr (offset_field),
6128 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6129
6130 if (indirect)
6131 r = build_va_arg_indirect_ref (r);
6132
6133 return r;
6134 }
6135 \f
6136 /* Builtins. */
6137
6138 enum alpha_builtin
6139 {
6140 ALPHA_BUILTIN_CMPBGE,
6141 ALPHA_BUILTIN_EXTBL,
6142 ALPHA_BUILTIN_EXTWL,
6143 ALPHA_BUILTIN_EXTLL,
6144 ALPHA_BUILTIN_EXTQL,
6145 ALPHA_BUILTIN_EXTWH,
6146 ALPHA_BUILTIN_EXTLH,
6147 ALPHA_BUILTIN_EXTQH,
6148 ALPHA_BUILTIN_INSBL,
6149 ALPHA_BUILTIN_INSWL,
6150 ALPHA_BUILTIN_INSLL,
6151 ALPHA_BUILTIN_INSQL,
6152 ALPHA_BUILTIN_INSWH,
6153 ALPHA_BUILTIN_INSLH,
6154 ALPHA_BUILTIN_INSQH,
6155 ALPHA_BUILTIN_MSKBL,
6156 ALPHA_BUILTIN_MSKWL,
6157 ALPHA_BUILTIN_MSKLL,
6158 ALPHA_BUILTIN_MSKQL,
6159 ALPHA_BUILTIN_MSKWH,
6160 ALPHA_BUILTIN_MSKLH,
6161 ALPHA_BUILTIN_MSKQH,
6162 ALPHA_BUILTIN_UMULH,
6163 ALPHA_BUILTIN_ZAP,
6164 ALPHA_BUILTIN_ZAPNOT,
6165 ALPHA_BUILTIN_AMASK,
6166 ALPHA_BUILTIN_IMPLVER,
6167 ALPHA_BUILTIN_RPCC,
6168 ALPHA_BUILTIN_THREAD_POINTER,
6169 ALPHA_BUILTIN_SET_THREAD_POINTER,
6170 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6171 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6172
6173 /* TARGET_MAX */
6174 ALPHA_BUILTIN_MINUB8,
6175 ALPHA_BUILTIN_MINSB8,
6176 ALPHA_BUILTIN_MINUW4,
6177 ALPHA_BUILTIN_MINSW4,
6178 ALPHA_BUILTIN_MAXUB8,
6179 ALPHA_BUILTIN_MAXSB8,
6180 ALPHA_BUILTIN_MAXUW4,
6181 ALPHA_BUILTIN_MAXSW4,
6182 ALPHA_BUILTIN_PERR,
6183 ALPHA_BUILTIN_PKLB,
6184 ALPHA_BUILTIN_PKWB,
6185 ALPHA_BUILTIN_UNPKBL,
6186 ALPHA_BUILTIN_UNPKBW,
6187
6188 /* TARGET_CIX */
6189 ALPHA_BUILTIN_CTTZ,
6190 ALPHA_BUILTIN_CTLZ,
6191 ALPHA_BUILTIN_CTPOP,
6192
6193 ALPHA_BUILTIN_max
6194 };
6195
6196 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6197 CODE_FOR_builtin_cmpbge,
6198 CODE_FOR_extbl,
6199 CODE_FOR_extwl,
6200 CODE_FOR_extll,
6201 CODE_FOR_extql,
6202 CODE_FOR_extwh,
6203 CODE_FOR_extlh,
6204 CODE_FOR_extqh,
6205 CODE_FOR_builtin_insbl,
6206 CODE_FOR_builtin_inswl,
6207 CODE_FOR_builtin_insll,
6208 CODE_FOR_insql,
6209 CODE_FOR_inswh,
6210 CODE_FOR_inslh,
6211 CODE_FOR_insqh,
6212 CODE_FOR_mskbl,
6213 CODE_FOR_mskwl,
6214 CODE_FOR_mskll,
6215 CODE_FOR_mskql,
6216 CODE_FOR_mskwh,
6217 CODE_FOR_msklh,
6218 CODE_FOR_mskqh,
6219 CODE_FOR_umuldi3_highpart,
6220 CODE_FOR_builtin_zap,
6221 CODE_FOR_builtin_zapnot,
6222 CODE_FOR_builtin_amask,
6223 CODE_FOR_builtin_implver,
6224 CODE_FOR_builtin_rpcc,
6225 CODE_FOR_load_tp,
6226 CODE_FOR_set_tp,
6227 CODE_FOR_builtin_establish_vms_condition_handler,
6228 CODE_FOR_builtin_revert_vms_condition_handler,
6229
6230 /* TARGET_MAX */
6231 CODE_FOR_builtin_minub8,
6232 CODE_FOR_builtin_minsb8,
6233 CODE_FOR_builtin_minuw4,
6234 CODE_FOR_builtin_minsw4,
6235 CODE_FOR_builtin_maxub8,
6236 CODE_FOR_builtin_maxsb8,
6237 CODE_FOR_builtin_maxuw4,
6238 CODE_FOR_builtin_maxsw4,
6239 CODE_FOR_builtin_perr,
6240 CODE_FOR_builtin_pklb,
6241 CODE_FOR_builtin_pkwb,
6242 CODE_FOR_builtin_unpkbl,
6243 CODE_FOR_builtin_unpkbw,
6244
6245 /* TARGET_CIX */
6246 CODE_FOR_ctzdi2,
6247 CODE_FOR_clzdi2,
6248 CODE_FOR_popcountdi2
6249 };
6250
6251 struct alpha_builtin_def
6252 {
6253 const char *name;
6254 enum alpha_builtin code;
6255 unsigned int target_mask;
6256 bool is_const;
6257 };
6258
6259 static struct alpha_builtin_def const zero_arg_builtins[] = {
6260 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6261 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6262 };
6263
6264 static struct alpha_builtin_def const one_arg_builtins[] = {
6265 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6266 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6267 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6268 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6269 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6270 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6271 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6272 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6273 };
6274
6275 static struct alpha_builtin_def const two_arg_builtins[] = {
6276 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6277 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6278 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6279 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6280 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6281 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6282 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6283 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6284 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6285 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6286 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6287 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6288 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6289 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6290 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6291 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6292 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6293 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6294 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6295 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6296 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6297 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6298 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6299 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6300 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6301 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6302 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6303 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6304 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6305 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6306 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6307 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6308 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6309 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6310 };
6311
6312 static GTY(()) tree alpha_v8qi_u;
6313 static GTY(()) tree alpha_v8qi_s;
6314 static GTY(()) tree alpha_v4hi_u;
6315 static GTY(()) tree alpha_v4hi_s;
6316
6317 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6318
6319 /* Return the alpha builtin for CODE. */
6320
6321 static tree
6322 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6323 {
6324 if (code >= ALPHA_BUILTIN_max)
6325 return error_mark_node;
6326 return alpha_builtins[code];
6327 }
6328
6329 /* Helper function of alpha_init_builtins. Add the built-in specified
6330 by NAME, TYPE, CODE, and ECF. */
6331
6332 static void
6333 alpha_builtin_function (const char *name, tree ftype,
6334 enum alpha_builtin code, unsigned ecf)
6335 {
6336 tree decl = add_builtin_function (name, ftype, (int) code,
6337 BUILT_IN_MD, NULL, NULL_TREE);
6338
6339 if (ecf & ECF_CONST)
6340 TREE_READONLY (decl) = 1;
6341 if (ecf & ECF_NOTHROW)
6342 TREE_NOTHROW (decl) = 1;
6343
6344 alpha_builtins [(int) code] = decl;
6345 }
6346
6347 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6348 functions pointed to by P, with function type FTYPE. */
6349
6350 static void
6351 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6352 tree ftype)
6353 {
6354 size_t i;
6355
6356 for (i = 0; i < count; ++i, ++p)
6357 if ((target_flags & p->target_mask) == p->target_mask)
6358 alpha_builtin_function (p->name, ftype, p->code,
6359 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6360 }
6361
6362 static void
6363 alpha_init_builtins (void)
6364 {
6365 tree dimode_integer_type_node;
6366 tree ftype;
6367
6368 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6369
6370 /* Fwrite on VMS is non-standard. */
6371 #if TARGET_ABI_OPEN_VMS
6372 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6373 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6374 #endif
6375
6376 ftype = build_function_type_list (dimode_integer_type_node, NULL_TREE);
6377 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6378 ftype);
6379
6380 ftype = build_function_type_list (dimode_integer_type_node,
6381 dimode_integer_type_node, NULL_TREE);
6382 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6383 ftype);
6384
6385 ftype = build_function_type_list (dimode_integer_type_node,
6386 dimode_integer_type_node,
6387 dimode_integer_type_node, NULL_TREE);
6388 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6389 ftype);
6390
6391 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
6392 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6393 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6394
6395 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6396 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6397 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6398
6399 if (TARGET_ABI_OPEN_VMS)
6400 {
6401 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6402 NULL_TREE);
6403 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6404 ftype,
6405 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6406 0);
6407
6408 ftype = build_function_type_list (ptr_type_node, void_type_node,
6409 NULL_TREE);
6410 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6411 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6412 }
6413
6414 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6415 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6416 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6417 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6418 }
6419
6420 /* Expand an expression EXP that calls a built-in function,
6421 with result going to TARGET if that's convenient
6422 (and in mode MODE if that's convenient).
6423 SUBTARGET may be used as the target for computing one of EXP's operands.
6424 IGNORE is nonzero if the value is to be ignored. */
6425
6426 static rtx
6427 alpha_expand_builtin (tree exp, rtx target,
6428 rtx subtarget ATTRIBUTE_UNUSED,
6429 enum machine_mode mode ATTRIBUTE_UNUSED,
6430 int ignore ATTRIBUTE_UNUSED)
6431 {
6432 #define MAX_ARGS 2
6433
6434 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6435 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6436 tree arg;
6437 call_expr_arg_iterator iter;
6438 enum insn_code icode;
6439 rtx op[MAX_ARGS], pat;
6440 int arity;
6441 bool nonvoid;
6442
6443 if (fcode >= ALPHA_BUILTIN_max)
6444 internal_error ("bad builtin fcode");
6445 icode = code_for_builtin[fcode];
6446 if (icode == 0)
6447 internal_error ("bad builtin fcode");
6448
6449 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6450
6451 arity = 0;
6452 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6453 {
6454 const struct insn_operand_data *insn_op;
6455
6456 if (arg == error_mark_node)
6457 return NULL_RTX;
6458 if (arity >= MAX_ARGS)
6459 return NULL_RTX;
6460
6461 insn_op = &insn_data[icode].operand[arity + nonvoid];
6462
6463 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6464
6465 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6466 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6467 arity++;
6468 }
6469
6470 if (nonvoid)
6471 {
6472 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6473 if (!target
6474 || GET_MODE (target) != tmode
6475 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6476 target = gen_reg_rtx (tmode);
6477 }
6478
6479 switch (arity)
6480 {
6481 case 0:
6482 pat = GEN_FCN (icode) (target);
6483 break;
6484 case 1:
6485 if (nonvoid)
6486 pat = GEN_FCN (icode) (target, op[0]);
6487 else
6488 pat = GEN_FCN (icode) (op[0]);
6489 break;
6490 case 2:
6491 pat = GEN_FCN (icode) (target, op[0], op[1]);
6492 break;
6493 default:
6494 gcc_unreachable ();
6495 }
6496 if (!pat)
6497 return NULL_RTX;
6498 emit_insn (pat);
6499
6500 if (nonvoid)
6501 return target;
6502 else
6503 return const0_rtx;
6504 }
6505
6506
6507 /* Several bits below assume HWI >= 64 bits. This should be enforced
6508 by config.gcc. */
6509 #if HOST_BITS_PER_WIDE_INT < 64
6510 # error "HOST_WIDE_INT too small"
6511 #endif
6512
6513 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6514 with an 8-bit output vector. OPINT contains the integer operands; bit N
6515 of OP_CONST is set if OPINT[N] is valid. */
6516
6517 static tree
6518 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6519 {
6520 if (op_const == 3)
6521 {
6522 int i, val;
6523 for (i = 0, val = 0; i < 8; ++i)
6524 {
6525 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6526 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6527 if (c0 >= c1)
6528 val |= 1 << i;
6529 }
6530 return build_int_cst (long_integer_type_node, val);
6531 }
6532 else if (op_const == 2 && opint[1] == 0)
6533 return build_int_cst (long_integer_type_node, 0xff);
6534 return NULL;
6535 }
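
/* A worked instance of the fold above: with both operands constant,

     __builtin_alpha_cmpbge (0x0102030405060708, 0x0808080808080808)

   folds to 1, since of the eight unsigned byte comparisons only the low
   one (0x08 >= 0x08) succeeds.  When just the second operand is known
   and it is zero, every byte trivially compares greater-or-equal, so the
   result folds to 0xff.  */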
6536
6537 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6538 specialized form of an AND operation. Other byte manipulation instructions
6539 are defined in terms of this instruction, so this is also used as a
6540 subroutine for other builtins.
6541
6542 OP contains the tree operands; OPINT contains the extracted integer values.
6543 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6544 OPINT is to be considered. */
6545
6546 static tree
6547 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6548 long op_const)
6549 {
6550 if (op_const & 2)
6551 {
6552 unsigned HOST_WIDE_INT mask = 0;
6553 int i;
6554
6555 for (i = 0; i < 8; ++i)
6556 if ((opint[1] >> i) & 1)
6557 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6558
6559 if (op_const & 1)
6560 return build_int_cst (long_integer_type_node, opint[0] & mask);
6561
6562 if (op)
6563 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6564 build_int_cst (long_integer_type_node, mask));
6565 }
6566 else if ((op_const & 1) && opint[0] == 0)
6567 return build_int_cst (long_integer_type_node, 0);
6568 return NULL;
6569 }
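
/* For example, with a constant mask,

     __builtin_alpha_zapnot (0x1122334455667788, 0x0f)

   keeps only bytes 0-3 and folds to 0x55667788, while the ZAP form with
   the same mask (the caller inverts the constant mask before reusing
   this fold) clears those bytes and folds to 0x1122334400000000.  */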
6570
6571 /* Fold the builtins for the EXT family of instructions. */
6572
6573 static tree
6574 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6575 long op_const, unsigned HOST_WIDE_INT bytemask,
6576 bool is_high)
6577 {
6578 long zap_const = 2;
6579 tree *zap_op = NULL;
6580
6581 if (op_const & 2)
6582 {
6583 unsigned HOST_WIDE_INT loc;
6584
6585 loc = opint[1] & 7;
6586 loc *= BITS_PER_UNIT;
6587
6588 if (loc != 0)
6589 {
6590 if (op_const & 1)
6591 {
6592 unsigned HOST_WIDE_INT temp = opint[0];
6593 if (is_high)
6594 temp <<= loc;
6595 else
6596 temp >>= loc;
6597 opint[0] = temp;
6598 zap_const = 3;
6599 }
6600 }
6601 else
6602 zap_op = op;
6603 }
6604
6605 opint[1] = bytemask;
6606 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6607 }
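
/* Two examples of the fold above: the low variants shift the source
   right before masking, so

     __builtin_alpha_extbl (0x1122334455667788, 2)  folds to 0x66
     __builtin_alpha_extwl (0x1122334455667788, 6)  folds to 0x1122

   while the high variants (EXTWH and friends) shift left by the same
   amount instead.  */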
6608
6609 /* Fold the builtins for the INS family of instructions. */
6610
6611 static tree
6612 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6613 long op_const, unsigned HOST_WIDE_INT bytemask,
6614 bool is_high)
6615 {
6616 if ((op_const & 1) && opint[0] == 0)
6617 return build_int_cst (long_integer_type_node, 0);
6618
6619 if (op_const & 2)
6620 {
6621 unsigned HOST_WIDE_INT temp, loc, byteloc;
6622 tree *zap_op = NULL;
6623
6624 loc = opint[1] & 7;
6625 bytemask <<= loc;
6626
6627 temp = opint[0];
6628 if (is_high)
6629 {
6630 byteloc = (64 - (loc * 8)) & 0x3f;
6631 if (byteloc == 0)
6632 zap_op = op;
6633 else
6634 temp >>= byteloc;
6635 bytemask >>= 8;
6636 }
6637 else
6638 {
6639 byteloc = loc * 8;
6640 if (byteloc == 0)
6641 zap_op = op;
6642 else
6643 temp <<= byteloc;
6644 }
6645
6646 opint[0] = temp;
6647 opint[1] = bytemask;
6648 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6649 }
6650
6651 return NULL;
6652 }
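
/* For instance,

     __builtin_alpha_insbl (0xab, 3)    folds to 0x00000000ab000000
     __builtin_alpha_inswl (0xbeef, 2)  folds to 0x00000000beef0000

   and any INS variant whose value operand is known to be zero folds
   directly to zero.  */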
6653
6654 static tree
6655 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6656 long op_const, unsigned HOST_WIDE_INT bytemask,
6657 bool is_high)
6658 {
6659 if (op_const & 2)
6660 {
6661 unsigned HOST_WIDE_INT loc;
6662
6663 loc = opint[1] & 7;
6664 bytemask <<= loc;
6665
6666 if (is_high)
6667 bytemask >>= 8;
6668
6669 opint[1] = bytemask ^ 0xff;
6670 }
6671
6672 return alpha_fold_builtin_zapnot (op, opint, op_const);
6673 }
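
/* For example, MSKBL clears a single byte at the given position:

     __builtin_alpha_mskbl (0x1122334455667788, 1)

   folds to 0x1122334455660088; the byte mask 0x01 is shifted to byte 1
   and then complemented before being handed to the ZAPNOT fold.  */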
6674
6675 static tree
6676 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6677 {
6678 switch (op_const)
6679 {
6680 case 3:
6681 {
6682 unsigned HOST_WIDE_INT l;
6683 HOST_WIDE_INT h;
6684
6685 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6686
6687 #if HOST_BITS_PER_WIDE_INT > 64
6688 # error fixme
6689 #endif
6690
6691 return build_int_cst (long_integer_type_node, h);
6692 }
6693
6694 case 1:
6695 opint[1] = opint[0];
6696 /* FALLTHRU */
6697 case 2:
6698 /* Note that (X*1) >> 64 == 0. */
6699 if (opint[1] == 0 || opint[1] == 1)
6700 return build_int_cst (long_integer_type_node, 0);
6701 break;
6702 }
6703 return NULL;
6704 }
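
/* For example, __builtin_alpha_umulh (1UL << 40, 1UL << 40) computes the
   upper 64 bits of the 128-bit product 2**80 and folds to 0x10000, while
   a multiplication by a known 0 or 1 folds to zero even if the other
   operand is unknown.  */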
6705
6706 static tree
6707 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6708 {
6709 tree op0 = fold_convert (vtype, op[0]);
6710 tree op1 = fold_convert (vtype, op[1]);
6711 tree val = fold_build2 (code, vtype, op0, op1);
6712 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6713 }
6714
6715 static tree
6716 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6717 {
6718 unsigned HOST_WIDE_INT temp = 0;
6719 int i;
6720
6721 if (op_const != 3)
6722 return NULL;
6723
6724 for (i = 0; i < 8; ++i)
6725 {
6726 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6727 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6728 if (a >= b)
6729 temp += a - b;
6730 else
6731 temp += b - a;
6732 }
6733
6734 return build_int_cst (long_integer_type_node, temp);
6735 }
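
/* PERR sums the absolute differences of the eight byte pairs; e.g.
   __builtin_alpha_perr (0x0105, 0x0203) folds to 3, from |0x05 - 0x03|
   in byte 0 plus |0x01 - 0x02| in byte 1.  */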
6736
6737 static tree
6738 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6739 {
6740 unsigned HOST_WIDE_INT temp;
6741
6742 if (op_const == 0)
6743 return NULL;
6744
6745 temp = opint[0] & 0xff;
6746 temp |= (opint[0] >> 24) & 0xff00;
6747
6748 return build_int_cst (long_integer_type_node, temp);
6749 }
6750
6751 static tree
6752 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6753 {
6754 unsigned HOST_WIDE_INT temp;
6755
6756 if (op_const == 0)
6757 return NULL;
6758
6759 temp = opint[0] & 0xff;
6760 temp |= (opint[0] >> 8) & 0xff00;
6761 temp |= (opint[0] >> 16) & 0xff0000;
6762 temp |= (opint[0] >> 24) & 0xff000000;
6763
6764 return build_int_cst (long_integer_type_node, temp);
6765 }
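
/* These two pack the low byte of each 32-bit (PKLB) or 16-bit (PKWB)
   lane into consecutive low bytes; e.g.

     __builtin_alpha_pklb (0x0000004400000011)  folds to 0x4411
     __builtin_alpha_pkwb (0x0044003300220011)  folds to 0x44332211  */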
6766
6767 static tree
6768 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6769 {
6770 unsigned HOST_WIDE_INT temp;
6771
6772 if (op_const == 0)
6773 return NULL;
6774
6775 temp = opint[0] & 0xff;
6776 temp |= (opint[0] & 0xff00) << 24;
6777
6778 return build_int_cst (long_integer_type_node, temp);
6779 }
6780
6781 static tree
6782 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6783 {
6784 unsigned HOST_WIDE_INT temp;
6785
6786 if (op_const == 0)
6787 return NULL;
6788
6789 temp = opint[0] & 0xff;
6790 temp |= (opint[0] & 0x0000ff00) << 8;
6791 temp |= (opint[0] & 0x00ff0000) << 16;
6792 temp |= (opint[0] & 0xff000000) << 24;
6793
6794 return build_int_cst (long_integer_type_node, temp);
6795 }
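
/* The UNPK folds are the inverse of the PK ones above; e.g.

     __builtin_alpha_unpkbl (0x4411)      folds to 0x0000004400000011
     __builtin_alpha_unpkbw (0x44332211)  folds to 0x0044003300220011  */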
6796
6797 static tree
6798 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6799 {
6800 unsigned HOST_WIDE_INT temp;
6801
6802 if (op_const == 0)
6803 return NULL;
6804
6805 if (opint[0] == 0)
6806 temp = 64;
6807 else
6808 temp = exact_log2 (opint[0] & -opint[0]);
6809
6810 return build_int_cst (long_integer_type_node, temp);
6811 }
6812
6813 static tree
6814 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6815 {
6816 unsigned HOST_WIDE_INT temp;
6817
6818 if (op_const == 0)
6819 return NULL;
6820
6821 if (opint[0] == 0)
6822 temp = 64;
6823 else
6824 temp = 64 - floor_log2 (opint[0]) - 1;
6825
6826 return build_int_cst (long_integer_type_node, temp);
6827 }
6828
6829 static tree
6830 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6831 {
6832 unsigned HOST_WIDE_INT temp, op;
6833
6834 if (op_const == 0)
6835 return NULL;
6836
6837 op = opint[0];
6838 temp = 0;
6839 while (op)
6840 temp++, op &= op - 1;
6841
6842 return build_int_cst (long_integer_type_node, temp);
6843 }
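
/* For example, with a constant argument these fold as
   __builtin_alpha_cttz (0x80) -> 7, __builtin_alpha_ctlz (0x80) -> 56
   and __builtin_alpha_ctpop (0xff00ff) -> 16; a zero argument folds to
   64 for both the count-leading and count-trailing cases.  */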
6844
6845 /* Fold one of our builtin functions. */
6846
6847 static tree
6848 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6849 bool ignore ATTRIBUTE_UNUSED)
6850 {
6851 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6852 long op_const = 0;
6853 int i;
6854
6855 if (n_args >= MAX_ARGS)
6856 return NULL;
6857
6858 for (i = 0; i < n_args; i++)
6859 {
6860 tree arg = op[i];
6861 if (arg == error_mark_node)
6862 return NULL;
6863
6864 opint[i] = 0;
6865 if (TREE_CODE (arg) == INTEGER_CST)
6866 {
6867 op_const |= 1L << i;
6868 opint[i] = int_cst_value (arg);
6869 }
6870 }
6871
6872 switch (DECL_FUNCTION_CODE (fndecl))
6873 {
6874 case ALPHA_BUILTIN_CMPBGE:
6875 return alpha_fold_builtin_cmpbge (opint, op_const);
6876
6877 case ALPHA_BUILTIN_EXTBL:
6878 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6879 case ALPHA_BUILTIN_EXTWL:
6880 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6881 case ALPHA_BUILTIN_EXTLL:
6882 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6883 case ALPHA_BUILTIN_EXTQL:
6884 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6885 case ALPHA_BUILTIN_EXTWH:
6886 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6887 case ALPHA_BUILTIN_EXTLH:
6888 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6889 case ALPHA_BUILTIN_EXTQH:
6890 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6891
6892 case ALPHA_BUILTIN_INSBL:
6893 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6894 case ALPHA_BUILTIN_INSWL:
6895 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6896 case ALPHA_BUILTIN_INSLL:
6897 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6898 case ALPHA_BUILTIN_INSQL:
6899 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6900 case ALPHA_BUILTIN_INSWH:
6901 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6902 case ALPHA_BUILTIN_INSLH:
6903 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6904 case ALPHA_BUILTIN_INSQH:
6905 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6906
6907 case ALPHA_BUILTIN_MSKBL:
6908 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6909 case ALPHA_BUILTIN_MSKWL:
6910 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6911 case ALPHA_BUILTIN_MSKLL:
6912 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6913 case ALPHA_BUILTIN_MSKQL:
6914 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6915 case ALPHA_BUILTIN_MSKWH:
6916 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6917 case ALPHA_BUILTIN_MSKLH:
6918 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6919 case ALPHA_BUILTIN_MSKQH:
6920 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6921
6922 case ALPHA_BUILTIN_UMULH:
6923 return alpha_fold_builtin_umulh (opint, op_const);
6924
6925 case ALPHA_BUILTIN_ZAP:
6926 opint[1] ^= 0xff;
6927 /* FALLTHRU */
6928 case ALPHA_BUILTIN_ZAPNOT:
6929 return alpha_fold_builtin_zapnot (op, opint, op_const);
6930
6931 case ALPHA_BUILTIN_MINUB8:
6932 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6933 case ALPHA_BUILTIN_MINSB8:
6934 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
6935 case ALPHA_BUILTIN_MINUW4:
6936 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
6937 case ALPHA_BUILTIN_MINSW4:
6938 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
6939 case ALPHA_BUILTIN_MAXUB8:
6940 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
6941 case ALPHA_BUILTIN_MAXSB8:
6942 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
6943 case ALPHA_BUILTIN_MAXUW4:
6944 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
6945 case ALPHA_BUILTIN_MAXSW4:
6946 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
6947
6948 case ALPHA_BUILTIN_PERR:
6949 return alpha_fold_builtin_perr (opint, op_const);
6950 case ALPHA_BUILTIN_PKLB:
6951 return alpha_fold_builtin_pklb (opint, op_const);
6952 case ALPHA_BUILTIN_PKWB:
6953 return alpha_fold_builtin_pkwb (opint, op_const);
6954 case ALPHA_BUILTIN_UNPKBL:
6955 return alpha_fold_builtin_unpkbl (opint, op_const);
6956 case ALPHA_BUILTIN_UNPKBW:
6957 return alpha_fold_builtin_unpkbw (opint, op_const);
6958
6959 case ALPHA_BUILTIN_CTTZ:
6960 return alpha_fold_builtin_cttz (opint, op_const);
6961 case ALPHA_BUILTIN_CTLZ:
6962 return alpha_fold_builtin_ctlz (opint, op_const);
6963 case ALPHA_BUILTIN_CTPOP:
6964 return alpha_fold_builtin_ctpop (opint, op_const);
6965
6966 case ALPHA_BUILTIN_AMASK:
6967 case ALPHA_BUILTIN_IMPLVER:
6968 case ALPHA_BUILTIN_RPCC:
6969 case ALPHA_BUILTIN_THREAD_POINTER:
6970 case ALPHA_BUILTIN_SET_THREAD_POINTER:
6971 /* None of these are foldable at compile-time. */
6972 default:
6973 return NULL;
6974 }
6975 }
6976 \f
6977 /* This page contains routines that are used to determine what the function
6978 prologue and epilogue code will do and write them out. */
6979
6980 /* Compute the size of the save area in the stack. */
6981
6982 /* These variables are used for communication between the following functions.
6983 They indicate various things about the current function being compiled
6984 that are used to tell what kind of prologue, epilogue and procedure
6985 descriptor to generate. */
6986
6987 /* Nonzero if we need a stack procedure. */
6988 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6989 static enum alpha_procedure_types alpha_procedure_type;
6990
6991 /* Register number (either FP or SP) that is used to unwind the frame. */
6992 static int vms_unwind_regno;
6993
6994 /* Register number used to save FP. We need not have one for RA since
6995 we don't modify it for register procedures. This is only defined
6996 for register frame procedures. */
6997 static int vms_save_fp_regno;
6998
6999 /* Register number used to reference objects off our PV. */
7000 static int vms_base_regno;
7001
7002 /* Compute register masks for saved registers. */
7003
7004 static void
7005 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7006 {
7007 unsigned long imask = 0;
7008 unsigned long fmask = 0;
7009 unsigned int i;
7010
7011 /* When outputting a thunk, we don't have valid register life info,
7012 but assemble_start_function wants to output .frame and .mask
7013 directives. */
7014 if (cfun->is_thunk)
7015 {
7016 *imaskP = 0;
7017 *fmaskP = 0;
7018 return;
7019 }
7020
7021 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7022 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7023
7024 /* One for every register we have to save. */
7025 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7026 if (! fixed_regs[i] && ! call_used_regs[i]
7027 && df_regs_ever_live_p (i) && i != REG_RA)
7028 {
7029 if (i < 32)
7030 imask |= (1UL << i);
7031 else
7032 fmask |= (1UL << (i - 32));
7033 }
7034
7035 /* We need to restore these for the handler. */
7036 if (crtl->calls_eh_return)
7037 {
7038 for (i = 0; ; ++i)
7039 {
7040 unsigned regno = EH_RETURN_DATA_REGNO (i);
7041 if (regno == INVALID_REGNUM)
7042 break;
7043 imask |= 1UL << regno;
7044 }
7045 }
7046
7047 /* If any register spilled, then spill the return address also. */
7048 /* ??? This is required by the Digital stack unwind specification
7049 and isn't needed if we're doing Dwarf2 unwinding. */
7050 if (imask || fmask || alpha_ra_ever_killed ())
7051 imask |= (1UL << REG_RA);
7052
7053 *imaskP = imask;
7054 *fmaskP = fmask;
7055 }
7056
7057 int
7058 alpha_sa_size (void)
7059 {
7060 unsigned long mask[2];
7061 int sa_size = 0;
7062 int i, j;
7063
7064 alpha_sa_mask (&mask[0], &mask[1]);
7065
7066 for (j = 0; j < 2; ++j)
7067 for (i = 0; i < 32; ++i)
7068 if ((mask[j] >> i) & 1)
7069 sa_size++;
7070
7071 if (TARGET_ABI_OPEN_VMS)
7072 {
7073 /* Start with a stack procedure if we make any calls (REG_RA used), or
7074 need a frame pointer, with a register procedure if we otherwise need
7075 at least a slot, and with a null procedure in other cases. */
7076 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7077 alpha_procedure_type = PT_STACK;
7078 else if (get_frame_size() != 0)
7079 alpha_procedure_type = PT_REGISTER;
7080 else
7081 alpha_procedure_type = PT_NULL;
7082
7083 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7084 made the final decision on stack procedure vs register procedure. */
7085 if (alpha_procedure_type == PT_STACK)
7086 sa_size -= 2;
7087
7088 /* Decide whether to refer to objects off our PV via FP or PV.
7089 If we need FP for something else or if we receive a nonlocal
7090 goto (which expects PV to contain the value), we must use PV.
7091 Otherwise, start by assuming we can use FP. */
7092
7093 vms_base_regno
7094 = (frame_pointer_needed
7095 || cfun->has_nonlocal_label
7096 || alpha_procedure_type == PT_STACK
7097 || crtl->outgoing_args_size)
7098 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7099
7100 /* If we want to copy PV into FP, we need to find some register
7101 in which to save FP. */
7102
7103 vms_save_fp_regno = -1;
7104 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7105 for (i = 0; i < 32; i++)
7106 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7107 vms_save_fp_regno = i;
7108
7109 /* A VMS condition handler requires a stack procedure in our
7110 implementation (this is not required by the calling standard). */
7111 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7112 || cfun->machine->uses_condition_handler)
7113 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7114 else if (alpha_procedure_type == PT_NULL)
7115 vms_base_regno = REG_PV;
7116
7117 /* Stack unwinding should be done via FP unless we use it for PV. */
7118 vms_unwind_regno = (vms_base_regno == REG_PV
7119 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7120
7121 /* If this is a stack procedure, allow space for saving FP, RA and
7122 a condition handler slot if needed. */
7123 if (alpha_procedure_type == PT_STACK)
7124 sa_size += 2 + cfun->machine->uses_condition_handler;
7125 }
7126 else
7127 {
7128 /* Our size must be even (multiple of 16 bytes). */
7129 if (sa_size & 1)
7130 sa_size++;
7131 }
7132
7133 return sa_size * 8;
7134 }
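
/* A small OSF example of the computation above: if the function saves
   $9 and $f2 and also makes a call, so that the return address $26
   (REG_RA) must be spilled, the masks contain three registers in all.
   The count is then bumped to 4 to keep the save area a multiple of
   16 bytes, and alpha_sa_size returns 32.  */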
7135
7136 /* Define the offset between two registers, one to be eliminated,
7137 and the other its replacement, at the start of a routine. */
7138
7139 HOST_WIDE_INT
7140 alpha_initial_elimination_offset (unsigned int from,
7141 unsigned int to ATTRIBUTE_UNUSED)
7142 {
7143 HOST_WIDE_INT ret;
7144
7145 ret = alpha_sa_size ();
7146 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7147
7148 switch (from)
7149 {
7150 case FRAME_POINTER_REGNUM:
7151 break;
7152
7153 case ARG_POINTER_REGNUM:
7154 ret += (ALPHA_ROUND (get_frame_size ()
7155 + crtl->args.pretend_args_size)
7156 - crtl->args.pretend_args_size);
7157 break;
7158
7159 default:
7160 gcc_unreachable ();
7161 }
7162
7163 return ret;
7164 }
7165
7166 #if TARGET_ABI_OPEN_VMS
7167
7168 /* Worker function for TARGET_CAN_ELIMINATE. */
7169
7170 static bool
7171 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7172 {
7173 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7174 alpha_sa_size ();
7175
7176 switch (alpha_procedure_type)
7177 {
7178 case PT_NULL:
7179 /* NULL procedures have no frame of their own and we only
7180 know how to resolve from the current stack pointer. */
7181 return to == STACK_POINTER_REGNUM;
7182
7183 case PT_REGISTER:
7184 case PT_STACK:
7185 /* We always eliminate except to the stack pointer if there is no
7186 usable frame pointer at hand. */
7187 return (to != STACK_POINTER_REGNUM
7188 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7189 }
7190
7191 gcc_unreachable ();
7192 }
7193
7194 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7195 designates the same location as FROM. */
7196
7197 HOST_WIDE_INT
7198 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7199 {
7200 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7201 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7202 on the proper computations and will need the register save area size
7203 in most cases. */
7204
7205 HOST_WIDE_INT sa_size = alpha_sa_size ();
7206
7207 /* PT_NULL procedures have no frame of their own and we only allow
7208 elimination to the stack pointer. This is the argument pointer and we
7209 resolve the soft frame pointer to that as well. */
7210
7211 if (alpha_procedure_type == PT_NULL)
7212 return 0;
7213
7214 /* For a PT_STACK procedure the frame layout looks as follows:
7215
7216 -----> decreasing addresses
7217
7218 < size rounded up to 16 | likewise >
7219 --------------#------------------------------+++--------------+++-------#
7220 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7221 --------------#---------------------------------------------------------#
7222 ^ ^ ^ ^
7223 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7224
7225
7226 PT_REGISTER procedures are similar in that they may have a frame of their
7227 own. They have no regs-sa/pv/outgoing-args area.
7228
7229 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7230 to STACK_PTR if need be. */
7231
7232 {
7233 HOST_WIDE_INT offset;
7234 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7235
7236 switch (from)
7237 {
7238 case FRAME_POINTER_REGNUM:
7239 offset = ALPHA_ROUND (sa_size + pv_save_size);
7240 break;
7241 case ARG_POINTER_REGNUM:
7242 offset = (ALPHA_ROUND (sa_size + pv_save_size
7243 + get_frame_size ()
7244 + crtl->args.pretend_args_size)
7245 - crtl->args.pretend_args_size);
7246 break;
7247 default:
7248 gcc_unreachable ();
7249 }
7250
7251 if (to == STACK_POINTER_REGNUM)
7252 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7253
7254 return offset;
7255 }
7256 }
7257
7258 #define COMMON_OBJECT "common_object"
7259
7260 static tree
7261 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7262 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7263 bool *no_add_attrs ATTRIBUTE_UNUSED)
7264 {
7265 tree decl = *node;
7266 gcc_assert (DECL_P (decl));
7267
7268 DECL_COMMON (decl) = 1;
7269 return NULL_TREE;
7270 }
7271
7272 static const struct attribute_spec vms_attribute_table[] =
7273 {
7274 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7275 affects_type_identity } */
7276 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7277 { NULL, 0, 0, false, false, false, NULL, false }
7278 };
7279
7280 void
7281 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7282 unsigned HOST_WIDE_INT size,
7283 unsigned int align)
7284 {
7285 tree attr = DECL_ATTRIBUTES (decl);
7286 fprintf (file, "%s", COMMON_ASM_OP);
7287 assemble_name (file, name);
7288 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7289 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7290 fprintf (file, ",%u", align / BITS_PER_UNIT);
7291 if (attr)
7292 {
7293 attr = lookup_attribute (COMMON_OBJECT, attr);
7294 if (attr)
7295 fprintf (file, ",%s",
7296 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7297 }
7298 fputc ('\n', file);
7299 }
7300
7301 #undef COMMON_OBJECT
7302
7303 #endif
7304
7305 static int
7306 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7307 {
7308 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7309 }
7310
7311 int
7312 alpha_find_lo_sum_using_gp (rtx insn)
7313 {
7314 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7315 }
7316
7317 static int
7318 alpha_does_function_need_gp (void)
7319 {
7320 rtx insn;
7321
7322 /* The GP being variable is an OSF abi thing. */
7323 if (! TARGET_ABI_OSF)
7324 return 0;
7325
7326 /* We need the gp to load the address of __mcount. */
7327 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7328 return 1;
7329
7330 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7331 if (cfun->is_thunk)
7332 return 1;
7333
7334 /* The nonlocal receiver pattern assumes that the gp is valid for
7335 the nested function. Reasonable because it's almost always set
7336 correctly already. For the cases where that's wrong, make sure
7337 the nested function loads its gp on entry. */
7338 if (crtl->has_nonlocal_goto)
7339 return 1;
7340
7341 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7342 Even if we are a static function, we still need to do this in case
7343 our address is taken and passed to something like qsort. */
7344
7345 push_topmost_sequence ();
7346 insn = get_insns ();
7347 pop_topmost_sequence ();
7348
7349 for (; insn; insn = NEXT_INSN (insn))
7350 if (NONDEBUG_INSN_P (insn)
7351 && ! JUMP_TABLE_DATA_P (insn)
7352 && GET_CODE (PATTERN (insn)) != USE
7353 && GET_CODE (PATTERN (insn)) != CLOBBER
7354 && get_attr_usegp (insn))
7355 return 1;
7356
7357 return 0;
7358 }
7359
7360 \f
7361 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7362 sequences. */
7363
7364 static rtx
7365 set_frame_related_p (void)
7366 {
7367 rtx seq = get_insns ();
7368 rtx insn;
7369
7370 end_sequence ();
7371
7372 if (!seq)
7373 return NULL_RTX;
7374
7375 if (INSN_P (seq))
7376 {
7377 insn = seq;
7378 while (insn != NULL_RTX)
7379 {
7380 RTX_FRAME_RELATED_P (insn) = 1;
7381 insn = NEXT_INSN (insn);
7382 }
7383 seq = emit_insn (seq);
7384 }
7385 else
7386 {
7387 seq = emit_insn (seq);
7388 RTX_FRAME_RELATED_P (seq) = 1;
7389 }
7390 return seq;
7391 }
7392
7393 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7394
7395 /* Generates a store with the proper unwind info attached. VALUE is
7396 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7397 contains SP+FRAME_BIAS, and that is the unwind info that should be
7398 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7399 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7400
7401 static void
7402 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7403 HOST_WIDE_INT base_ofs, rtx frame_reg)
7404 {
7405 rtx addr, mem, insn;
7406
7407 addr = plus_constant (base_reg, base_ofs);
7408 mem = gen_frame_mem (DImode, addr);
7409
7410 insn = emit_move_insn (mem, value);
7411 RTX_FRAME_RELATED_P (insn) = 1;
7412
7413 if (frame_bias || value != frame_reg)
7414 {
7415 if (frame_bias)
7416 {
7417 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7418 mem = gen_rtx_MEM (DImode, addr);
7419 }
7420
7421 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7422 gen_rtx_SET (VOIDmode, mem, frame_reg));
7423 }
7424 }
7425
7426 static void
7427 emit_frame_store (unsigned int regno, rtx base_reg,
7428 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7429 {
7430 rtx reg = gen_rtx_REG (DImode, regno);
7431 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7432 }
7433
7434 /* Compute the frame size. SIZE is the size of the "naked" frame
7435 and SA_SIZE is the size of the register save area. */
7436
7437 static HOST_WIDE_INT
7438 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7439 {
7440 if (TARGET_ABI_OPEN_VMS)
7441 return ALPHA_ROUND (sa_size
7442 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7443 + size
7444 + crtl->args.pretend_args_size);
7445 else
7446 return ALPHA_ROUND (crtl->outgoing_args_size)
7447 + sa_size
7448 + ALPHA_ROUND (size
7449 + crtl->args.pretend_args_size);
7450 }
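
/* A worked OSF example, assuming ALPHA_ROUND rounds up to the 16-byte
   stack alignment: with 24 bytes of outgoing arguments, a 32-byte
   register save area and 40 bytes of locals (no pretend args), the
   frame becomes ALPHA_ROUND (24) + 32 + ALPHA_ROUND (40)
   = 32 + 32 + 48 = 112 bytes.  */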
7451
7452 /* Write function prologue. */
7453
7454 /* On vms we have two kinds of functions:
7455
7456 - stack frame (PROC_STACK)
7457 these are 'normal' functions with local variables and which
7458 call other functions
7459 - register frame (PROC_REGISTER)
7460 keeps all data in registers, needs no stack
7461
7462 We must pass this to the assembler so it can generate the
7463 proper pdsc (procedure descriptor).
7464 This is done with the '.pdesc' command.
7465
7466 On non-VMS targets, we don't really differentiate between the two, as we can
7467 simply allocate stack without saving registers. */
7468
7469 void
7470 alpha_expand_prologue (void)
7471 {
7472 /* Registers to save. */
7473 unsigned long imask = 0;
7474 unsigned long fmask = 0;
7475 /* Stack space needed for pushing registers clobbered by us. */
7476 HOST_WIDE_INT sa_size, sa_bias;
7477 /* Complete stack size needed. */
7478 HOST_WIDE_INT frame_size;
7479 /* Probed stack size; it additionally includes the size of
7480 the "reserve region" if any. */
7481 HOST_WIDE_INT probed_size;
7482 /* Offset from base reg to register save area. */
7483 HOST_WIDE_INT reg_offset;
7484 rtx sa_reg;
7485 int i;
7486
7487 sa_size = alpha_sa_size ();
7488 frame_size = compute_frame_size (get_frame_size (), sa_size);
7489
7490 if (flag_stack_usage_info)
7491 current_function_static_stack_size = frame_size;
7492
7493 if (TARGET_ABI_OPEN_VMS)
7494 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7495 else
7496 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7497
7498 alpha_sa_mask (&imask, &fmask);
7499
7500 /* Emit an insn to reload GP, if needed. */
7501 if (TARGET_ABI_OSF)
7502 {
7503 alpha_function_needs_gp = alpha_does_function_need_gp ();
7504 if (alpha_function_needs_gp)
7505 emit_insn (gen_prologue_ldgp ());
7506 }
7507
7508 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7509 the call to mcount ourselves, rather than having the linker do it
7510 magically in response to -pg. Since _mcount has special linkage,
7511 don't represent the call as a call. */
7512 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7513 emit_insn (gen_prologue_mcount ());
7514
7515 /* Adjust the stack by the frame size. If the frame size is > 4096
7516 bytes, we need to be sure we probe somewhere in the first and last
7517 4096 bytes (we can probably get away without the latter test) and
7518 every 8192 bytes in between. If the frame size is > 32768, we
7519 do this in a loop. Otherwise, we generate the explicit probe
7520 instructions.
7521
7522 Note that we are only allowed to adjust sp once in the prologue. */
7523
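/* As a concrete trace of the strategy just described, assume a
   20000-byte frame, no saved registers and -fstack-check off: the code
   below emits probes at sp-4096 and sp-12288, then one more at sp-20000
   (since 20000 > 20480 - 4096), and finally a single stack adjustment
   of -20000.  Frames larger than 32768 bytes go through the $22/$23
   probe loop in the else branch instead.  */
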
7524 probed_size = frame_size;
7525 if (flag_stack_check)
7526 probed_size += STACK_CHECK_PROTECT;
7527
7528 if (probed_size <= 32768)
7529 {
7530 if (probed_size > 4096)
7531 {
7532 int probed;
7533
7534 for (probed = 4096; probed < probed_size; probed += 8192)
7535 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7536
7537 /* We only have to do this probe if we aren't saving registers or
7538 if we are probing beyond the frame because of -fstack-check. */
7539 if ((sa_size == 0 && probed_size > probed - 4096)
7540 || flag_stack_check)
7541 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7542 }
7543
7544 if (frame_size != 0)
7545 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7546 GEN_INT (-frame_size))));
7547 }
7548 else
7549 {
7550 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7551 number of 8192 byte blocks to probe. We then probe each block
7552 in the loop and then set SP to the proper location. If the
7553 amount remaining is > 4096, we have to do one more probe if we
7554 are not saving any registers or if we are probing beyond the
7555 frame because of -fstack-check. */
7556
7557 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7558 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7559 rtx ptr = gen_rtx_REG (DImode, 22);
7560 rtx count = gen_rtx_REG (DImode, 23);
7561 rtx seq;
7562
7563 emit_move_insn (count, GEN_INT (blocks));
7564 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7565
7566 /* Because of the difficulty in emitting a new basic block this
7567 late in the compilation, generate the loop as a single insn. */
7568 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7569
7570 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7571 {
7572 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7573 MEM_VOLATILE_P (last) = 1;
7574 emit_move_insn (last, const0_rtx);
7575 }
7576
7577 if (flag_stack_check)
7578 {
7579 /* If -fstack-check is specified we have to load the entire
7580 constant into a register and subtract from the sp in one go,
7581 because the probed stack size is not equal to the frame size. */
7582 HOST_WIDE_INT lo, hi;
7583 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7584 hi = frame_size - lo;
7585
7586 emit_move_insn (ptr, GEN_INT (hi));
7587 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7588 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7589 ptr));
7590 }
7591 else
7592 {
7593 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7594 GEN_INT (-leftover)));
7595 }
7596
7597 /* This alternative is special, because the DWARF code cannot
7598 possibly intuit through the loop above. So we invent this
7599 note for it to look at instead. */
7600 RTX_FRAME_RELATED_P (seq) = 1;
7601 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7602 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7603 plus_constant (stack_pointer_rtx,
7604 -frame_size)));
7605 }
7606
7607 /* Cope with very large offsets to the register save area. */
7608 sa_bias = 0;
7609 sa_reg = stack_pointer_rtx;
7610 if (reg_offset + sa_size > 0x8000)
7611 {
7612 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7613 rtx sa_bias_rtx;
7614
7615 if (low + sa_size <= 0x8000)
7616 sa_bias = reg_offset - low, reg_offset = low;
7617 else
7618 sa_bias = reg_offset, reg_offset = 0;
7619
7620 sa_reg = gen_rtx_REG (DImode, 24);
7621 sa_bias_rtx = GEN_INT (sa_bias);
7622
7623 if (add_operand (sa_bias_rtx, DImode))
7624 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7625 else
7626 {
7627 emit_move_insn (sa_reg, sa_bias_rtx);
7628 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7629 }
7630 }
7631
7632 /* Save regs in stack order. Beginning with VMS PV. */
7633 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7634 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7635
7636 /* Save register RA next. */
7637 if (imask & (1UL << REG_RA))
7638 {
7639 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7640 imask &= ~(1UL << REG_RA);
7641 reg_offset += 8;
7642 }
7643
7644 /* Now save any other registers required to be saved. */
7645 for (i = 0; i < 31; i++)
7646 if (imask & (1UL << i))
7647 {
7648 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7649 reg_offset += 8;
7650 }
7651
7652 for (i = 0; i < 31; i++)
7653 if (fmask & (1UL << i))
7654 {
7655 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7656 reg_offset += 8;
7657 }
7658
7659 if (TARGET_ABI_OPEN_VMS)
7660 {
7661 /* Register frame procedures save the fp. */
7662 if (alpha_procedure_type == PT_REGISTER)
7663 {
7664 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7665 hard_frame_pointer_rtx);
7666 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7667 RTX_FRAME_RELATED_P (insn) = 1;
7668 }
7669
7670 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7671 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7672 gen_rtx_REG (DImode, REG_PV)));
7673
7674 if (alpha_procedure_type != PT_NULL
7675 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7676 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7677
7678 /* If we have to allocate space for outgoing args, do it now. */
7679 if (crtl->outgoing_args_size != 0)
7680 {
7681 rtx seq
7682 = emit_move_insn (stack_pointer_rtx,
7683 plus_constant
7684 (hard_frame_pointer_rtx,
7685 - (ALPHA_ROUND
7686 (crtl->outgoing_args_size))));
7687
7688 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7689 if ! frame_pointer_needed. Setting the bit will change the CFA
7690 computation rule to use sp again, which would be wrong if we had
7691 frame_pointer_needed, as this means sp might move unpredictably
7692 later on.
7693
7694 Also, note that
7695 frame_pointer_needed
7696 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7697 and
7698 crtl->outgoing_args_size != 0
7699 => alpha_procedure_type != PT_NULL,
7700
7701 so when we are not setting the bit here, we are guaranteed to
7702 have emitted an FRP frame pointer update just before. */
7703 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7704 }
7705 }
7706 else
7707 {
7708 /* If we need a frame pointer, set it from the stack pointer. */
7709 if (frame_pointer_needed)
7710 {
7711 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7712 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7713 else
7714 /* This must always be the last instruction in the
7715 prologue, thus we emit a special move + clobber. */
7716 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7717 stack_pointer_rtx, sa_reg)));
7718 }
7719 }
7720
7721 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7722 the prologue, for exception handling reasons, we cannot do this for
7723 any insn that might fault. We could prevent this for mems with a
7724 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7725 have to prevent all such scheduling with a blockage.
7726
7727 Linux, on the other hand, never bothered to implement OSF/1's
7728 exception handling, and so doesn't care about such things. Anyone
7729 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7730
7731 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7732 emit_insn (gen_blockage ());
7733 }
7734
7735 /* Count the number of .file directives, so that .loc is up to date. */
7736 int num_source_filenames = 0;
7737
7738 /* Output the textual info surrounding the prologue. */
7739
7740 void
7741 alpha_start_function (FILE *file, const char *fnname,
7742 tree decl ATTRIBUTE_UNUSED)
7743 {
7744 unsigned long imask = 0;
7745 unsigned long fmask = 0;
7746 /* Stack space needed for pushing registers clobbered by us. */
7747 HOST_WIDE_INT sa_size;
7748 /* Complete stack size needed. */
7749 unsigned HOST_WIDE_INT frame_size;
7750 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7751 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7752 ? 524288
7753 : 1UL << 31;
7754 /* Offset from base reg to register save area. */
7755 HOST_WIDE_INT reg_offset;
7756 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7757 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7758 int i;
7759
7760 #if TARGET_ABI_OPEN_VMS
7761 if (vms_debug_main
7762 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7763 {
7764 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7765 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7766 switch_to_section (text_section);
7767 vms_debug_main = NULL;
7768 }
7769 #endif
7770
7771 alpha_fnname = fnname;
7772 sa_size = alpha_sa_size ();
7773 frame_size = compute_frame_size (get_frame_size (), sa_size);
7774
7775 if (TARGET_ABI_OPEN_VMS)
7776 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7777 else
7778 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7779
7780 alpha_sa_mask (&imask, &fmask);
7781
7782 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7783 We have to do that before the .ent directive as we cannot switch
7784 files within procedures with native ecoff because line numbers are
7785 linked to procedure descriptors.
7786 Outputting the lineno helps debugging of one line functions as they
7787 would otherwise get no line number at all. Please note that we would
7788 like to put out last_linenum from final.c, but it is not accessible. */
7789
7790 if (write_symbols == SDB_DEBUG)
7791 {
7792 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7793 ASM_OUTPUT_SOURCE_FILENAME (file,
7794 DECL_SOURCE_FILE (current_function_decl));
7795 #endif
7796 #ifdef SDB_OUTPUT_SOURCE_LINE
7797 if (debug_info_level != DINFO_LEVEL_TERSE)
7798 SDB_OUTPUT_SOURCE_LINE (file,
7799 DECL_SOURCE_LINE (current_function_decl));
7800 #endif
7801 }
7802
7803 /* Issue function start and label. */
7804 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7805 {
7806 fputs ("\t.ent ", file);
7807 assemble_name (file, fnname);
7808 putc ('\n', file);
7809
7810 /* If the function needs GP, we'll write the "..ng" label there.
7811 Otherwise, do it here. */
7812 if (TARGET_ABI_OSF
7813 && ! alpha_function_needs_gp
7814 && ! cfun->is_thunk)
7815 {
7816 putc ('$', file);
7817 assemble_name (file, fnname);
7818 fputs ("..ng:\n", file);
7819 }
7820 }
7821   /* Nested functions on VMS that are potentially called via trampoline
7822      get a special transfer entry point that loads the called function's
7823      procedure descriptor and static chain.  */
7824 if (TARGET_ABI_OPEN_VMS
7825 && !TREE_PUBLIC (decl)
7826 && DECL_CONTEXT (decl)
7827 && !TYPE_P (DECL_CONTEXT (decl)))
7828 {
7829 strcpy (tramp_label, fnname);
7830 strcat (tramp_label, "..tr");
7831 ASM_OUTPUT_LABEL (file, tramp_label);
7832 fprintf (file, "\tldq $1,24($27)\n");
7833 fprintf (file, "\tldq $27,16($27)\n");
7834 }
7835
7836 strcpy (entry_label, fnname);
7837 if (TARGET_ABI_OPEN_VMS)
7838 strcat (entry_label, "..en");
7839
7840 ASM_OUTPUT_LABEL (file, entry_label);
7841 inside_function = TRUE;
7842
7843 if (TARGET_ABI_OPEN_VMS)
7844 fprintf (file, "\t.base $%d\n", vms_base_regno);
7845
7846 if (TARGET_ABI_OSF
7847 && TARGET_IEEE_CONFORMANT
7848 && !flag_inhibit_size_directive)
7849 {
7850 /* Set flags in procedure descriptor to request IEEE-conformant
7851 math-library routines. The value we set it to is PDSC_EXC_IEEE
7852 (/usr/include/pdsc.h). */
7853 fputs ("\t.eflag 48\n", file);
7854 }
7855
7856 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7857 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7858 alpha_arg_offset = -frame_size + 48;
7859
7860   /* Describe our frame.  If the frame size is larger than the assembler
7861      can describe (see max_frame_size above), print it as zero to avoid an
7862      assembler error.  We won't be properly describing such a frame, but
	   that's the best we can do.  */
7863 if (TARGET_ABI_OPEN_VMS)
7864 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7865 HOST_WIDE_INT_PRINT_DEC "\n",
7866 vms_unwind_regno,
7867 frame_size >= (1UL << 31) ? 0 : frame_size,
7868 reg_offset);
7869 else if (!flag_inhibit_size_directive)
7870 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7871 (frame_pointer_needed
7872 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7873 frame_size >= max_frame_size ? 0 : frame_size,
7874 crtl->args.pretend_args_size);
7875
7876 /* Describe which registers were spilled. */
7877 if (TARGET_ABI_OPEN_VMS)
7878 {
7879 if (imask)
7880 /* ??? Does VMS care if mask contains ra? The old code didn't
7881 set it, so I don't here. */
7882 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7883 if (fmask)
7884 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7885 if (alpha_procedure_type == PT_REGISTER)
7886 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7887 }
7888 else if (!flag_inhibit_size_directive)
7889 {
7890 if (imask)
7891 {
7892 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7893 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7894
7895 for (i = 0; i < 32; ++i)
7896 if (imask & (1UL << i))
7897 reg_offset += 8;
7898 }
7899
7900 if (fmask)
7901 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7902 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7903 }
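  /* For illustration only, since the exact output depends on target flags
     and frame layout: a small OSF routine with a 96-byte frame that saves
     just $26 and $9 would get roughly

	.frame $30,96,$26,0
	.mask 0x4000200,-96

     from the directives emitted above.  */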
7904
7905 #if TARGET_ABI_OPEN_VMS
7906 /* If a user condition handler has been installed at some point, emit
7907 the procedure descriptor bits to point the Condition Handling Facility
7908 at the indirection wrapper, and state the fp offset at which the user
7909 handler may be found. */
7910 if (cfun->machine->uses_condition_handler)
7911 {
7912 fprintf (file, "\t.handler __gcc_shell_handler\n");
7913 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
7914 }
7915
7916   /* Ifdef'ed because link_section is only available then.  */
7917 switch_to_section (readonly_data_section);
7918 fprintf (file, "\t.align 3\n");
7919 assemble_name (file, fnname); fputs ("..na:\n", file);
7920 fputs ("\t.ascii \"", file);
7921 assemble_name (file, fnname);
7922 fputs ("\\0\"\n", file);
7923 alpha_need_linkage (fnname, 1);
7924 switch_to_section (text_section);
7925 #endif
7926 }
7927
7928 /* Emit the .prologue note at the scheduled end of the prologue. */
7929
7930 static void
7931 alpha_output_function_end_prologue (FILE *file)
7932 {
7933 if (TARGET_ABI_OPEN_VMS)
7934 fputs ("\t.prologue\n", file);
7935 else if (!flag_inhibit_size_directive)
7936 fprintf (file, "\t.prologue %d\n",
7937 alpha_function_needs_gp || cfun->is_thunk);
7938 }
7939
7940 /* Write function epilogue. */
7941
7942 void
7943 alpha_expand_epilogue (void)
7944 {
7945 /* Registers to save. */
7946 unsigned long imask = 0;
7947 unsigned long fmask = 0;
7948 /* Stack space needed for pushing registers clobbered by us. */
7949 HOST_WIDE_INT sa_size;
7950 /* Complete stack size needed. */
7951 HOST_WIDE_INT frame_size;
7952 /* Offset from base reg to register save area. */
7953 HOST_WIDE_INT reg_offset;
7954 int fp_is_frame_pointer, fp_offset;
7955 rtx sa_reg, sa_reg_exp = NULL;
7956 rtx sp_adj1, sp_adj2, mem, reg, insn;
7957 rtx eh_ofs;
7958 rtx cfa_restores = NULL_RTX;
7959 int i;
7960
7961 sa_size = alpha_sa_size ();
7962 frame_size = compute_frame_size (get_frame_size (), sa_size);
7963
7964 if (TARGET_ABI_OPEN_VMS)
7965 {
7966 if (alpha_procedure_type == PT_STACK)
7967 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7968 else
7969 reg_offset = 0;
7970 }
7971 else
7972 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7973
7974 alpha_sa_mask (&imask, &fmask);
7975
7976 fp_is_frame_pointer
7977 = (TARGET_ABI_OPEN_VMS
7978 ? alpha_procedure_type == PT_STACK
7979 : frame_pointer_needed);
7980 fp_offset = 0;
7981 sa_reg = stack_pointer_rtx;
7982
7983 if (crtl->calls_eh_return)
7984 eh_ofs = EH_RETURN_STACKADJ_RTX;
7985 else
7986 eh_ofs = NULL_RTX;
7987
7988 if (sa_size)
7989 {
7990 /* If we have a frame pointer, restore SP from it. */
7991 if (TARGET_ABI_OPEN_VMS
7992 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7993 : frame_pointer_needed)
7994 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
7995
7996 /* Cope with very large offsets to the register save area. */
7997 if (reg_offset + sa_size > 0x8000)
7998 {
7999 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8000 HOST_WIDE_INT bias;
8001
8002 if (low + sa_size <= 0x8000)
8003 bias = reg_offset - low, reg_offset = low;
8004 else
8005 bias = reg_offset, reg_offset = 0;
8006
8007 sa_reg = gen_rtx_REG (DImode, 22);
8008 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8009
8010 emit_move_insn (sa_reg, sa_reg_exp);
8011 }
8012
8013 /* Restore registers in order, excepting a true frame pointer. */
8014
8015 mem = gen_frame_mem (DImode, plus_constant (sa_reg, reg_offset));
8016 reg = gen_rtx_REG (DImode, REG_RA);
8017 emit_move_insn (reg, mem);
8018 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8019
8020 reg_offset += 8;
8021 imask &= ~(1UL << REG_RA);
8022
8023 for (i = 0; i < 31; ++i)
8024 if (imask & (1UL << i))
8025 {
8026 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8027 fp_offset = reg_offset;
8028 else
8029 {
8030 mem = gen_frame_mem (DImode,
8031 plus_constant (sa_reg, reg_offset));
8032 reg = gen_rtx_REG (DImode, i);
8033 emit_move_insn (reg, mem);
8034 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8035 cfa_restores);
8036 }
8037 reg_offset += 8;
8038 }
8039
8040 for (i = 0; i < 31; ++i)
8041 if (fmask & (1UL << i))
8042 {
8043 mem = gen_frame_mem (DFmode, plus_constant (sa_reg, reg_offset));
8044 reg = gen_rtx_REG (DFmode, i+32);
8045 emit_move_insn (reg, mem);
8046 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8047 reg_offset += 8;
8048 }
8049 }
8050
8051 if (frame_size || eh_ofs)
8052 {
8053 sp_adj1 = stack_pointer_rtx;
8054
8055 if (eh_ofs)
8056 {
8057 sp_adj1 = gen_rtx_REG (DImode, 23);
8058 emit_move_insn (sp_adj1,
8059 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8060 }
8061
8062 /* If the stack size is large, begin computation into a temporary
8063 register so as not to interfere with a potential fp restore,
8064 which must be consecutive with an SP restore. */
8065 if (frame_size < 32768 && !cfun->calls_alloca)
8066 sp_adj2 = GEN_INT (frame_size);
8067 else if (frame_size < 0x40007fffL)
8068 {
8069 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8070
8071 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8072 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8073 sp_adj1 = sa_reg;
8074 else
8075 {
8076 sp_adj1 = gen_rtx_REG (DImode, 23);
8077 emit_move_insn (sp_adj1, sp_adj2);
8078 }
8079 sp_adj2 = GEN_INT (low);
8080 }
8081 else
8082 {
8083 rtx tmp = gen_rtx_REG (DImode, 23);
8084 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8085 if (!sp_adj2)
8086 {
8087 /* We can't drop new things to memory this late, afaik,
8088 so build it up by pieces. */
8089 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8090 -(frame_size < 0));
8091 gcc_assert (sp_adj2);
8092 }
8093 }
8094
8095 /* From now on, things must be in order. So emit blockages. */
8096
8097 /* Restore the frame pointer. */
8098 if (fp_is_frame_pointer)
8099 {
8100 emit_insn (gen_blockage ());
8101 mem = gen_frame_mem (DImode, plus_constant (sa_reg, fp_offset));
8102 emit_move_insn (hard_frame_pointer_rtx, mem);
8103 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8104 hard_frame_pointer_rtx, cfa_restores);
8105 }
8106 else if (TARGET_ABI_OPEN_VMS)
8107 {
8108 emit_insn (gen_blockage ());
8109 emit_move_insn (hard_frame_pointer_rtx,
8110 gen_rtx_REG (DImode, vms_save_fp_regno));
8111 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8112 hard_frame_pointer_rtx, cfa_restores);
8113 }
8114
8115 /* Restore the stack pointer. */
8116 emit_insn (gen_blockage ());
8117 if (sp_adj2 == const0_rtx)
8118 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8119 else
8120 insn = emit_move_insn (stack_pointer_rtx,
8121 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8122 REG_NOTES (insn) = cfa_restores;
8123 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8124 RTX_FRAME_RELATED_P (insn) = 1;
8125 }
8126 else
8127 {
8128 gcc_assert (cfa_restores == NULL);
8129
8130 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8131 {
8132 emit_insn (gen_blockage ());
8133 insn = emit_move_insn (hard_frame_pointer_rtx,
8134 gen_rtx_REG (DImode, vms_save_fp_regno));
8135 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8136 RTX_FRAME_RELATED_P (insn) = 1;
8137 }
8138 }
8139 }
8140 \f
8141 /* Output the rest of the textual info surrounding the epilogue. */
8142
8143 void
8144 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8145 {
8146 rtx insn;
8147
8148 /* We output a nop after noreturn calls at the very end of the function to
8149 ensure that the return address always remains in the caller's code range,
8150 as not doing so might confuse unwinding engines. */
8151 insn = get_last_insn ();
8152 if (!INSN_P (insn))
8153 insn = prev_active_insn (insn);
8154 if (insn && CALL_P (insn))
8155 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8156
8157 #if TARGET_ABI_OPEN_VMS
8158 alpha_write_linkage (file, fnname, decl);
8159 #endif
8160
8161 /* End the function. */
8162 if (TARGET_ABI_OPEN_VMS
8163 || !flag_inhibit_size_directive)
8164 {
8165 fputs ("\t.end ", file);
8166 assemble_name (file, fnname);
8167 putc ('\n', file);
8168 }
8169 inside_function = FALSE;
8170 }
8171
8172 #if TARGET_ABI_OPEN_VMS
8173 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8174 {
8175 #ifdef DO_CRTL_NAMES
8176 DO_CRTL_NAMES;
8177 #endif
8178 }
8179 #endif
8180
8181 #if TARGET_ABI_OSF
8182 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8183
8184 In order to avoid the hordes of differences between generated code
8185 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8186 lots of code loading up large constants, generate rtl and emit it
8187 instead of going straight to text.
8188
8189 Not sure why this idea hasn't been explored before... */
8190
8191 static void
8192 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8193 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8194 tree function)
8195 {
8196 HOST_WIDE_INT hi, lo;
8197 rtx this_rtx, insn, funexp;
8198
8199 /* We always require a valid GP. */
8200 emit_insn (gen_prologue_ldgp ());
8201 emit_note (NOTE_INSN_PROLOGUE_END);
8202
8203 /* Find the "this" pointer. If the function returns a structure,
8204 the structure return pointer is in $16. */
8205 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8206 this_rtx = gen_rtx_REG (Pmode, 17);
8207 else
8208 this_rtx = gen_rtx_REG (Pmode, 16);
8209
8210 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8211 entire constant for the add. */
8212 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8213 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8214 if (hi + lo == delta)
8215 {
8216 if (hi)
8217 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8218 if (lo)
8219 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8220 }
8221 else
8222 {
8223 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8224 delta, -(delta < 0));
8225 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8226 }
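  /* Illustrative numbers only: delta == 0x1234c678 splits into
     lo == -0x3988 and hi == 0x12350000, so hi + lo == delta and the
     adjustment above is essentially an ldah/lda pair; a delta that cannot
     be expressed this way falls back to materializing the full constant
     in $0 first.  */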
8227
8228 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8229 if (vcall_offset)
8230 {
8231 rtx tmp, tmp2;
8232
8233 tmp = gen_rtx_REG (Pmode, 0);
8234 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8235
8236 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8237 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8238 if (hi + lo == vcall_offset)
8239 {
8240 if (hi)
8241 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8242 }
8243 else
8244 {
8245 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8246 vcall_offset, -(vcall_offset < 0));
8247 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8248 lo = 0;
8249 }
8250 if (lo)
8251 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8252 else
8253 tmp2 = tmp;
8254 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8255
8256 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8257 }
8258
8259 /* Generate a tail call to the target function. */
8260 if (! TREE_USED (function))
8261 {
8262 assemble_external (function);
8263 TREE_USED (function) = 1;
8264 }
8265 funexp = XEXP (DECL_RTL (function), 0);
8266 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8267 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8268 SIBLING_CALL_P (insn) = 1;
8269
8270 /* Run just enough of rest_of_compilation to get the insns emitted.
8271 There's not really enough bulk here to make other passes such as
8272 instruction scheduling worth while. Note that use_thunk calls
8273 assemble_start_function and assemble_end_function. */
8274 insn = get_insns ();
8275 insn_locators_alloc ();
8276 shorten_branches (insn);
8277 final_start_function (insn, file, 1);
8278 final (insn, file, 1);
8279 final_end_function ();
8280 }
8281 #endif /* TARGET_ABI_OSF */
8282 \f
8283 /* Debugging support. */
8284
8285 #include "gstab.h"
8286
8287 /* Count the number of sdb-related labels generated (to find block
8288    start and end boundaries).  */
8289
8290 int sdb_label_count = 0;
8291
8292 /* Name of the file containing the current function. */
8293
8294 static const char *current_function_file = "";
8295
8296 /* Offsets to alpha virtual arg/local debugging pointers. */
8297
8298 long alpha_arg_offset;
8299 long alpha_auto_offset;
8300 \f
8301 /* Emit a new filename to a stream. */
8302
8303 void
8304 alpha_output_filename (FILE *stream, const char *name)
8305 {
8306 static int first_time = TRUE;
8307
8308 if (first_time)
8309 {
8310 first_time = FALSE;
8311 ++num_source_filenames;
8312 current_function_file = name;
8313 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8314 output_quoted_string (stream, name);
8315 fprintf (stream, "\n");
8316 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8317 fprintf (stream, "\t#@stabs\n");
8318 }
8319
8320 else if (write_symbols == DBX_DEBUG)
8321 /* dbxout.c will emit an appropriate .stabs directive. */
8322 return;
8323
8324 else if (name != current_function_file
8325 && strcmp (name, current_function_file) != 0)
8326 {
8327 if (inside_function && ! TARGET_GAS)
8328 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8329 else
8330 {
8331 ++num_source_filenames;
8332 current_function_file = name;
8333 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8334 }
8335
8336 output_quoted_string (stream, name);
8337 fprintf (stream, "\n");
8338 }
8339 }
8340 \f
8341 /* Structure to show the current status of registers and memory. */
8342
8343 struct shadow_summary
8344 {
8345 struct {
8346 unsigned int i : 31; /* Mask of int regs */
8347 unsigned int fp : 31; /* Mask of fp regs */
8348 unsigned int mem : 1; /* mem == imem | fpmem */
8349 } used, defd;
8350 };
8351
8352 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8353 to the summary structure. SET is nonzero if the insn is setting the
8354 object, otherwise zero. */
8355
8356 static void
8357 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8358 {
8359 const char *format_ptr;
8360 int i, j;
8361
8362 if (x == 0)
8363 return;
8364
8365 switch (GET_CODE (x))
8366 {
8367 /* ??? Note that this case would be incorrect if the Alpha had a
8368 ZERO_EXTRACT in SET_DEST. */
8369 case SET:
8370 summarize_insn (SET_SRC (x), sum, 0);
8371 summarize_insn (SET_DEST (x), sum, 1);
8372 break;
8373
8374 case CLOBBER:
8375 summarize_insn (XEXP (x, 0), sum, 1);
8376 break;
8377
8378 case USE:
8379 summarize_insn (XEXP (x, 0), sum, 0);
8380 break;
8381
8382 case ASM_OPERANDS:
8383 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8384 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8385 break;
8386
8387 case PARALLEL:
8388 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8389 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8390 break;
8391
8392 case SUBREG:
8393 summarize_insn (SUBREG_REG (x), sum, 0);
8394 break;
8395
8396 case REG:
8397 {
8398 int regno = REGNO (x);
8399 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8400
8401 if (regno == 31 || regno == 63)
8402 break;
8403
8404 if (set)
8405 {
8406 if (regno < 32)
8407 sum->defd.i |= mask;
8408 else
8409 sum->defd.fp |= mask;
8410 }
8411 else
8412 {
8413 if (regno < 32)
8414 sum->used.i |= mask;
8415 else
8416 sum->used.fp |= mask;
8417 }
8418 }
8419 break;
8420
8421 case MEM:
8422 if (set)
8423 sum->defd.mem = 1;
8424 else
8425 sum->used.mem = 1;
8426
8427 /* Find the regs used in memory address computation: */
8428 summarize_insn (XEXP (x, 0), sum, 0);
8429 break;
8430
8431 case CONST_INT: case CONST_DOUBLE:
8432 case SYMBOL_REF: case LABEL_REF: case CONST:
8433 case SCRATCH: case ASM_INPUT:
8434 break;
8435
8436 /* Handle common unary and binary ops for efficiency. */
8437 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8438 case MOD: case UDIV: case UMOD: case AND: case IOR:
8439 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8440 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8441 case NE: case EQ: case GE: case GT: case LE:
8442 case LT: case GEU: case GTU: case LEU: case LTU:
8443 summarize_insn (XEXP (x, 0), sum, 0);
8444 summarize_insn (XEXP (x, 1), sum, 0);
8445 break;
8446
8447 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8448 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8449 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8450 case SQRT: case FFS:
8451 summarize_insn (XEXP (x, 0), sum, 0);
8452 break;
8453
8454 default:
8455 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8456 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8457 switch (format_ptr[i])
8458 {
8459 case 'e':
8460 summarize_insn (XEXP (x, i), sum, 0);
8461 break;
8462
8463 case 'E':
8464 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8465 summarize_insn (XVECEXP (x, i, j), sum, 0);
8466 break;
8467
8468 case 'i':
8469 break;
8470
8471 default:
8472 gcc_unreachable ();
8473 }
8474 }
8475 }
8476
8477 /* Ensure a sufficient number of `trapb' insns are in the code when
8478 the user requests code with a trap precision of functions or
8479 instructions.
8480
8481 In naive mode, when the user requests a trap-precision of
8482 "instruction", a trapb is needed after every instruction that may
8483 generate a trap. This ensures that the code is resumption safe but
8484 it is also slow.
8485
8486 When optimizations are turned on, we delay issuing a trapb as long
8487 as possible. In this context, a trap shadow is the sequence of
8488 instructions that starts with a (potentially) trap generating
8489 instruction and extends to the next trapb or call_pal instruction
8490 (but GCC never generates call_pal by itself). We can delay (and
8491 therefore sometimes omit) a trapb subject to the following
8492 conditions:
8493
8494 (a) On entry to the trap shadow, if any Alpha register or memory
8495 location contains a value that is used as an operand value by some
8496 instruction in the trap shadow (live on entry), then no instruction
8497 in the trap shadow may modify the register or memory location.
8498
8499 (b) Within the trap shadow, the computation of the base register
8500 for a memory load or store instruction may not involve using the
8501 result of an instruction that might generate an UNPREDICTABLE
8502 result.
8503
8504 (c) Within the trap shadow, no register may be used more than once
8505 as a destination register. (This is to make life easier for the
8506 trap-handler.)
8507
8508 (d) The trap shadow may not include any branch instructions. */
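/* A hedged illustration, with made-up register numbers: if a shadow is
   opened by a potentially trapping

	addt $f1,$f2,$f3

   then a second

	addt $f4,$f5,$f3

   within the same shadow reuses $f3 as a destination and would violate
   rule (c), so the pass below closes the shadow by emitting a trapb just
   before the second insn.  */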
8509
8510 static void
8511 alpha_handle_trap_shadows (void)
8512 {
8513 struct shadow_summary shadow;
8514 int trap_pending, exception_nesting;
8515 rtx i, n;
8516
8517 trap_pending = 0;
8518 exception_nesting = 0;
8519 shadow.used.i = 0;
8520 shadow.used.fp = 0;
8521 shadow.used.mem = 0;
8522 shadow.defd = shadow.used;
8523
8524 for (i = get_insns (); i ; i = NEXT_INSN (i))
8525 {
8526 if (NOTE_P (i))
8527 {
8528 switch (NOTE_KIND (i))
8529 {
8530 case NOTE_INSN_EH_REGION_BEG:
8531 exception_nesting++;
8532 if (trap_pending)
8533 goto close_shadow;
8534 break;
8535
8536 case NOTE_INSN_EH_REGION_END:
8537 exception_nesting--;
8538 if (trap_pending)
8539 goto close_shadow;
8540 break;
8541
8542 case NOTE_INSN_EPILOGUE_BEG:
8543 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8544 goto close_shadow;
8545 break;
8546 }
8547 }
8548 else if (trap_pending)
8549 {
8550 if (alpha_tp == ALPHA_TP_FUNC)
8551 {
8552 if (JUMP_P (i)
8553 && GET_CODE (PATTERN (i)) == RETURN)
8554 goto close_shadow;
8555 }
8556 else if (alpha_tp == ALPHA_TP_INSN)
8557 {
8558 if (optimize > 0)
8559 {
8560 struct shadow_summary sum;
8561
8562 sum.used.i = 0;
8563 sum.used.fp = 0;
8564 sum.used.mem = 0;
8565 sum.defd = sum.used;
8566
8567 switch (GET_CODE (i))
8568 {
8569 case INSN:
8570 /* Annoyingly, get_attr_trap will die on these. */
8571 if (GET_CODE (PATTERN (i)) == USE
8572 || GET_CODE (PATTERN (i)) == CLOBBER)
8573 break;
8574
8575 summarize_insn (PATTERN (i), &sum, 0);
8576
8577 if ((sum.defd.i & shadow.defd.i)
8578 || (sum.defd.fp & shadow.defd.fp))
8579 {
8580 /* (c) would be violated */
8581 goto close_shadow;
8582 }
8583
8584 /* Combine shadow with summary of current insn: */
8585 shadow.used.i |= sum.used.i;
8586 shadow.used.fp |= sum.used.fp;
8587 shadow.used.mem |= sum.used.mem;
8588 shadow.defd.i |= sum.defd.i;
8589 shadow.defd.fp |= sum.defd.fp;
8590 shadow.defd.mem |= sum.defd.mem;
8591
8592 if ((sum.defd.i & shadow.used.i)
8593 || (sum.defd.fp & shadow.used.fp)
8594 || (sum.defd.mem & shadow.used.mem))
8595 {
8596 /* (a) would be violated (also takes care of (b)) */
8597 gcc_assert (get_attr_trap (i) != TRAP_YES
8598 || (!(sum.defd.i & sum.used.i)
8599 && !(sum.defd.fp & sum.used.fp)));
8600
8601 goto close_shadow;
8602 }
8603 break;
8604
8605 case JUMP_INSN:
8606 case CALL_INSN:
8607 case CODE_LABEL:
8608 goto close_shadow;
8609
8610 default:
8611 gcc_unreachable ();
8612 }
8613 }
8614 else
8615 {
8616 close_shadow:
8617 n = emit_insn_before (gen_trapb (), i);
8618 PUT_MODE (n, TImode);
8619 PUT_MODE (i, TImode);
8620 trap_pending = 0;
8621 shadow.used.i = 0;
8622 shadow.used.fp = 0;
8623 shadow.used.mem = 0;
8624 shadow.defd = shadow.used;
8625 }
8626 }
8627 }
8628
8629 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8630 && NONJUMP_INSN_P (i)
8631 && GET_CODE (PATTERN (i)) != USE
8632 && GET_CODE (PATTERN (i)) != CLOBBER
8633 && get_attr_trap (i) == TRAP_YES)
8634 {
8635 if (optimize && !trap_pending)
8636 summarize_insn (PATTERN (i), &shadow, 0);
8637 trap_pending = 1;
8638 }
8639 }
8640 }
8641 \f
8642 /* The Alpha can only issue the instructions of a group simultaneously
8643    if the group is suitably aligned.  This is very processor-specific.  */
8644 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8645 that are marked "fake". These instructions do not exist on that target,
8646 but it is possible to see these insns with deranged combinations of
8647 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8648 choose a result at random. */
8649
8650 enum alphaev4_pipe {
8651 EV4_STOP = 0,
8652 EV4_IB0 = 1,
8653 EV4_IB1 = 2,
8654 EV4_IBX = 4
8655 };
8656
8657 enum alphaev5_pipe {
8658 EV5_STOP = 0,
8659 EV5_NONE = 1,
8660 EV5_E01 = 2,
8661 EV5_E0 = 4,
8662 EV5_E1 = 8,
8663 EV5_FAM = 16,
8664 EV5_FA = 32,
8665 EV5_FM = 64
8666 };
8667
8668 static enum alphaev4_pipe
8669 alphaev4_insn_pipe (rtx insn)
8670 {
8671 if (recog_memoized (insn) < 0)
8672 return EV4_STOP;
8673 if (get_attr_length (insn) != 4)
8674 return EV4_STOP;
8675
8676 switch (get_attr_type (insn))
8677 {
8678 case TYPE_ILD:
8679 case TYPE_LDSYM:
8680 case TYPE_FLD:
8681 case TYPE_LD_L:
8682 return EV4_IBX;
8683
8684 case TYPE_IADD:
8685 case TYPE_ILOG:
8686 case TYPE_ICMOV:
8687 case TYPE_ICMP:
8688 case TYPE_FST:
8689 case TYPE_SHIFT:
8690 case TYPE_IMUL:
8691 case TYPE_FBR:
8692 case TYPE_MVI: /* fake */
8693 return EV4_IB0;
8694
8695 case TYPE_IST:
8696 case TYPE_MISC:
8697 case TYPE_IBR:
8698 case TYPE_JSR:
8699 case TYPE_CALLPAL:
8700 case TYPE_FCPYS:
8701 case TYPE_FCMOV:
8702 case TYPE_FADD:
8703 case TYPE_FDIV:
8704 case TYPE_FMUL:
8705 case TYPE_ST_C:
8706 case TYPE_MB:
8707 case TYPE_FSQRT: /* fake */
8708 case TYPE_FTOI: /* fake */
8709 case TYPE_ITOF: /* fake */
8710 return EV4_IB1;
8711
8712 default:
8713 gcc_unreachable ();
8714 }
8715 }
8716
8717 static enum alphaev5_pipe
8718 alphaev5_insn_pipe (rtx insn)
8719 {
8720 if (recog_memoized (insn) < 0)
8721 return EV5_STOP;
8722 if (get_attr_length (insn) != 4)
8723 return EV5_STOP;
8724
8725 switch (get_attr_type (insn))
8726 {
8727 case TYPE_ILD:
8728 case TYPE_FLD:
8729 case TYPE_LDSYM:
8730 case TYPE_IADD:
8731 case TYPE_ILOG:
8732 case TYPE_ICMOV:
8733 case TYPE_ICMP:
8734 return EV5_E01;
8735
8736 case TYPE_IST:
8737 case TYPE_FST:
8738 case TYPE_SHIFT:
8739 case TYPE_IMUL:
8740 case TYPE_MISC:
8741 case TYPE_MVI:
8742 case TYPE_LD_L:
8743 case TYPE_ST_C:
8744 case TYPE_MB:
8745 case TYPE_FTOI: /* fake */
8746 case TYPE_ITOF: /* fake */
8747 return EV5_E0;
8748
8749 case TYPE_IBR:
8750 case TYPE_JSR:
8751 case TYPE_CALLPAL:
8752 return EV5_E1;
8753
8754 case TYPE_FCPYS:
8755 return EV5_FAM;
8756
8757 case TYPE_FBR:
8758 case TYPE_FCMOV:
8759 case TYPE_FADD:
8760 case TYPE_FDIV:
8761 case TYPE_FSQRT: /* fake */
8762 return EV5_FA;
8763
8764 case TYPE_FMUL:
8765 return EV5_FM;
8766
8767 default:
8768 gcc_unreachable ();
8769 }
8770 }
8771
8772 /* IN_USE is a mask of the slots currently filled within the insn group.
8773 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8774 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8775
8776 LEN is, of course, the length of the group in bytes. */
8777
8778 static rtx
8779 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8780 {
8781 int len, in_use;
8782
8783 len = in_use = 0;
8784
8785 if (! INSN_P (insn)
8786 || GET_CODE (PATTERN (insn)) == CLOBBER
8787 || GET_CODE (PATTERN (insn)) == USE)
8788 goto next_and_done;
8789
8790 while (1)
8791 {
8792 enum alphaev4_pipe pipe;
8793
8794 pipe = alphaev4_insn_pipe (insn);
8795 switch (pipe)
8796 {
8797 case EV4_STOP:
8798 /* Force complex instructions to start new groups. */
8799 if (in_use)
8800 goto done;
8801
8802 /* If this is a completely unrecognized insn, it's an asm.
8803 We don't know how long it is, so record length as -1 to
8804 signal a needed realignment. */
8805 if (recog_memoized (insn) < 0)
8806 len = -1;
8807 else
8808 len = get_attr_length (insn);
8809 goto next_and_done;
8810
8811 case EV4_IBX:
8812 if (in_use & EV4_IB0)
8813 {
8814 if (in_use & EV4_IB1)
8815 goto done;
8816 in_use |= EV4_IB1;
8817 }
8818 else
8819 in_use |= EV4_IB0 | EV4_IBX;
8820 break;
8821
8822 case EV4_IB0:
8823 if (in_use & EV4_IB0)
8824 {
8825 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8826 goto done;
8827 in_use |= EV4_IB1;
8828 }
8829 in_use |= EV4_IB0;
8830 break;
8831
8832 case EV4_IB1:
8833 if (in_use & EV4_IB1)
8834 goto done;
8835 in_use |= EV4_IB1;
8836 break;
8837
8838 default:
8839 gcc_unreachable ();
8840 }
8841 len += 4;
8842
8843 /* Haifa doesn't do well scheduling branches. */
8844 if (JUMP_P (insn))
8845 goto next_and_done;
8846
8847 next:
8848 insn = next_nonnote_insn (insn);
8849
8850 if (!insn || ! INSN_P (insn))
8851 goto done;
8852
8853 /* Let Haifa tell us where it thinks insn group boundaries are. */
8854 if (GET_MODE (insn) == TImode)
8855 goto done;
8856
8857 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8858 goto next;
8859 }
8860
8861 next_and_done:
8862 insn = next_nonnote_insn (insn);
8863
8864 done:
8865 *plen = len;
8866 *pin_use = in_use;
8867 return insn;
8868 }
8869
8870 /* IN_USE is a mask of the slots currently filled within the insn group.
8871 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8872 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8873
8874 LEN is, of course, the length of the group in bytes. */
8875
8876 static rtx
8877 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8878 {
8879 int len, in_use;
8880
8881 len = in_use = 0;
8882
8883 if (! INSN_P (insn)
8884 || GET_CODE (PATTERN (insn)) == CLOBBER
8885 || GET_CODE (PATTERN (insn)) == USE)
8886 goto next_and_done;
8887
8888 while (1)
8889 {
8890 enum alphaev5_pipe pipe;
8891
8892 pipe = alphaev5_insn_pipe (insn);
8893 switch (pipe)
8894 {
8895 case EV5_STOP:
8896 /* Force complex instructions to start new groups. */
8897 if (in_use)
8898 goto done;
8899
8900 /* If this is a completely unrecognized insn, it's an asm.
8901 We don't know how long it is, so record length as -1 to
8902 signal a needed realignment. */
8903 if (recog_memoized (insn) < 0)
8904 len = -1;
8905 else
8906 len = get_attr_length (insn);
8907 goto next_and_done;
8908
8909 	    /* ??? In most of the cases below, we would like to assert that
8910 	       they never happen, as that would indicate an error either in
8911 	       Haifa or in the scheduling description.  Unfortunately, Haifa
8912 	       never schedules the last instruction of the BB, so we don't
8913 	       have an accurate TI bit to go off of.  */
8914 case EV5_E01:
8915 if (in_use & EV5_E0)
8916 {
8917 if (in_use & EV5_E1)
8918 goto done;
8919 in_use |= EV5_E1;
8920 }
8921 else
8922 in_use |= EV5_E0 | EV5_E01;
8923 break;
8924
8925 case EV5_E0:
8926 if (in_use & EV5_E0)
8927 {
8928 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8929 goto done;
8930 in_use |= EV5_E1;
8931 }
8932 in_use |= EV5_E0;
8933 break;
8934
8935 case EV5_E1:
8936 if (in_use & EV5_E1)
8937 goto done;
8938 in_use |= EV5_E1;
8939 break;
8940
8941 case EV5_FAM:
8942 if (in_use & EV5_FA)
8943 {
8944 if (in_use & EV5_FM)
8945 goto done;
8946 in_use |= EV5_FM;
8947 }
8948 else
8949 in_use |= EV5_FA | EV5_FAM;
8950 break;
8951
8952 case EV5_FA:
8953 if (in_use & EV5_FA)
8954 goto done;
8955 in_use |= EV5_FA;
8956 break;
8957
8958 case EV5_FM:
8959 if (in_use & EV5_FM)
8960 goto done;
8961 in_use |= EV5_FM;
8962 break;
8963
8964 case EV5_NONE:
8965 break;
8966
8967 default:
8968 gcc_unreachable ();
8969 }
8970 len += 4;
8971
8972 /* Haifa doesn't do well scheduling branches. */
8973 /* ??? If this is predicted not-taken, slotting continues, except
8974 that no more IBR, FBR, or JSR insns may be slotted. */
8975 if (JUMP_P (insn))
8976 goto next_and_done;
8977
8978 next:
8979 insn = next_nonnote_insn (insn);
8980
8981 if (!insn || ! INSN_P (insn))
8982 goto done;
8983
8984 /* Let Haifa tell us where it thinks insn group boundaries are. */
8985 if (GET_MODE (insn) == TImode)
8986 goto done;
8987
8988 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8989 goto next;
8990 }
8991
8992 next_and_done:
8993 insn = next_nonnote_insn (insn);
8994
8995 done:
8996 *plen = len;
8997 *pin_use = in_use;
8998 return insn;
8999 }
9000
9001 static rtx
9002 alphaev4_next_nop (int *pin_use)
9003 {
9004 int in_use = *pin_use;
9005 rtx nop;
9006
9007 if (!(in_use & EV4_IB0))
9008 {
9009 in_use |= EV4_IB0;
9010 nop = gen_nop ();
9011 }
9012 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9013 {
9014 in_use |= EV4_IB1;
9015 nop = gen_nop ();
9016 }
9017 else if (TARGET_FP && !(in_use & EV4_IB1))
9018 {
9019 in_use |= EV4_IB1;
9020 nop = gen_fnop ();
9021 }
9022 else
9023 nop = gen_unop ();
9024
9025 *pin_use = in_use;
9026 return nop;
9027 }
9028
9029 static rtx
9030 alphaev5_next_nop (int *pin_use)
9031 {
9032 int in_use = *pin_use;
9033 rtx nop;
9034
9035 if (!(in_use & EV5_E1))
9036 {
9037 in_use |= EV5_E1;
9038 nop = gen_nop ();
9039 }
9040 else if (TARGET_FP && !(in_use & EV5_FA))
9041 {
9042 in_use |= EV5_FA;
9043 nop = gen_fnop ();
9044 }
9045 else if (TARGET_FP && !(in_use & EV5_FM))
9046 {
9047 in_use |= EV5_FM;
9048 nop = gen_fnop ();
9049 }
9050 else
9051 nop = gen_unop ();
9052
9053 *pin_use = in_use;
9054 return nop;
9055 }
9056
9057 /* The instruction group alignment main loop. */
9058
9059 static void
9060 alpha_align_insns (unsigned int max_align,
9061 rtx (*next_group) (rtx, int *, int *),
9062 rtx (*next_nop) (int *))
9063 {
9064 /* ALIGN is the known alignment for the insn group. */
9065 unsigned int align;
9066 /* OFS is the offset of the current insn in the insn group. */
9067 int ofs;
9068 int prev_in_use, in_use, len, ldgp;
9069 rtx i, next;
9070
9071   /* Let shorten_branches take care of assigning alignments to code labels.  */
9072 shorten_branches (get_insns ());
9073
9074 if (align_functions < 4)
9075 align = 4;
9076 else if ((unsigned int) align_functions < max_align)
9077 align = align_functions;
9078 else
9079 align = max_align;
9080
9081 ofs = prev_in_use = 0;
9082 i = get_insns ();
9083 if (NOTE_P (i))
9084 i = next_nonnote_insn (i);
9085
9086 ldgp = alpha_function_needs_gp ? 8 : 0;
9087
9088 while (i)
9089 {
9090 next = (*next_group) (i, &in_use, &len);
9091
9092 /* When we see a label, resync alignment etc. */
9093 if (LABEL_P (i))
9094 {
9095 unsigned int new_align = 1 << label_to_alignment (i);
9096
9097 if (new_align >= align)
9098 {
9099 align = new_align < max_align ? new_align : max_align;
9100 ofs = 0;
9101 }
9102
9103 else if (ofs & (new_align-1))
9104 ofs = (ofs | (new_align-1)) + 1;
9105 gcc_assert (!len);
9106 }
9107
9108       /* Handle complex instructions specially.  */
9109 else if (in_use == 0)
9110 {
9111 /* Asms will have length < 0. This is a signal that we have
9112 lost alignment knowledge. Assume, however, that the asm
9113 will not mis-align instructions. */
9114 if (len < 0)
9115 {
9116 ofs = 0;
9117 align = 4;
9118 len = 0;
9119 }
9120 }
9121
9122 /* If the known alignment is smaller than the recognized insn group,
9123 realign the output. */
9124 else if ((int) align < len)
9125 {
9126 unsigned int new_log_align = len > 8 ? 4 : 3;
9127 rtx prev, where;
9128
9129 where = prev = prev_nonnote_insn (i);
9130 if (!where || !LABEL_P (where))
9131 where = i;
9132
9133 /* Can't realign between a call and its gp reload. */
9134 if (! (TARGET_EXPLICIT_RELOCS
9135 && prev && CALL_P (prev)))
9136 {
9137 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9138 align = 1 << new_log_align;
9139 ofs = 0;
9140 }
9141 }
9142
9143 /* We may not insert padding inside the initial ldgp sequence. */
9144 else if (ldgp > 0)
9145 ldgp -= len;
9146
9147       /* If the group won't fit in the same aligned block (INT16) as the
9148 	 previous group, we need to add padding to keep the group together.
9149 	 Rather than simply leaving the insn filling to the assembler, we
9150 	 can make use of the knowledge of what sorts of instructions were
9151 	 issued in the previous group to make sure that all of the added
9152 	 nops are really free.  */
9153 else if (ofs + len > (int) align)
9154 {
9155 int nop_count = (align - ofs) / 4;
9156 rtx where;
9157
9158 /* Insert nops before labels, branches, and calls to truly merge
9159 the execution of the nops with the previous instruction group. */
9160 where = prev_nonnote_insn (i);
9161 if (where)
9162 {
9163 if (LABEL_P (where))
9164 {
9165 rtx where2 = prev_nonnote_insn (where);
9166 if (where2 && JUMP_P (where2))
9167 where = where2;
9168 }
9169 else if (NONJUMP_INSN_P (where))
9170 where = i;
9171 }
9172 else
9173 where = i;
9174
9175 do
9176 emit_insn_before ((*next_nop)(&prev_in_use), where);
9177 while (--nop_count);
9178 ofs = 0;
9179 }
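      /* Illustrative numbers only: with align == 16, ofs == 12 and an
	 8-byte group, the branch above inserts (16 - 12) / 4 == 1 nop so
	 that the group begins at the next aligned boundary.  */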
9180
9181 ofs = (ofs + len) & (align - 1);
9182 prev_in_use = in_use;
9183 i = next;
9184 }
9185 }
9186
9187 /* Insert an unop between a noreturn function call and GP load. */
9188
9189 static void
9190 alpha_pad_noreturn (void)
9191 {
9192 rtx insn, next;
9193
9194 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9195 {
9196 if (! (CALL_P (insn)
9197 && find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9198 continue;
9199
9200 /* Make sure we do not split a call and its corresponding
9201 CALL_ARG_LOCATION note. */
9202 if (CALL_P (insn))
9203 {
9204 next = NEXT_INSN (insn);
9205 if (next && NOTE_P (next)
9206 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9207 insn = next;
9208 }
9209
9210 next = next_active_insn (insn);
9211
9212 if (next)
9213 {
9214 rtx pat = PATTERN (next);
9215
9216 if (GET_CODE (pat) == SET
9217 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9218 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9219 emit_insn_after (gen_unop (), insn);
9220 }
9221 }
9222 }
9223 \f
9224 /* Machine dependent reorg pass. */
9225
9226 static void
9227 alpha_reorg (void)
9228 {
9229 /* Workaround for a linker error that triggers when an
9230    exception handler immediately follows a noreturn function.
9231
9232 The instruction stream from an object file:
9233
9234 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9235 58: 00 00 ba 27 ldah gp,0(ra)
9236 5c: 00 00 bd 23 lda gp,0(gp)
9237 60: 00 00 7d a7 ldq t12,0(gp)
9238 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9239
9240 was converted in the final link pass to:
9241
9242 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9243 fdb28: 00 00 fe 2f unop
9244 fdb2c: 00 00 fe 2f unop
9245 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9246 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9247
9248 GP load instructions were wrongly cleared by the linker relaxation
9249 pass. This workaround prevents removal of GP loads by inserting
9250 an unop instruction between a noreturn function call and
9251 exception handler prologue. */
9252
9253 if (current_function_has_exception_handlers ())
9254 alpha_pad_noreturn ();
9255
9256 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9257 alpha_handle_trap_shadows ();
9258
9259 /* Due to the number of extra trapb insns, don't bother fixing up
9260 alignment when trap precision is instruction. Moreover, we can
9261 only do our job when sched2 is run. */
9262 if (optimize && !optimize_size
9263 && alpha_tp != ALPHA_TP_INSN
9264 && flag_schedule_insns_after_reload)
9265 {
9266 if (alpha_tune == PROCESSOR_EV4)
9267 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9268 else if (alpha_tune == PROCESSOR_EV5)
9269 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9270 }
9271 }
9272 \f
9273 #ifdef HAVE_STAMP_H
9274 #include <stamp.h>
9275 #endif
9276
9277 static void
9278 alpha_file_start (void)
9279 {
9280 #ifdef OBJECT_FORMAT_ELF
9281 /* If emitting dwarf2 debug information, we cannot generate a .file
9282 directive to start the file, as it will conflict with dwarf2out
9283 file numbers. So it's only useful when emitting mdebug output. */
9284 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9285 #endif
9286
9287 default_file_start ();
9288 #ifdef MS_STAMP
9289 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9290 #endif
9291
9292 fputs ("\t.set noreorder\n", asm_out_file);
9293 fputs ("\t.set volatile\n", asm_out_file);
9294 if (TARGET_ABI_OSF)
9295 fputs ("\t.set noat\n", asm_out_file);
9296 if (TARGET_EXPLICIT_RELOCS)
9297 fputs ("\t.set nomacro\n", asm_out_file);
9298 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9299 {
9300 const char *arch;
9301
9302 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9303 arch = "ev6";
9304 else if (TARGET_MAX)
9305 arch = "pca56";
9306 else if (TARGET_BWX)
9307 arch = "ev56";
9308 else if (alpha_cpu == PROCESSOR_EV5)
9309 arch = "ev5";
9310 else
9311 arch = "ev4";
9312
9313 fprintf (asm_out_file, "\t.arch %s\n", arch);
9314 }
9315 }
9316
9317 #ifdef OBJECT_FORMAT_ELF
9318 /* Since we don't have a .dynbss section, we should not allow global
9319 relocations in the .rodata section. */
9320
9321 static int
9322 alpha_elf_reloc_rw_mask (void)
9323 {
9324 return flag_pic ? 3 : 2;
9325 }
9326
9327 /* Return a section for X. The only special thing we do here is to
9328 honor small data. */
9329
9330 static section *
9331 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9332 unsigned HOST_WIDE_INT align)
9333 {
9334 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9335 /* ??? Consider using mergeable sdata sections. */
9336 return sdata_section;
9337 else
9338 return default_elf_select_rtx_section (mode, x, align);
9339 }
9340
9341 static unsigned int
9342 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9343 {
9344 unsigned int flags = 0;
9345
9346 if (strcmp (name, ".sdata") == 0
9347 || strncmp (name, ".sdata.", 7) == 0
9348 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9349 || strcmp (name, ".sbss") == 0
9350 || strncmp (name, ".sbss.", 6) == 0
9351 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9352 flags = SECTION_SMALL;
9353
9354 flags |= default_section_type_flags (decl, name, reloc);
9355 return flags;
9356 }
9357 #endif /* OBJECT_FORMAT_ELF */
9358 \f
9359 /* Structure to collect function names for final output in link section. */
9360 /* Note that items marked with GTY can't be ifdef'ed out. */
9361
9362 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9363 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9364
9365 struct GTY(()) alpha_links
9366 {
9367 int num;
9368 const char *target;
9369 rtx linkage;
9370 enum links_kind lkind;
9371 enum reloc_kind rkind;
9372 };
9373
9374 struct GTY(()) alpha_funcs
9375 {
9376 int num;
9377 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9378 links;
9379 };
9380
9381 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9382 splay_tree alpha_links_tree;
9383 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9384 splay_tree alpha_funcs_tree;
9385
9386 static GTY(()) int alpha_funcs_num;
9387
9388 #if TARGET_ABI_OPEN_VMS
9389
9390 /* Return the VMS argument type corresponding to MODE. */
9391
9392 enum avms_arg_type
9393 alpha_arg_type (enum machine_mode mode)
9394 {
9395 switch (mode)
9396 {
9397 case SFmode:
9398 return TARGET_FLOAT_VAX ? FF : FS;
9399 case DFmode:
9400 return TARGET_FLOAT_VAX ? FD : FT;
9401 default:
9402 return I64;
9403 }
9404 }
9405
9406 /* Return an rtx for an integer representing the VMS Argument Information
9407 register value. */
9408
9409 rtx
9410 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9411 {
9412 unsigned HOST_WIDE_INT regval = cum.num_args;
9413 int i;
9414
9415 for (i = 0; i < 6; i++)
9416 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9417
9418 return GEN_INT (regval);
9419 }
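/* For illustration only: a call passing (int, double) with IEEE floats
   has cum.num_args == 2, cum.atypes[0] == I64 and cum.atypes[1] == FT,
   so the loop above produces

	regval = 2 | ((int) I64 << 8) | ((int) FT << 11);

   i.e. the argument count in the low 8 bits and one 3-bit type code per
   argument register starting at bit 8.  */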
9420 \f
9421 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9422 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9423 Return a SYMBOL_REF suited to the call instruction. */
9424
9425 rtx
9426 alpha_need_linkage (const char *name, int is_local)
9427 {
9428 splay_tree_node node;
9429 struct alpha_links *al;
9430 const char *target;
9431 tree id;
9432
9433 if (name[0] == '*')
9434 name++;
9435
9436 if (is_local)
9437 {
9438 struct alpha_funcs *cfaf;
9439
9440 if (!alpha_funcs_tree)
9441 alpha_funcs_tree = splay_tree_new_ggc
9442 (splay_tree_compare_pointers,
9443 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9444 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9445
9446
9447 cfaf = ggc_alloc_alpha_funcs ();
9448
9449 cfaf->links = 0;
9450 cfaf->num = ++alpha_funcs_num;
9451
9452 splay_tree_insert (alpha_funcs_tree,
9453 (splay_tree_key) current_function_decl,
9454 (splay_tree_value) cfaf);
9455 }
9456
9457 if (alpha_links_tree)
9458 {
9459 /* Is this name already defined? */
9460
9461 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9462 if (node)
9463 {
9464 al = (struct alpha_links *) node->value;
9465 if (is_local)
9466 {
9467 /* Defined here but external assumed. */
9468 if (al->lkind == KIND_EXTERN)
9469 al->lkind = KIND_LOCAL;
9470 }
9471 else
9472 {
9473 /* Used here but unused assumed. */
9474 if (al->lkind == KIND_UNUSED)
9475 al->lkind = KIND_LOCAL;
9476 }
9477 return al->linkage;
9478 }
9479 }
9480 else
9481 alpha_links_tree = splay_tree_new_ggc
9482 ((splay_tree_compare_fn) strcmp,
9483 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9484 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9485
9486 al = ggc_alloc_alpha_links ();
9487 name = ggc_strdup (name);
9488
9489 /* Assume external if no definition. */
9490 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9491
9492 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9493 and find the ultimate alias target like assemble_name. */
9494 id = get_identifier (name);
9495 target = NULL;
9496 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9497 {
9498 id = TREE_CHAIN (id);
9499 target = IDENTIFIER_POINTER (id);
9500 }
9501
9502 al->target = target ? target : name;
9503 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9504
9505 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9506 (splay_tree_value) al);
9507
9508 return al->linkage;
9509 }
9510
9511 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9512 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9513 this is the reference to the linkage pointer value, 0 if this is the
9514    reference to the function entry value.  RFLAG is 1 if this is a reduced
9515 reference (code address only), 0 if this is a full reference. */
9516
9517 rtx
9518 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9519 {
9520 splay_tree_node cfunnode;
9521 struct alpha_funcs *cfaf;
9522 struct alpha_links *al;
9523 const char *name = XSTR (func, 0);
9524
9525 cfaf = (struct alpha_funcs *) 0;
9526 al = (struct alpha_links *) 0;
9527
9528 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9529 cfaf = (struct alpha_funcs *) cfunnode->value;
9530
9531 if (cfaf->links)
9532 {
9533 splay_tree_node lnode;
9534
9535 /* Is this name already defined? */
9536
9537 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9538 if (lnode)
9539 al = (struct alpha_links *) lnode->value;
9540 }
9541 else
9542 cfaf->links = splay_tree_new_ggc
9543 ((splay_tree_compare_fn) strcmp,
9544 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9545 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9546
9547 if (!al)
9548 {
9549 size_t name_len;
9550 size_t buflen;
9551 char *linksym;
9552 splay_tree_node node = 0;
9553 struct alpha_links *anl;
9554
9555 if (name[0] == '*')
9556 name++;
9557
9558 name_len = strlen (name);
9559 linksym = (char *) alloca (name_len + 50);
9560
9561 al = ggc_alloc_alpha_links ();
9562 al->num = cfaf->num;
9563 al->target = NULL;
9564
9565 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9566 if (node)
9567 {
9568 anl = (struct alpha_links *) node->value;
9569 al->lkind = anl->lkind;
9570 name = anl->target;
9571 }
9572
9573 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
9574 buflen = strlen (linksym);
9575
9576 al->linkage = gen_rtx_SYMBOL_REF
9577 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9578
9579 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9580 (splay_tree_value) al);
9581 }
9582
9583 if (rflag)
9584 al->rkind = KIND_CODEADDR;
9585 else
9586 al->rkind = KIND_LINKAGE;
9587
9588 if (lflag)
9589 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9590 else
9591 return al->linkage;
9592 }
9593
9594 static int
9595 alpha_write_one_linkage (splay_tree_node node, void *data)
9596 {
9597 const char *const name = (const char *) node->key;
9598 struct alpha_links *link = (struct alpha_links *) node->value;
9599 FILE *stream = (FILE *) data;
9600
9601 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9602 if (link->rkind == KIND_CODEADDR)
9603 {
9604 if (link->lkind == KIND_LOCAL)
9605 {
9606 /* Local and used */
9607 fprintf (stream, "\t.quad %s..en\n", name);
9608 }
9609 else
9610 {
9611 /* External and used, request code address. */
9612 fprintf (stream, "\t.code_address %s\n", name);
9613 }
9614 }
9615 else
9616 {
9617 if (link->lkind == KIND_LOCAL)
9618 {
9619 /* Local and used, build linkage pair. */
9620 fprintf (stream, "\t.quad %s..en\n", name);
9621 fprintf (stream, "\t.quad %s\n", name);
9622 }
9623 else
9624 {
9625 /* External and used, request linkage pair. */
9626 fprintf (stream, "\t.linkage %s\n", name);
9627 }
9628 }
9629
9630 return 0;
9631 }
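/* For illustration only: an external function foo used from the first
   function in the file gets an entry written as

	$1..foo..lk:
		.linkage foo

   whereas a locally defined function gets an explicit pair of .quad
   entries (foo..en and foo) instead.  */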
9632
9633 static void
9634 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9635 {
9636 splay_tree_node node;
9637 struct alpha_funcs *func;
9638
9639 fprintf (stream, "\t.link\n");
9640 fprintf (stream, "\t.align 3\n");
9641 in_section = NULL;
9642
9643 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9644 func = (struct alpha_funcs *) node->value;
9645
9646 fputs ("\t.name ", stream);
9647 assemble_name (stream, funname);
9648 fputs ("..na\n", stream);
9649 ASM_OUTPUT_LABEL (stream, funname);
9650 fprintf (stream, "\t.pdesc ");
9651 assemble_name (stream, funname);
9652 fprintf (stream, "..en,%s\n",
9653 alpha_procedure_type == PT_STACK ? "stack"
9654 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9655
9656 if (func->links)
9657 {
9658 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9659 /* splay_tree_delete (func->links); */
9660 }
9661 }
9662
9663 /* Switch to an arbitrary section NAME with attributes as specified
9664 by FLAGS. ALIGN specifies any known alignment requirements for
9665 the section; 0 if the default should be used. */
9666
9667 static void
9668 vms_asm_named_section (const char *name, unsigned int flags,
9669 tree decl ATTRIBUTE_UNUSED)
9670 {
9671 fputc ('\n', asm_out_file);
9672 fprintf (asm_out_file, ".section\t%s", name);
9673
9674 if (flags & SECTION_DEBUG)
9675 fprintf (asm_out_file, ",NOWRT");
9676
9677 fputc ('\n', asm_out_file);
9678 }
9679
9680 /* Record an element in the table of global constructors. SYMBOL is
9681 a SYMBOL_REF of the function to be called; PRIORITY is a number
9682 between 0 and MAX_INIT_PRIORITY.
9683
9684 Differs from default_ctors_section_asm_out_constructor in that the
9685 width of the .ctors entry is always 64 bits, rather than the 32 bits
9686 used by a normal pointer. */
9687
9688 static void
9689 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9690 {
9691 switch_to_section (ctors_section);
9692 assemble_align (BITS_PER_WORD);
9693 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9694 }
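/* For illustration only: registering a constructor foo this way ends up
   as an 8-byte, 8-byte-aligned entry

	.quad foo

   in the .ctors section, rather than the 4-byte entry a 32-bit target
   would use.  */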
9695
9696 static void
9697 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9698 {
9699 switch_to_section (dtors_section);
9700 assemble_align (BITS_PER_WORD);
9701 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9702 }
9703 #else
9704
9705 rtx
9706 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9707 int is_local ATTRIBUTE_UNUSED)
9708 {
9709 return NULL_RTX;
9710 }
9711
9712 rtx
9713 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9714 tree cfundecl ATTRIBUTE_UNUSED,
9715 int lflag ATTRIBUTE_UNUSED,
9716 int rflag ATTRIBUTE_UNUSED)
9717 {
9718 return NULL_RTX;
9719 }
9720
9721 #endif /* TARGET_ABI_OPEN_VMS */
9722 \f
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}

/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
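
  /* Hard registers 32..62 are the floating-point registers $f0..$f30;
     $f31 (hard register 63) is the floating-point zero register and is
     fixed unconditionally, so it need not be handled here.  */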
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
\f
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

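/* The target structure itself; TARGET_INITIALIZER picks up the
   TARGET_xxx overrides defined above.  */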
struct gcc_target targetm = TARGET_INITIALIZER;

\f
#include "gt-alpha.h"