1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
61
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
64
65 static const char * const alpha_cpu_name[] =
66 {
67 "ev4", "ev5", "ev6"
68 };
69
70 /* Specify how accurate floating-point traps need to be. */
71
72 enum alpha_trap_precision alpha_tp;
73
74 /* Specify the floating-point rounding mode. */
75
76 enum alpha_fp_rounding_mode alpha_fprm;
77
78 /* Specify which things cause traps. */
79
80 enum alpha_fp_trap_mode alpha_fptm;
81
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
84
85 struct alpha_compare alpha_compare;
86
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
89
90 static int inside_function = FALSE;
91
92 /* The number of cycles of latency we should assume on memory reads. */
93
94 int alpha_memory_latency = 3;
95
96 /* Whether the function needs the GP. */
97
98 static int alpha_function_needs_gp;
99
100 /* The alias set for prologue/epilogue register save/restore. */
101
102 static GTY(()) int alpha_sr_alias_set;
103
104 /* The assembler name of the current function. */
105
106 static const char *alpha_fnname;
107
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
111
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
118
119 /* Costs of various operations on the different architectures. */
120
121 struct alpha_rtx_cost_data
122 {
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
132 };
133
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
135 {
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
146 },
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
157 },
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
168 },
169 };
170
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
175
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
177 {
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
187 };
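
/* Worked example (added here for illustration, assuming COSTS_N_INSNS (1)
   expands to 4 as defined in rtl.h): in the size table above a DImode
   multiply is rated 4 + 2 = 6 and a shift 4, so a single shift is still
   preferred over the multiply it replaces, but a two-shift sequence (8)
   is not -- exactly the "only if there's just one of them" behavior the
   comment above describes.  */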
188
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
195
196 #define REG_PV 27
197 #define REG_RA 26
198
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
202
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
206
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
211 \f
212 /* Implement TARGET_HANDLE_OPTION. */
213
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
216 {
217 switch (code)
218 {
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
223
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
228
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
233 }
234
235 return true;
236 }
237
238 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
239 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
240
241 static const char *
242 alpha_mangle_fundamental_type (tree type)
243 {
244 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
245 && TARGET_LONG_DOUBLE_128)
246 return "g";
247
248 /* For all other types, use normal C++ mangling. */
249 return NULL;
250 }
251 #endif
252
253 /* Parse target option strings. */
254
255 void
256 override_options (void)
257 {
258 static const struct cpu_table {
259 const char *const name;
260 const enum processor_type processor;
261 const int flags;
262 } cpu_table[] = {
263 { "ev4", PROCESSOR_EV4, 0 },
264 { "ev45", PROCESSOR_EV4, 0 },
265 { "21064", PROCESSOR_EV4, 0 },
266 { "ev5", PROCESSOR_EV5, 0 },
267 { "21164", PROCESSOR_EV5, 0 },
268 { "ev56", PROCESSOR_EV5, MASK_BWX },
269 { "21164a", PROCESSOR_EV5, MASK_BWX },
270 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
276 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { 0, 0, 0 }
278 };
279
280 int i;
281
282 /* Unicos/Mk doesn't have shared libraries. */
283 if (TARGET_ABI_UNICOSMK && flag_pic)
284 {
285 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
286 (flag_pic > 1) ? "PIC" : "pic");
287 flag_pic = 0;
288 }
289
 290 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
291 floating-point instructions. Make that the default for this target. */
292 if (TARGET_ABI_UNICOSMK)
293 alpha_fprm = ALPHA_FPRM_DYN;
294 else
295 alpha_fprm = ALPHA_FPRM_NORM;
296
297 alpha_tp = ALPHA_TP_PROG;
298 alpha_fptm = ALPHA_FPTM_N;
299
300 /* We cannot use su and sui qualifiers for conversion instructions on
301 Unicos/Mk. I'm not sure if this is due to assembler or hardware
302 limitations. Right now, we issue a warning if -mieee is specified
303 and then ignore it; eventually, we should either get it right or
304 disable the option altogether. */
305
306 if (TARGET_IEEE)
307 {
308 if (TARGET_ABI_UNICOSMK)
309 warning (0, "-mieee not supported on Unicos/Mk");
310 else
311 {
312 alpha_tp = ALPHA_TP_INSN;
313 alpha_fptm = ALPHA_FPTM_SU;
314 }
315 }
316
317 if (TARGET_IEEE_WITH_INEXACT)
318 {
319 if (TARGET_ABI_UNICOSMK)
320 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
321 else
322 {
323 alpha_tp = ALPHA_TP_INSN;
324 alpha_fptm = ALPHA_FPTM_SUI;
325 }
326 }
327
328 if (alpha_tp_string)
329 {
330 if (! strcmp (alpha_tp_string, "p"))
331 alpha_tp = ALPHA_TP_PROG;
332 else if (! strcmp (alpha_tp_string, "f"))
333 alpha_tp = ALPHA_TP_FUNC;
334 else if (! strcmp (alpha_tp_string, "i"))
335 alpha_tp = ALPHA_TP_INSN;
336 else
337 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
338 }
339
340 if (alpha_fprm_string)
341 {
342 if (! strcmp (alpha_fprm_string, "n"))
343 alpha_fprm = ALPHA_FPRM_NORM;
344 else if (! strcmp (alpha_fprm_string, "m"))
345 alpha_fprm = ALPHA_FPRM_MINF;
346 else if (! strcmp (alpha_fprm_string, "c"))
347 alpha_fprm = ALPHA_FPRM_CHOP;
348 else if (! strcmp (alpha_fprm_string,"d"))
349 alpha_fprm = ALPHA_FPRM_DYN;
350 else
351 error ("bad value %qs for -mfp-rounding-mode switch",
352 alpha_fprm_string);
353 }
354
355 if (alpha_fptm_string)
356 {
357 if (strcmp (alpha_fptm_string, "n") == 0)
358 alpha_fptm = ALPHA_FPTM_N;
359 else if (strcmp (alpha_fptm_string, "u") == 0)
360 alpha_fptm = ALPHA_FPTM_U;
361 else if (strcmp (alpha_fptm_string, "su") == 0)
362 alpha_fptm = ALPHA_FPTM_SU;
363 else if (strcmp (alpha_fptm_string, "sui") == 0)
364 alpha_fptm = ALPHA_FPTM_SUI;
365 else
366 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
367 }
368
369 if (alpha_cpu_string)
370 {
371 for (i = 0; cpu_table [i].name; i++)
372 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
373 {
374 alpha_tune = alpha_cpu = cpu_table [i].processor;
375 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
376 target_flags |= cpu_table [i].flags;
377 break;
378 }
379 if (! cpu_table [i].name)
380 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
381 }
382
383 if (alpha_tune_string)
384 {
385 for (i = 0; cpu_table [i].name; i++)
386 if (! strcmp (alpha_tune_string, cpu_table [i].name))
387 {
388 alpha_tune = cpu_table [i].processor;
389 break;
390 }
391 if (! cpu_table [i].name)
392 error ("bad value %qs for -mcpu switch", alpha_tune_string);
393 }
394
395 /* Do some sanity checks on the above options. */
396
397 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
398 {
399 warning (0, "trap mode not supported on Unicos/Mk");
400 alpha_fptm = ALPHA_FPTM_N;
401 }
402
403 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
404 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
405 {
406 warning (0, "fp software completion requires -mtrap-precision=i");
407 alpha_tp = ALPHA_TP_INSN;
408 }
409
410 if (alpha_cpu == PROCESSOR_EV6)
411 {
412 /* Except for EV6 pass 1 (not released), we always have precise
413 arithmetic traps. Which means we can do software completion
414 without minding trap shadows. */
415 alpha_tp = ALPHA_TP_PROG;
416 }
417
418 if (TARGET_FLOAT_VAX)
419 {
420 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
421 {
422 warning (0, "rounding mode not supported for VAX floats");
423 alpha_fprm = ALPHA_FPRM_NORM;
424 }
425 if (alpha_fptm == ALPHA_FPTM_SUI)
426 {
427 warning (0, "trap mode not supported for VAX floats");
428 alpha_fptm = ALPHA_FPTM_SU;
429 }
430 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
431 warning (0, "128-bit long double not supported for VAX floats");
432 target_flags &= ~MASK_LONG_DOUBLE_128;
433 }
434
435 {
436 char *end;
437 int lat;
438
439 if (!alpha_mlat_string)
440 alpha_mlat_string = "L1";
441
442 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
443 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
444 ;
445 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
446 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
447 && alpha_mlat_string[2] == '\0')
448 {
449 static int const cache_latency[][4] =
450 {
451 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
452 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
453 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
454 };
455
456 lat = alpha_mlat_string[1] - '0';
457 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
458 {
459 warning (0, "L%d cache latency unknown for %s",
460 lat, alpha_cpu_name[alpha_tune]);
461 lat = 3;
462 }
463 else
464 lat = cache_latency[alpha_tune][lat-1];
465 }
466 else if (! strcmp (alpha_mlat_string, "main"))
467 {
468 /* Most current memories have about 370ns latency. This is
469 a reasonable guess for a fast cpu. */
470 lat = 150;
471 }
472 else
473 {
474 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
475 lat = 3;
476 }
477
478 alpha_memory_latency = lat;
479 }
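
 /* Worked example (added here for illustration): with the table above,
    "-mmemory-latency=L2" while tuning for ev5 yields
    cache_latency[PROCESSOR_EV5][1] == 12 cycles, "-mmemory-latency=L3"
    on ev4 hits the -1 entry and falls back to 3 with a warning, and
    "-mmemory-latency=main" uses the fixed guess of 150 cycles.  */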
480
481 /* Default the definition of "small data" to 8 bytes. */
482 if (!g_switch_set)
483 g_switch_value = 8;
484
485 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
486 if (flag_pic == 1)
487 target_flags |= MASK_SMALL_DATA;
488 else if (flag_pic == 2)
489 target_flags &= ~MASK_SMALL_DATA;
490
491 /* Align labels and loops for optimal branching. */
492 /* ??? Kludge these by not doing anything if we don't optimize and also if
493 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
494 if (optimize > 0 && write_symbols != SDB_DEBUG)
495 {
496 if (align_loops <= 0)
497 align_loops = 16;
498 if (align_jumps <= 0)
499 align_jumps = 16;
500 }
501 if (align_functions <= 0)
502 align_functions = 16;
503
504 /* Acquire a unique set number for our register saves and restores. */
505 alpha_sr_alias_set = new_alias_set ();
506
507 /* Register variables and functions with the garbage collector. */
508
509 /* Set up function hooks. */
510 init_machine_status = alpha_init_machine_status;
511
512 /* Tell the compiler when we're using VAX floating point. */
513 if (TARGET_FLOAT_VAX)
514 {
515 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
516 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
517 REAL_MODE_FORMAT (TFmode) = NULL;
518 }
519
520 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
521 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
522 target_flags |= MASK_LONG_DOUBLE_128;
523 #endif
524 }
525 \f
526 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
527
528 int
529 zap_mask (HOST_WIDE_INT value)
530 {
531 int i;
532
533 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
534 i++, value >>= 8)
535 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
536 return 0;
537
538 return 1;
539 }
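
/* Worked examples (added here for illustration): zap_mask accepts values
   in which every byte is 0x00 or 0xff, e.g. 0x00000000ffffffff or
   0xff00ff0000ff00ff, and rejects anything else, e.g. 0x1234, whose low
   byte 0x34 is neither.  Such values are exactly the AND masks that a
   ZAP/ZAPNOT insn can implement with its 8-bit byte-select immediate.  */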
540
541 /* Return true if OP is valid for a particular TLS relocation.
542 We are already guaranteed that OP is a CONST. */
543
544 int
545 tls_symbolic_operand_1 (rtx op, int size, int unspec)
546 {
547 op = XEXP (op, 0);
548
549 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
550 return 0;
551 op = XVECEXP (op, 0, 0);
552
553 if (GET_CODE (op) != SYMBOL_REF)
554 return 0;
555
556 switch (SYMBOL_REF_TLS_MODEL (op))
557 {
558 case TLS_MODEL_LOCAL_DYNAMIC:
559 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
560 case TLS_MODEL_INITIAL_EXEC:
561 return unspec == UNSPEC_TPREL && size == 64;
562 case TLS_MODEL_LOCAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
564 default:
565 gcc_unreachable ();
566 }
567 }
568
569 /* Used by aligned_memory_operand and unaligned_memory_operand to
570 resolve what reload is going to do with OP if it's a register. */
571
572 rtx
573 resolve_reload_operand (rtx op)
574 {
575 if (reload_in_progress)
576 {
577 rtx tmp = op;
578 if (GET_CODE (tmp) == SUBREG)
579 tmp = SUBREG_REG (tmp);
580 if (GET_CODE (tmp) == REG
581 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
582 {
583 op = reg_equiv_memory_loc[REGNO (tmp)];
584 if (op == 0)
585 return 0;
586 }
587 }
588 return op;
589 }
590
591 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
592 the range defined for C in [I-P]. */
593
594 bool
595 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
596 {
597 switch (c)
598 {
599 case 'I':
600 /* An unsigned 8 bit constant. */
601 return (unsigned HOST_WIDE_INT) value < 0x100;
602 case 'J':
603 /* The constant zero. */
604 return value == 0;
605 case 'K':
606 /* A signed 16 bit constant. */
607 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
608 case 'L':
609 /* A shifted signed 16 bit constant appropriate for LDAH. */
610 return ((value & 0xffff) == 0
611 && ((value) >> 31 == -1 || value >> 31 == 0));
612 case 'M':
 613 /* A constant that can be AND'ed using a ZAP insn. */
614 return zap_mask (value);
615 case 'N':
616 /* A complemented unsigned 8 bit constant. */
617 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
618 case 'O':
619 /* A negated unsigned 8 bit constant. */
620 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
621 case 'P':
622 /* The constant 1, 2 or 3. */
623 return value == 1 || value == 2 || value == 3;
624
625 default:
626 return false;
627 }
628 }
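
/* Worked examples (added here for illustration): 255 satisfies 'I', 0
   satisfies 'J', -32768 satisfies 'K', 0x12340000 satisfies 'L' (low 16
   bits clear, sign-extends from 32 bits), 0x00ff00ff satisfies 'M', -5
   satisfies 'N' (~-5 == 4), -200 satisfies 'O', and 3 satisfies 'P'.  */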
629
630 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
631 matches for C in [GH]. */
632
633 bool
634 alpha_const_double_ok_for_letter_p (rtx value, int c)
635 {
636 switch (c)
637 {
638 case 'G':
639 /* The floating point zero constant. */
640 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
641 && value == CONST0_RTX (GET_MODE (value)));
642
643 case 'H':
644 /* A valid operand of a ZAP insn. */
645 return (GET_MODE (value) == VOIDmode
646 && zap_mask (CONST_DOUBLE_LOW (value))
647 && zap_mask (CONST_DOUBLE_HIGH (value)));
648
649 default:
650 return false;
651 }
652 }
653
 654 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
 655 matches for C. */
656
657 bool
658 alpha_extra_constraint (rtx value, int c)
659 {
660 switch (c)
661 {
662 case 'Q':
663 return normal_memory_operand (value, VOIDmode);
664 case 'R':
665 return direct_call_operand (value, Pmode);
666 case 'S':
667 return (GET_CODE (value) == CONST_INT
668 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
669 case 'T':
670 return GET_CODE (value) == HIGH;
671 case 'U':
672 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
673 case 'W':
674 return (GET_CODE (value) == CONST_VECTOR
675 && value == CONST0_RTX (GET_MODE (value)));
676 default:
677 return false;
678 }
679 }
680
 681 /* The scalar modes supported differ from the default check-what-c-supports
682 version in that sometimes TFmode is available even when long double
683 indicates only DFmode. On unicosmk, we have the situation that HImode
684 doesn't map to any C type, but of course we still support that. */
685
686 static bool
687 alpha_scalar_mode_supported_p (enum machine_mode mode)
688 {
689 switch (mode)
690 {
691 case QImode:
692 case HImode:
693 case SImode:
694 case DImode:
695 case TImode: /* via optabs.c */
696 return true;
697
698 case SFmode:
699 case DFmode:
700 return true;
701
702 case TFmode:
703 return TARGET_HAS_XFLOATING_LIBS;
704
705 default:
706 return false;
707 }
708 }
709
710 /* Alpha implements a couple of integer vector mode operations when
711 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
712 which allows the vectorizer to operate on e.g. move instructions,
713 or when expand_vector_operations can do something useful. */
714
715 static bool
716 alpha_vector_mode_supported_p (enum machine_mode mode)
717 {
718 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
719 }
720
721 /* Return 1 if this function can directly return via $26. */
722
723 int
724 direct_return (void)
725 {
726 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
727 && reload_completed
728 && alpha_sa_size () == 0
729 && get_frame_size () == 0
730 && current_function_outgoing_args_size == 0
731 && current_function_pretend_args_size == 0);
732 }
733
734 /* Return the ADDR_VEC associated with a tablejump insn. */
735
736 rtx
737 alpha_tablejump_addr_vec (rtx insn)
738 {
739 rtx tmp;
740
741 tmp = JUMP_LABEL (insn);
742 if (!tmp)
743 return NULL_RTX;
744 tmp = NEXT_INSN (tmp);
745 if (!tmp)
746 return NULL_RTX;
747 if (GET_CODE (tmp) == JUMP_INSN
748 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
749 return PATTERN (tmp);
750 return NULL_RTX;
751 }
752
753 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
754
755 rtx
756 alpha_tablejump_best_label (rtx insn)
757 {
758 rtx jump_table = alpha_tablejump_addr_vec (insn);
759 rtx best_label = NULL_RTX;
760
761 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
762 there for edge frequency counts from profile data. */
763
764 if (jump_table)
765 {
766 int n_labels = XVECLEN (jump_table, 1);
767 int best_count = -1;
768 int i, j;
769
770 for (i = 0; i < n_labels; i++)
771 {
772 int count = 1;
773
774 for (j = i + 1; j < n_labels; j++)
775 if (XEXP (XVECEXP (jump_table, 1, i), 0)
776 == XEXP (XVECEXP (jump_table, 1, j), 0))
777 count++;
778
779 if (count > best_count)
780 best_count = count, best_label = XVECEXP (jump_table, 1, i);
781 }
782 }
783
784 return best_label ? best_label : const0_rtx;
785 }
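
/* Worked example (added here for illustration): if the ADDR_DIFF_VEC
   lists the labels LAB1, LAB2, LAB1, LAB3, the loop above counts two
   occurrences of LAB1 and one each of LAB2 and LAB3, so the vector entry
   for LAB1 is returned as the guess for the predicted edge.  */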
786
787 /* Return the TLS model to use for SYMBOL. */
788
789 static enum tls_model
790 tls_symbolic_operand_type (rtx symbol)
791 {
792 enum tls_model model;
793
794 if (GET_CODE (symbol) != SYMBOL_REF)
795 return 0;
796 model = SYMBOL_REF_TLS_MODEL (symbol);
797
798 /* Local-exec with a 64-bit size is the same code as initial-exec. */
799 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
800 model = TLS_MODEL_INITIAL_EXEC;
801
802 return model;
803 }
804 \f
805 /* Return true if the function DECL will share the same GP as any
806 function in the current unit of translation. */
807
808 static bool
809 decl_has_samegp (tree decl)
810 {
811 /* Functions that are not local can be overridden, and thus may
812 not share the same gp. */
813 if (!(*targetm.binds_local_p) (decl))
814 return false;
815
816 /* If -msmall-data is in effect, assume that there is only one GP
817 for the module, and so any local symbol has this property. We
818 need explicit relocations to be able to enforce this for symbols
819 not defined in this unit of translation, however. */
820 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
821 return true;
822
823 /* Functions that are not external are defined in this UoT. */
824 /* ??? Irritatingly, static functions not yet emitted are still
825 marked "external". Apply this to non-static functions only. */
826 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
827 }
828
829 /* Return true if EXP should be placed in the small data section. */
830
831 static bool
832 alpha_in_small_data_p (tree exp)
833 {
834 /* We want to merge strings, so we never consider them small data. */
835 if (TREE_CODE (exp) == STRING_CST)
836 return false;
837
838 /* Functions are never in the small data area. Duh. */
839 if (TREE_CODE (exp) == FUNCTION_DECL)
840 return false;
841
842 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
843 {
844 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
845 if (strcmp (section, ".sdata") == 0
846 || strcmp (section, ".sbss") == 0)
847 return true;
848 }
849 else
850 {
851 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
852
853 /* If this is an incomplete type with size 0, then we can't put it
854 in sdata because it might be too big when completed. */
855 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
856 return true;
857 }
858
859 return false;
860 }
861
862 #if TARGET_ABI_OPEN_VMS
863 static bool
864 alpha_linkage_symbol_p (const char *symname)
865 {
866 int symlen = strlen (symname);
867
868 if (symlen > 4)
869 return strcmp (&symname [symlen - 4], "..lk") == 0;
870
871 return false;
872 }
873
874 #define LINKAGE_SYMBOL_REF_P(X) \
875 ((GET_CODE (X) == SYMBOL_REF \
876 && alpha_linkage_symbol_p (XSTR (X, 0))) \
877 || (GET_CODE (X) == CONST \
878 && GET_CODE (XEXP (X, 0)) == PLUS \
879 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
880 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
881 #endif
882
883 /* legitimate_address_p recognizes an RTL expression that is a valid
884 memory address for an instruction. The MODE argument is the
885 machine mode for the MEM expression that wants to use this address.
886
887 For Alpha, we have either a constant address or the sum of a
888 register and a constant address, or just a register. For DImode,
 889 any of those forms can be surrounded with an AND that clears the
890 low-order three bits; this is an "unaligned" access. */
891
892 bool
893 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
894 {
895 /* If this is an ldq_u type address, discard the outer AND. */
896 if (mode == DImode
897 && GET_CODE (x) == AND
898 && GET_CODE (XEXP (x, 1)) == CONST_INT
899 && INTVAL (XEXP (x, 1)) == -8)
900 x = XEXP (x, 0);
901
902 /* Discard non-paradoxical subregs. */
903 if (GET_CODE (x) == SUBREG
904 && (GET_MODE_SIZE (GET_MODE (x))
905 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
906 x = SUBREG_REG (x);
907
908 /* Unadorned general registers are valid. */
909 if (REG_P (x)
910 && (strict
911 ? STRICT_REG_OK_FOR_BASE_P (x)
912 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
913 return true;
914
915 /* Constant addresses (i.e. +/- 32k) are valid. */
916 if (CONSTANT_ADDRESS_P (x))
917 return true;
918
919 #if TARGET_ABI_OPEN_VMS
920 if (LINKAGE_SYMBOL_REF_P (x))
921 return true;
922 #endif
923
924 /* Register plus a small constant offset is valid. */
925 if (GET_CODE (x) == PLUS)
926 {
927 rtx ofs = XEXP (x, 1);
928 x = XEXP (x, 0);
929
930 /* Discard non-paradoxical subregs. */
931 if (GET_CODE (x) == SUBREG
932 && (GET_MODE_SIZE (GET_MODE (x))
933 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
934 x = SUBREG_REG (x);
935
936 if (REG_P (x))
937 {
938 if (! strict
939 && NONSTRICT_REG_OK_FP_BASE_P (x)
940 && GET_CODE (ofs) == CONST_INT)
941 return true;
942 if ((strict
943 ? STRICT_REG_OK_FOR_BASE_P (x)
944 : NONSTRICT_REG_OK_FOR_BASE_P (x))
945 && CONSTANT_ADDRESS_P (ofs))
946 return true;
947 }
948 }
949
950 /* If we're managing explicit relocations, LO_SUM is valid, as
951 are small data symbols. */
952 else if (TARGET_EXPLICIT_RELOCS)
953 {
954 if (small_symbolic_operand (x, Pmode))
955 return true;
956
957 if (GET_CODE (x) == LO_SUM)
958 {
959 rtx ofs = XEXP (x, 1);
960 x = XEXP (x, 0);
961
962 /* Discard non-paradoxical subregs. */
963 if (GET_CODE (x) == SUBREG
964 && (GET_MODE_SIZE (GET_MODE (x))
965 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
966 x = SUBREG_REG (x);
967
968 /* Must have a valid base register. */
969 if (! (REG_P (x)
970 && (strict
971 ? STRICT_REG_OK_FOR_BASE_P (x)
972 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
973 return false;
974
975 /* The symbol must be local. */
976 if (local_symbolic_operand (ofs, Pmode)
977 || dtp32_symbolic_operand (ofs, Pmode)
978 || tp32_symbolic_operand (ofs, Pmode))
979 return true;
980 }
981 }
982
983 return false;
984 }
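
/* Worked examples (added here for illustration): under the rules above,
   a base register, (plus (reg) (const_int 32760)) and, for DImode, the
   ldq_u form (and (plus (reg) (const_int 11)) (const_int -8)) are all
   accepted, while, at least under the strict rules,
   (plus (reg) (const_int 1000000)) is not, because the offset falls
   outside the CONSTANT_ADDRESS_P (16-bit) range and must first be
   legitimized.  */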
985
986 /* Build the SYMBOL_REF for __tls_get_addr. */
987
988 static GTY(()) rtx tls_get_addr_libfunc;
989
990 static rtx
991 get_tls_get_addr (void)
992 {
993 if (!tls_get_addr_libfunc)
994 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
995 return tls_get_addr_libfunc;
996 }
997
998 /* Try machine-dependent ways of modifying an illegitimate address
999 to be legitimate. If we find one, return the new, valid address. */
1000
1001 rtx
1002 alpha_legitimize_address (rtx x, rtx scratch,
1003 enum machine_mode mode ATTRIBUTE_UNUSED)
1004 {
1005 HOST_WIDE_INT addend;
1006
1007 /* If the address is (plus reg const_int) and the CONST_INT is not a
1008 valid offset, compute the high part of the constant and add it to
1009 the register. Then our address is (plus temp low-part-const). */
1010 if (GET_CODE (x) == PLUS
1011 && GET_CODE (XEXP (x, 0)) == REG
1012 && GET_CODE (XEXP (x, 1)) == CONST_INT
1013 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1014 {
1015 addend = INTVAL (XEXP (x, 1));
1016 x = XEXP (x, 0);
1017 goto split_addend;
1018 }
1019
1020 /* If the address is (const (plus FOO const_int)), find the low-order
1021 part of the CONST_INT. Then load FOO plus any high-order part of the
1022 CONST_INT into a register. Our address is (plus reg low-part-const).
1023 This is done to reduce the number of GOT entries. */
1024 if (!no_new_pseudos
1025 && GET_CODE (x) == CONST
1026 && GET_CODE (XEXP (x, 0)) == PLUS
1027 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1028 {
1029 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1030 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1031 goto split_addend;
1032 }
1033
1034 /* If we have a (plus reg const), emit the load as in (2), then add
1035 the two registers, and finally generate (plus reg low-part-const) as
1036 our address. */
1037 if (!no_new_pseudos
1038 && GET_CODE (x) == PLUS
1039 && GET_CODE (XEXP (x, 0)) == REG
1040 && GET_CODE (XEXP (x, 1)) == CONST
1041 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1042 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1043 {
1044 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1045 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1046 XEXP (XEXP (XEXP (x, 1), 0), 0),
1047 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1048 goto split_addend;
1049 }
1050
1051 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1052 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1053 {
1054 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1055
1056 switch (tls_symbolic_operand_type (x))
1057 {
1058 case TLS_MODEL_NONE:
1059 break;
1060
1061 case TLS_MODEL_GLOBAL_DYNAMIC:
1062 start_sequence ();
1063
1064 r0 = gen_rtx_REG (Pmode, 0);
1065 r16 = gen_rtx_REG (Pmode, 16);
1066 tga = get_tls_get_addr ();
1067 dest = gen_reg_rtx (Pmode);
1068 seq = GEN_INT (alpha_next_sequence_number++);
1069
1070 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1071 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1072 insn = emit_call_insn (insn);
1073 CONST_OR_PURE_CALL_P (insn) = 1;
1074 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1075
1076 insn = get_insns ();
1077 end_sequence ();
1078
1079 emit_libcall_block (insn, dest, r0, x);
1080 return dest;
1081
1082 case TLS_MODEL_LOCAL_DYNAMIC:
1083 start_sequence ();
1084
1085 r0 = gen_rtx_REG (Pmode, 0);
1086 r16 = gen_rtx_REG (Pmode, 16);
1087 tga = get_tls_get_addr ();
1088 scratch = gen_reg_rtx (Pmode);
1089 seq = GEN_INT (alpha_next_sequence_number++);
1090
1091 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1092 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1093 insn = emit_call_insn (insn);
1094 CONST_OR_PURE_CALL_P (insn) = 1;
1095 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1096
1097 insn = get_insns ();
1098 end_sequence ();
1099
1100 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1101 UNSPEC_TLSLDM_CALL);
1102 emit_libcall_block (insn, scratch, r0, eqv);
1103
1104 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1105 eqv = gen_rtx_CONST (Pmode, eqv);
1106
1107 if (alpha_tls_size == 64)
1108 {
1109 dest = gen_reg_rtx (Pmode);
1110 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1111 emit_insn (gen_adddi3 (dest, dest, scratch));
1112 return dest;
1113 }
1114 if (alpha_tls_size == 32)
1115 {
1116 insn = gen_rtx_HIGH (Pmode, eqv);
1117 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1118 scratch = gen_reg_rtx (Pmode);
1119 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1120 }
1121 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1122
1123 case TLS_MODEL_INITIAL_EXEC:
1124 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1125 eqv = gen_rtx_CONST (Pmode, eqv);
1126 tp = gen_reg_rtx (Pmode);
1127 scratch = gen_reg_rtx (Pmode);
1128 dest = gen_reg_rtx (Pmode);
1129
1130 emit_insn (gen_load_tp (tp));
1131 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1132 emit_insn (gen_adddi3 (dest, tp, scratch));
1133 return dest;
1134
1135 case TLS_MODEL_LOCAL_EXEC:
1136 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1137 eqv = gen_rtx_CONST (Pmode, eqv);
1138 tp = gen_reg_rtx (Pmode);
1139
1140 emit_insn (gen_load_tp (tp));
1141 if (alpha_tls_size == 32)
1142 {
1143 insn = gen_rtx_HIGH (Pmode, eqv);
1144 insn = gen_rtx_PLUS (Pmode, tp, insn);
1145 tp = gen_reg_rtx (Pmode);
1146 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1147 }
1148 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1149
1150 default:
1151 gcc_unreachable ();
1152 }
1153
1154 if (local_symbolic_operand (x, Pmode))
1155 {
1156 if (small_symbolic_operand (x, Pmode))
1157 return x;
1158 else
1159 {
1160 if (!no_new_pseudos)
1161 scratch = gen_reg_rtx (Pmode);
1162 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1163 gen_rtx_HIGH (Pmode, x)));
1164 return gen_rtx_LO_SUM (Pmode, scratch, x);
1165 }
1166 }
1167 }
1168
1169 return NULL;
1170
1171 split_addend:
1172 {
1173 HOST_WIDE_INT low, high;
1174
1175 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1176 addend -= low;
1177 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1178 addend -= high;
1179
1180 if (addend)
1181 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1182 (no_new_pseudos ? scratch : NULL_RTX),
1183 1, OPTAB_LIB_WIDEN);
1184 if (high)
1185 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1186 (no_new_pseudos ? scratch : NULL_RTX),
1187 1, OPTAB_LIB_WIDEN);
1188
1189 return plus_constant (x, low);
1190 }
1191 }
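
/* Worked example (added here for illustration): legitimizing
   (plus (reg) (const_int 0x9000)) reaches split_addend with addend
   0x9000, which splits into low = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000
   = -0x7000 and high = 0x10000; the 0x10000 is added to the register
   with a single ldah and the result is (plus (reg') (const_int -0x7000)),
   a valid 16-bit displacement.  */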
1192
1193 /* Primarily this is required for TLS symbols, but given that our move
1194 patterns *ought* to be able to handle any symbol at any time, we
1195 should never be spilling symbolic operands to the constant pool, ever. */
1196
1197 static bool
1198 alpha_cannot_force_const_mem (rtx x)
1199 {
1200 enum rtx_code code = GET_CODE (x);
1201 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1202 }
1203
1204 /* We do not allow indirect calls to be optimized into sibling calls, nor
1205 can we allow a call to a function with a different GP to be optimized
1206 into a sibcall. */
1207
1208 static bool
1209 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1210 {
1211 /* Can't do indirect tail calls, since we don't know if the target
1212 uses the same GP. */
1213 if (!decl)
1214 return false;
1215
1216 /* Otherwise, we can make a tail call if the target function shares
1217 the same GP. */
1218 return decl_has_samegp (decl);
1219 }
1220
1221 int
1222 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1223 {
1224 rtx x = *px;
1225
1226 /* Don't re-split. */
1227 if (GET_CODE (x) == LO_SUM)
1228 return -1;
1229
1230 return small_symbolic_operand (x, Pmode) != 0;
1231 }
1232
1233 static int
1234 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1235 {
1236 rtx x = *px;
1237
1238 /* Don't re-split. */
1239 if (GET_CODE (x) == LO_SUM)
1240 return -1;
1241
1242 if (small_symbolic_operand (x, Pmode))
1243 {
1244 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1245 *px = x;
1246 return -1;
1247 }
1248
1249 return 0;
1250 }
1251
1252 rtx
1253 split_small_symbolic_operand (rtx x)
1254 {
1255 x = copy_insn (x);
1256 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1257 return x;
1258 }
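
/* Worked example (added here for illustration): given an insn pattern
   containing a small-data SYMBOL_REF "foo", the walk above rewrites it
   as (lo_sum pic_offset_table_rtx (symbol_ref "foo")), i.e. a
   gp-relative reference, and leaves any LO_SUM it finds alone so the
   transformation is idempotent.  */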
1259
1260 /* Indicate that INSN cannot be duplicated. This is true for any insn
1261 that we've marked with gpdisp relocs, since those have to stay in
1262 1-1 correspondence with one another.
1263
1264 Technically we could copy them if we could set up a mapping from one
1265 sequence number to another, across the set of insns to be duplicated.
1266 This seems overly complicated and error-prone since interblock motion
1267 from sched-ebb could move one of the pair of insns to a different block.
1268
1269 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1270 then they'll be in a different block from their ldgp. Which could lead
1271 the bb reorder code to think that it would be ok to copy just the block
1272 containing the call and branch to the block containing the ldgp. */
1273
1274 static bool
1275 alpha_cannot_copy_insn_p (rtx insn)
1276 {
1277 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1278 return false;
1279 if (recog_memoized (insn) >= 0)
1280 return get_attr_cannot_copy (insn);
1281 else
1282 return false;
1283 }
1284
1285
1286 /* Try a machine-dependent way of reloading an illegitimate address
1287 operand. If we find one, push the reload and return the new rtx. */
1288
1289 rtx
1290 alpha_legitimize_reload_address (rtx x,
1291 enum machine_mode mode ATTRIBUTE_UNUSED,
1292 int opnum, int type,
1293 int ind_levels ATTRIBUTE_UNUSED)
1294 {
1295 /* We must recognize output that we have already generated ourselves. */
1296 if (GET_CODE (x) == PLUS
1297 && GET_CODE (XEXP (x, 0)) == PLUS
1298 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1299 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1300 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1301 {
1302 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1303 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1304 opnum, type);
1305 return x;
1306 }
1307
1308 /* We wish to handle large displacements off a base register by
1309 splitting the addend across an ldah and the mem insn. This
 1310 cuts the number of extra insns needed from 3 to 1. */
1311 if (GET_CODE (x) == PLUS
1312 && GET_CODE (XEXP (x, 0)) == REG
1313 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1314 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1315 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1316 {
1317 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1318 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1319 HOST_WIDE_INT high
1320 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1321
1322 /* Check for 32-bit overflow. */
1323 if (high + low != val)
1324 return NULL_RTX;
1325
1326 /* Reload the high part into a base reg; leave the low part
1327 in the mem directly. */
1328 x = gen_rtx_PLUS (GET_MODE (x),
1329 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1330 GEN_INT (high)),
1331 GEN_INT (low));
1332
1333 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1334 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1335 opnum, type);
1336 return x;
1337 }
1338
1339 return NULL_RTX;
1340 }
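
/* Worked example (added here for illustration): a displacement of
   0x12008 splits into high = 0x10000 and low = 0x2008; the high part is
   pushed as a reload (one ldah onto the base register) and the low part
   stays in the mem.  A displacement of 0x7fff8000 splits into high =
   -0x80000000 and low = -0x8000, which no longer sum to the original
   value, so the overflow check above rejects it.  */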
1341 \f
1342 /* Compute a (partial) cost for rtx X. Return true if the complete
1343 cost has been computed, and false if subexpressions should be
1344 scanned. In either case, *TOTAL contains the cost result. */
1345
1346 static bool
1347 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1348 {
1349 enum machine_mode mode = GET_MODE (x);
1350 bool float_mode_p = FLOAT_MODE_P (mode);
1351 const struct alpha_rtx_cost_data *cost_data;
1352
1353 if (optimize_size)
1354 cost_data = &alpha_rtx_cost_size;
1355 else
1356 cost_data = &alpha_rtx_cost_data[alpha_tune];
1357
1358 switch (code)
1359 {
1360 case CONST_INT:
1361 /* If this is an 8-bit constant, return zero since it can be used
1362 nearly anywhere with no cost. If it is a valid operand for an
1363 ADD or AND, likewise return 0 if we know it will be used in that
1364 context. Otherwise, return 2 since it might be used there later.
1365 All other constants take at least two insns. */
1366 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1367 {
1368 *total = 0;
1369 return true;
1370 }
1371 /* FALLTHRU */
1372
1373 case CONST_DOUBLE:
1374 if (x == CONST0_RTX (mode))
1375 *total = 0;
1376 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1377 || (outer_code == AND && and_operand (x, VOIDmode)))
1378 *total = 0;
1379 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1380 *total = 2;
1381 else
1382 *total = COSTS_N_INSNS (2);
1383 return true;
1384
1385 case CONST:
1386 case SYMBOL_REF:
1387 case LABEL_REF:
1388 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1389 *total = COSTS_N_INSNS (outer_code != MEM);
1390 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1391 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1392 else if (tls_symbolic_operand_type (x))
1393 /* Estimate of cost for call_pal rduniq. */
1394 /* ??? How many insns do we emit here? More than one... */
1395 *total = COSTS_N_INSNS (15);
1396 else
1397 /* Otherwise we do a load from the GOT. */
1398 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1399 return true;
1400
1401 case HIGH:
1402 /* This is effectively an add_operand. */
1403 *total = 2;
1404 return true;
1405
1406 case PLUS:
1407 case MINUS:
1408 if (float_mode_p)
1409 *total = cost_data->fp_add;
1410 else if (GET_CODE (XEXP (x, 0)) == MULT
1411 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1412 {
1413 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1414 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1415 return true;
1416 }
1417 return false;
1418
1419 case MULT:
1420 if (float_mode_p)
1421 *total = cost_data->fp_mult;
1422 else if (mode == DImode)
1423 *total = cost_data->int_mult_di;
1424 else
1425 *total = cost_data->int_mult_si;
1426 return false;
1427
1428 case ASHIFT:
1429 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1430 && INTVAL (XEXP (x, 1)) <= 3)
1431 {
1432 *total = COSTS_N_INSNS (1);
1433 return false;
1434 }
1435 /* FALLTHRU */
1436
1437 case ASHIFTRT:
1438 case LSHIFTRT:
1439 *total = cost_data->int_shift;
1440 return false;
1441
1442 case IF_THEN_ELSE:
1443 if (float_mode_p)
1444 *total = cost_data->fp_add;
1445 else
1446 *total = cost_data->int_cmov;
1447 return false;
1448
1449 case DIV:
1450 case UDIV:
1451 case MOD:
1452 case UMOD:
1453 if (!float_mode_p)
1454 *total = cost_data->int_div;
1455 else if (mode == SFmode)
1456 *total = cost_data->fp_div_sf;
1457 else
1458 *total = cost_data->fp_div_df;
1459 return false;
1460
1461 case MEM:
1462 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1463 return true;
1464
1465 case NEG:
1466 if (! float_mode_p)
1467 {
1468 *total = COSTS_N_INSNS (1);
1469 return false;
1470 }
1471 /* FALLTHRU */
1472
1473 case ABS:
1474 if (! float_mode_p)
1475 {
1476 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1477 return false;
1478 }
1479 /* FALLTHRU */
1480
1481 case FLOAT:
1482 case UNSIGNED_FLOAT:
1483 case FIX:
1484 case UNSIGNED_FIX:
1485 case FLOAT_TRUNCATE:
1486 *total = cost_data->fp_add;
1487 return false;
1488
1489 case FLOAT_EXTEND:
1490 if (GET_CODE (XEXP (x, 0)) == MEM)
1491 *total = 0;
1492 else
1493 *total = cost_data->fp_add;
1494 return false;
1495
1496 default:
1497 return false;
1498 }
1499 }
1500 \f
1501 /* REF is an alignable memory location. Place an aligned SImode
1502 reference into *PALIGNED_MEM and the number of bits to shift into
 1503 *PBITNUM. */
1505
1506 void
1507 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1508 {
1509 rtx base;
1510 HOST_WIDE_INT disp, offset;
1511
1512 gcc_assert (GET_CODE (ref) == MEM);
1513
1514 if (reload_in_progress
1515 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1516 {
1517 base = find_replacement (&XEXP (ref, 0));
1518 gcc_assert (memory_address_p (GET_MODE (ref), base));
1519 }
1520 else
1521 base = XEXP (ref, 0);
1522
1523 if (GET_CODE (base) == PLUS)
1524 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1525 else
1526 disp = 0;
1527
1528 /* Find the byte offset within an aligned word. If the memory itself is
1529 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1530 will have examined the base register and determined it is aligned, and
1531 thus displacements from it are naturally alignable. */
1532 if (MEM_ALIGN (ref) >= 32)
1533 offset = 0;
1534 else
1535 offset = disp & 3;
1536
1537 /* Access the entire aligned word. */
1538 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1539
1540 /* Convert the byte offset within the word to a bit offset. */
1541 if (WORDS_BIG_ENDIAN)
1542 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1543 else
1544 offset *= 8;
1545 *pbitnum = GEN_INT (offset);
1546 }
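
/* Worked example (added here for illustration): for an HImode reference
   at (plus (reg) (const_int 6)) with no alignment information, disp = 6
   gives offset = 2, so *PALIGNED_MEM becomes the aligned SImode word at
   displacement 4 and, on a little-endian target, *PBITNUM is 16: the
   halfword occupies bits 16..31 of that word.  */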
1547
1548 /* Similar, but just get the address. Handle the two reload cases.
1549 Add EXTRA_OFFSET to the address we return. */
1550
1551 rtx
1552 get_unaligned_address (rtx ref, int extra_offset)
1553 {
1554 rtx base;
1555 HOST_WIDE_INT offset = 0;
1556
1557 gcc_assert (GET_CODE (ref) == MEM);
1558
1559 if (reload_in_progress
1560 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1561 {
1562 base = find_replacement (&XEXP (ref, 0));
1563
1564 gcc_assert (memory_address_p (GET_MODE (ref), base));
1565 }
1566 else
1567 base = XEXP (ref, 0);
1568
1569 if (GET_CODE (base) == PLUS)
1570 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1571
1572 return plus_constant (base, offset + extra_offset);
1573 }
1574
1575 /* On the Alpha, all (non-symbolic) constants except zero go into
1576 a floating-point register via memory. Note that we cannot
1577 return anything that is not a subset of CLASS, and that some
1578 symbolic constants cannot be dropped to memory. */
1579
1580 enum reg_class
1581 alpha_preferred_reload_class(rtx x, enum reg_class class)
1582 {
1583 /* Zero is present in any register class. */
1584 if (x == CONST0_RTX (GET_MODE (x)))
1585 return class;
1586
1587 /* These sorts of constants we can easily drop to memory. */
1588 if (GET_CODE (x) == CONST_INT
1589 || GET_CODE (x) == CONST_DOUBLE
1590 || GET_CODE (x) == CONST_VECTOR)
1591 {
1592 if (class == FLOAT_REGS)
1593 return NO_REGS;
1594 if (class == ALL_REGS)
1595 return GENERAL_REGS;
1596 return class;
1597 }
1598
1599 /* All other kinds of constants should not (and in the case of HIGH
1600 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1601 secondary reload. */
1602 if (CONSTANT_P (x))
1603 return (class == ALL_REGS ? GENERAL_REGS : class);
1604
1605 return class;
1606 }
1607
1608 /* Loading and storing HImode or QImode values to and from memory
1609 usually requires a scratch register. The exceptions are loading
1610 QImode and HImode from an aligned address to a general register
1611 unless byte instructions are permitted.
1612
1613 We also cannot load an unaligned address or a paradoxical SUBREG
1614 into an FP register.
1615
1616 We also cannot do integral arithmetic into FP regs, as might result
1617 from register elimination into a DImode fp register. */
1618
1619 enum reg_class
1620 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1621 rtx x, int in)
1622 {
1623 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1624 {
1625 if (GET_CODE (x) == MEM
1626 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1627 || (GET_CODE (x) == SUBREG
1628 && (GET_CODE (SUBREG_REG (x)) == MEM
1629 || (GET_CODE (SUBREG_REG (x)) == REG
1630 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1631 {
1632 if (!in || !aligned_memory_operand(x, mode))
1633 return GENERAL_REGS;
1634 }
1635 }
1636
1637 if (class == FLOAT_REGS)
1638 {
1639 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1640 return GENERAL_REGS;
1641
1642 if (GET_CODE (x) == SUBREG
1643 && (GET_MODE_SIZE (GET_MODE (x))
1644 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1645 return GENERAL_REGS;
1646
1647 if (in && INTEGRAL_MODE_P (mode)
1648 && ! (memory_operand (x, mode) || x == const0_rtx))
1649 return GENERAL_REGS;
1650 }
1651
1652 return NO_REGS;
1653 }
1654 \f
1655 /* Subfunction of the following function. Update the flags of any MEM
1656 found in part of X. */
1657
1658 static int
1659 alpha_set_memflags_1 (rtx *xp, void *data)
1660 {
1661 rtx x = *xp, orig = (rtx) data;
1662
1663 if (GET_CODE (x) != MEM)
1664 return 0;
1665
1666 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1667 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1668 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1669 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1670 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1671
1672 /* Sadly, we cannot use alias sets because the extra aliasing
1673 produced by the AND interferes. Given that two-byte quantities
1674 are the only thing we would be able to differentiate anyway,
1675 there does not seem to be any point in convoluting the early
1676 out of the alias check. */
1677
1678 return -1;
1679 }
1680
1681 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1682 generated to perform a memory operation, look for any MEMs in either
1683 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1684 volatile flags from REF into each of the MEMs found. If REF is not
1685 a MEM, don't do anything. */
1686
1687 void
1688 alpha_set_memflags (rtx insn, rtx ref)
1689 {
1690 rtx *base_ptr;
1691
1692 if (GET_CODE (ref) != MEM)
1693 return;
1694
1695 /* This is only called from alpha.md, after having had something
1696 generated from one of the insn patterns. So if everything is
1697 zero, the pattern is already up-to-date. */
1698 if (!MEM_VOLATILE_P (ref)
1699 && !MEM_IN_STRUCT_P (ref)
1700 && !MEM_SCALAR_P (ref)
1701 && !MEM_NOTRAP_P (ref)
1702 && !MEM_READONLY_P (ref))
1703 return;
1704
1705 if (INSN_P (insn))
1706 base_ptr = &PATTERN (insn);
1707 else
1708 base_ptr = &insn;
1709 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1710 }
1711 \f
1712 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1713 int, bool);
1714
1715 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1716 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1717 and return pc_rtx if successful. */
1718
1719 static rtx
1720 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1721 HOST_WIDE_INT c, int n, bool no_output)
1722 {
1723 HOST_WIDE_INT new;
1724 int i, bits;
1725 /* Use a pseudo if highly optimizing and still generating RTL. */
1726 rtx subtarget
1727 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1728 rtx temp, insn;
1729
1730 /* If this is a sign-extended 32-bit constant, we can do this in at most
1731 three insns, so do it if we have enough insns left. We always have
1732 a sign-extended 32-bit constant when compiling on a narrow machine. */
1733
1734 if (HOST_BITS_PER_WIDE_INT != 64
1735 || c >> 31 == -1 || c >> 31 == 0)
1736 {
1737 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1738 HOST_WIDE_INT tmp1 = c - low;
1739 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1740 HOST_WIDE_INT extra = 0;
1741
1742 /* If HIGH will be interpreted as negative but the constant is
 1743 positive, we must adjust it to do two ldah insns.
1744
1745 if ((high & 0x8000) != 0 && c >= 0)
1746 {
1747 extra = 0x4000;
1748 tmp1 -= 0x40000000;
1749 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1750 }
1751
1752 if (c == low || (low == 0 && extra == 0))
1753 {
1754 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1755 but that meant that we can't handle INT_MIN on 32-bit machines
1756 (like NT/Alpha), because we recurse indefinitely through
1757 emit_move_insn to gen_movdi. So instead, since we know exactly
1758 what we want, create it explicitly. */
1759
1760 if (no_output)
1761 return pc_rtx;
1762 if (target == NULL)
1763 target = gen_reg_rtx (mode);
1764 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1765 return target;
1766 }
1767 else if (n >= 2 + (extra != 0))
1768 {
1769 if (no_output)
1770 return pc_rtx;
1771 if (no_new_pseudos)
1772 {
1773 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1774 temp = target;
1775 }
1776 else
1777 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1778 subtarget, mode);
1779
1780 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1781 This means that if we go through expand_binop, we'll try to
1782 generate extensions, etc, which will require new pseudos, which
1783 will fail during some split phases. The SImode add patterns
1784 still exist, but are not named. So build the insns by hand. */
1785
1786 if (extra != 0)
1787 {
1788 if (! subtarget)
1789 subtarget = gen_reg_rtx (mode);
1790 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1791 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1792 emit_insn (insn);
1793 temp = subtarget;
1794 }
1795
1796 if (target == NULL)
1797 target = gen_reg_rtx (mode);
1798 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1799 insn = gen_rtx_SET (VOIDmode, target, insn);
1800 emit_insn (insn);
1801 return target;
1802 }
1803 }
1804
1805 /* If we couldn't do it that way, try some other methods. But if we have
1806 no instructions left, don't bother. Likewise, if this is SImode and
1807 we can't make pseudos, we can't do anything since the expand_binop
1808 and expand_unop calls will widen and try to make pseudos. */
1809
1810 if (n == 1 || (mode == SImode && no_new_pseudos))
1811 return 0;
1812
1813 /* Next, see if we can load a related constant and then shift and possibly
1814 negate it to get the constant we want. Try this once each increasing
1815 numbers of insns. */
1816
1817 for (i = 1; i < n; i++)
1818 {
 1819 /* First, see if, minus some low bits, we have an easy load of the
1820 high bits. */
1821
1822 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1823 if (new != 0)
1824 {
1825 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1826 if (temp)
1827 {
1828 if (no_output)
1829 return temp;
1830 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1831 target, 0, OPTAB_WIDEN);
1832 }
1833 }
1834
1835 /* Next try complementing. */
1836 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1837 if (temp)
1838 {
1839 if (no_output)
1840 return temp;
1841 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1842 }
1843
1844 /* Next try to form a constant and do a left shift. We can do this
1845 if some low-order bits are zero; the exact_log2 call below tells
1846 us that information. The bits we are shifting out could be any
1847 value, but here we'll just try the 0- and sign-extended forms of
1848 the constant. To try to increase the chance of having the same
1849 constant in more than one insn, start at the highest number of
1850 bits to shift, but try all possibilities in case a ZAPNOT will
1851 be useful. */
1852
1853 bits = exact_log2 (c & -c);
1854 if (bits > 0)
1855 for (; bits > 0; bits--)
1856 {
1857 new = c >> bits;
1858 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1859 if (!temp && c < 0)
1860 {
1861 new = (unsigned HOST_WIDE_INT)c >> bits;
1862 temp = alpha_emit_set_const (subtarget, mode, new,
1863 i, no_output);
1864 }
1865 if (temp)
1866 {
1867 if (no_output)
1868 return temp;
1869 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1870 target, 0, OPTAB_WIDEN);
1871 }
1872 }
1873
1874 /* Now try high-order zero bits. Here we try the shifted-in bits as
1875 all zero and all ones. Be careful to avoid shifting outside the
1876 mode and to avoid shifting outside the host wide int size. */
1877 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1878 confuse the recursive call and set all of the high 32 bits. */
1879
1880 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1881 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1882 if (bits > 0)
1883 for (; bits > 0; bits--)
1884 {
1885 new = c << bits;
1886 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1887 if (!temp)
1888 {
1889 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1890 temp = alpha_emit_set_const (subtarget, mode, new,
1891 i, no_output);
1892 }
1893 if (temp)
1894 {
1895 if (no_output)
1896 return temp;
1897 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1898 target, 1, OPTAB_WIDEN);
1899 }
1900 }
1901
1902 /* Now try high-order 1 bits. We get that with a sign-extension.
1903 But one bit isn't enough here. Be careful to avoid shifting outside
1904 the mode and to avoid shifting outside the host wide int size. */
1905
1906 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1907 - floor_log2 (~ c) - 2);
1908 if (bits > 0)
1909 for (; bits > 0; bits--)
1910 {
1911 new = c << bits;
1912 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1913 if (!temp)
1914 {
1915 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1916 temp = alpha_emit_set_const (subtarget, mode, new,
1917 i, no_output);
1918 }
1919 if (temp)
1920 {
1921 if (no_output)
1922 return temp;
1923 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1924 target, 0, OPTAB_WIDEN);
1925 }
1926 }
1927 }
1928
1929 #if HOST_BITS_PER_WIDE_INT == 64
1930 /* Finally, see if we can load a value into the target that is the same as the
1931 constant except that all bytes that are 0 are changed to be 0xff. If we
1932 can, then we can do a ZAPNOT to obtain the desired constant. */
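/* Illustrative sketch: for c = 0x0000001200000034 the zero bytes are
   filled to give new = 0xffffff12ffffff34, and the final AND mask
   c | ~new == 0x000000ff000000ff keeps exactly bytes 0 and 4, i.e. a
   zapnot with byte mask 0x11. Whether this wins still depends on NEW
   itself being loadable in N-1 insns. */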
1933
1934 new = c;
1935 for (i = 0; i < 64; i += 8)
1936 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1937 new |= (HOST_WIDE_INT) 0xff << i;
1938
1939 /* We are only called for SImode and DImode. If this is SImode, ensure that
1940 we are sign extended to a full word. */
1941
1942 if (mode == SImode)
1943 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1944
1945 if (new != c)
1946 {
1947 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1948 if (temp)
1949 {
1950 if (no_output)
1951 return temp;
1952 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1953 target, 0, OPTAB_WIDEN);
1954 }
1955 }
1956 #endif
1957
1958 return 0;
1959 }
1960
1961 /* Try to output insns to set TARGET equal to the constant C if it can be
1962 done in at most N insns. Do all computations in MODE. Returns the place
1963 where the output has been placed if it can be done and the insns have been
1964 emitted. If it would take more than N insns, zero is returned and no
1965 insns are emitted. */
1966
1967 static rtx
1968 alpha_emit_set_const (rtx target, enum machine_mode mode,
1969 HOST_WIDE_INT c, int n, bool no_output)
1970 {
1971 enum machine_mode orig_mode = mode;
1972 rtx orig_target = target;
1973 rtx result = 0;
1974 int i;
1975
1976 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1977 can't load this constant in one insn, do this in DImode. */
1978 if (no_new_pseudos && mode == SImode
1979 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1980 {
1981 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1982 if (result)
1983 return result;
1984
1985 target = no_output ? NULL : gen_lowpart (DImode, target);
1986 mode = DImode;
1987 }
1988 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1989 {
1990 target = no_output ? NULL : gen_lowpart (DImode, target);
1991 mode = DImode;
1992 }
1993
1994 /* Try 1 insn, then 2, then up to N. */
1995 for (i = 1; i <= n; i++)
1996 {
1997 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1998 if (result)
1999 {
2000 rtx insn, set;
2001
2002 if (no_output)
2003 return result;
2004
2005 insn = get_last_insn ();
2006 set = single_set (insn);
2007 if (! CONSTANT_P (SET_SRC (set)))
2008 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2009 break;
2010 }
2011 }
2012
2013 /* Allow for the case where we changed the mode of TARGET. */
2014 if (result)
2015 {
2016 if (result == target)
2017 result = orig_target;
2018 else if (mode != orig_mode)
2019 result = gen_lowpart (orig_mode, result);
2020 }
2021
2022 return result;
2023 }
2024
2025 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2026 fall back to a straightforward decomposition. We do this to avoid
2027 exponential run times encountered when looking for longer sequences
2028 with alpha_emit_set_const. */
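/* A worked example (for illustration only): for the constant
   0x12345678deadbeef the decomposition below yields d1 = -0x4111,
   d2 = -0x21520000, d3 = 0x5679 and d4 = 0x12340000, so the emitted
   insns assemble to roughly

	ldah $t,0x1234($31)
	lda $t,0x5679($t)
	sll $t,32,$t
	ldah $t,-0x2152($t)
	lda $t,-0x4111($t)

   i.e. build the sign-extended high word, shift it into place, then
   patch in the low word with another ldah/lda pair. */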
2029
2030 static rtx
2031 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2032 {
2033 HOST_WIDE_INT d1, d2, d3, d4;
2034
2035 /* Decompose the entire word. */
2036 #if HOST_BITS_PER_WIDE_INT >= 64
2037 gcc_assert (c2 == -(c1 < 0));
2038 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2039 c1 -= d1;
2040 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2041 c1 = (c1 - d2) >> 32;
2042 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2043 c1 -= d3;
2044 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2045 gcc_assert (c1 == d4);
2046 #else
2047 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2048 c1 -= d1;
2049 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2050 gcc_assert (c1 == d2);
2051 c2 += (d2 < 0);
2052 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2053 c2 -= d3;
2054 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2055 gcc_assert (c2 == d4);
2056 #endif
2057
2058 /* Construct the high word. */
2059 if (d4)
2060 {
2061 emit_move_insn (target, GEN_INT (d4));
2062 if (d3)
2063 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2064 }
2065 else
2066 emit_move_insn (target, GEN_INT (d3));
2067
2068 /* Shift it into place. */
2069 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2070
2071 /* Add in the low bits. */
2072 if (d2)
2073 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2074 if (d1)
2075 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2076
2077 return target;
2078 }
2079
2080 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2081 the low 64 bits. */
2082
2083 static void
2084 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2085 {
2086 HOST_WIDE_INT i0, i1;
2087
2088 if (GET_CODE (x) == CONST_VECTOR)
2089 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2090
2091
2092 if (GET_CODE (x) == CONST_INT)
2093 {
2094 i0 = INTVAL (x);
2095 i1 = -(i0 < 0);
2096 }
2097 else if (HOST_BITS_PER_WIDE_INT >= 64)
2098 {
2099 i0 = CONST_DOUBLE_LOW (x);
2100 i1 = -(i0 < 0);
2101 }
2102 else
2103 {
2104 i0 = CONST_DOUBLE_LOW (x);
2105 i1 = CONST_DOUBLE_HIGH (x);
2106 }
2107
2108 *p0 = i0;
2109 *p1 = i1;
2110 }
2111
2112 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2113 are willing to load the value into a register via a move pattern.
2114 Normally this is all symbolic constants, integral constants that
2115 take three or fewer instructions, and floating-point zero. */
2116
2117 bool
2118 alpha_legitimate_constant_p (rtx x)
2119 {
2120 enum machine_mode mode = GET_MODE (x);
2121 HOST_WIDE_INT i0, i1;
2122
2123 switch (GET_CODE (x))
2124 {
2125 case CONST:
2126 case LABEL_REF:
2127 case SYMBOL_REF:
2128 case HIGH:
2129 return true;
2130
2131 case CONST_DOUBLE:
2132 if (x == CONST0_RTX (mode))
2133 return true;
2134 if (FLOAT_MODE_P (mode))
2135 return false;
2136 goto do_integer;
2137
2138 case CONST_VECTOR:
2139 if (x == CONST0_RTX (mode))
2140 return true;
2141 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2142 return false;
2143 if (GET_MODE_SIZE (mode) != 8)
2144 return false;
2145 goto do_integer;
2146
2147 case CONST_INT:
2148 do_integer:
2149 if (TARGET_BUILD_CONSTANTS)
2150 return true;
2151 alpha_extract_integer (x, &i0, &i1);
2152 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2153 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2154 return false;
2155
2156 default:
2157 return false;
2158 }
2159 }
2160
2161 /* Operand 1 is known to be a constant, and should require more than one
2162 instruction to load. Emit that multi-part load. */
2163
2164 bool
2165 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2166 {
2167 HOST_WIDE_INT i0, i1;
2168 rtx temp = NULL_RTX;
2169
2170 alpha_extract_integer (operands[1], &i0, &i1);
2171
2172 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2173 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2174
2175 if (!temp && TARGET_BUILD_CONSTANTS)
2176 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2177
2178 if (temp)
2179 {
2180 if (!rtx_equal_p (operands[0], temp))
2181 emit_move_insn (operands[0], temp);
2182 return true;
2183 }
2184
2185 return false;
2186 }
2187
2188 /* Expand a move instruction; return true if all work is done.
2189 We don't handle non-bwx subword loads here. */
2190
2191 bool
2192 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2193 {
2194 /* If the output is not a register, the input must be. */
2195 if (GET_CODE (operands[0]) == MEM
2196 && ! reg_or_0_operand (operands[1], mode))
2197 operands[1] = force_reg (mode, operands[1]);
2198
2199 /* Allow legitimize_address to perform some simplifications. */
2200 if (mode == Pmode && symbolic_operand (operands[1], mode))
2201 {
2202 rtx tmp;
2203
2204 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2205 if (tmp)
2206 {
2207 if (tmp == operands[0])
2208 return true;
2209 operands[1] = tmp;
2210 return false;
2211 }
2212 }
2213
2214 /* Early out for non-constants and valid constants. */
2215 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2216 return false;
2217
2218 /* Split large integers. */
2219 if (GET_CODE (operands[1]) == CONST_INT
2220 || GET_CODE (operands[1]) == CONST_DOUBLE
2221 || GET_CODE (operands[1]) == CONST_VECTOR)
2222 {
2223 if (alpha_split_const_mov (mode, operands))
2224 return true;
2225 }
2226
2227 /* Otherwise we've nothing left but to drop the thing to memory. */
2228 operands[1] = force_const_mem (mode, operands[1]);
2229 if (reload_in_progress)
2230 {
2231 emit_move_insn (operands[0], XEXP (operands[1], 0));
2232 operands[1] = copy_rtx (operands[1]);
2233 XEXP (operands[1], 0) = operands[0];
2234 }
2235 else
2236 operands[1] = validize_mem (operands[1]);
2237 return false;
2238 }
2239
2240 /* Expand a non-bwx QImode or HImode move instruction;
2241 return true if all work is done. */
2242
2243 bool
2244 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2245 {
2246 /* If the output is not a register, the input must be. */
2247 if (GET_CODE (operands[0]) == MEM)
2248 operands[1] = force_reg (mode, operands[1]);
2249
2250 /* Handle four memory cases, unaligned and aligned for either the input
2251 or the output. The only case where we can be called during reload is
2252 for aligned loads; all other cases require temporaries. */
2253
2254 if (GET_CODE (operands[1]) == MEM
2255 || (GET_CODE (operands[1]) == SUBREG
2256 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2257 || (reload_in_progress && GET_CODE (operands[1]) == REG
2258 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2259 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2260 && GET_CODE (SUBREG_REG (operands[1])) == REG
2261 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2262 {
2263 if (aligned_memory_operand (operands[1], mode))
2264 {
2265 if (reload_in_progress)
2266 {
2267 emit_insn ((mode == QImode
2268 ? gen_reload_inqi_help
2269 : gen_reload_inhi_help)
2270 (operands[0], operands[1],
2271 gen_rtx_REG (SImode, REGNO (operands[0]))));
2272 }
2273 else
2274 {
2275 rtx aligned_mem, bitnum;
2276 rtx scratch = gen_reg_rtx (SImode);
2277 rtx subtarget;
2278 bool copyout;
2279
2280 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2281
2282 subtarget = operands[0];
2283 if (GET_CODE (subtarget) == REG)
2284 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2285 else
2286 subtarget = gen_reg_rtx (DImode), copyout = true;
2287
2288 emit_insn ((mode == QImode
2289 ? gen_aligned_loadqi
2290 : gen_aligned_loadhi)
2291 (subtarget, aligned_mem, bitnum, scratch));
2292
2293 if (copyout)
2294 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2295 }
2296 }
2297 else
2298 {
2299 /* Don't pass these as parameters since that makes the generated
2300 code depend on parameter evaluation order which will cause
2301 bootstrap failures. */
2302
2303 rtx temp1, temp2, seq, subtarget;
2304 bool copyout;
2305
2306 temp1 = gen_reg_rtx (DImode);
2307 temp2 = gen_reg_rtx (DImode);
2308
2309 subtarget = operands[0];
2310 if (GET_CODE (subtarget) == REG)
2311 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2312 else
2313 subtarget = gen_reg_rtx (DImode), copyout = true;
2314
2315 seq = ((mode == QImode
2316 ? gen_unaligned_loadqi
2317 : gen_unaligned_loadhi)
2318 (subtarget, get_unaligned_address (operands[1], 0),
2319 temp1, temp2));
2320 alpha_set_memflags (seq, operands[1]);
2321 emit_insn (seq);
2322
2323 if (copyout)
2324 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2325 }
2326 return true;
2327 }
2328
2329 if (GET_CODE (operands[0]) == MEM
2330 || (GET_CODE (operands[0]) == SUBREG
2331 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2332 || (reload_in_progress && GET_CODE (operands[0]) == REG
2333 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2334 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2335 && GET_CODE (SUBREG_REG (operands[0])) == REG
2336 && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
2337 {
2338 if (aligned_memory_operand (operands[0], mode))
2339 {
2340 rtx aligned_mem, bitnum;
2341 rtx temp1 = gen_reg_rtx (SImode);
2342 rtx temp2 = gen_reg_rtx (SImode);
2343
2344 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2345
2346 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2347 temp1, temp2));
2348 }
2349 else
2350 {
2351 rtx temp1 = gen_reg_rtx (DImode);
2352 rtx temp2 = gen_reg_rtx (DImode);
2353 rtx temp3 = gen_reg_rtx (DImode);
2354 rtx seq = ((mode == QImode
2355 ? gen_unaligned_storeqi
2356 : gen_unaligned_storehi)
2357 (get_unaligned_address (operands[0], 0),
2358 operands[1], temp1, temp2, temp3));
2359
2360 alpha_set_memflags (seq, operands[0]);
2361 emit_insn (seq);
2362 }
2363 return true;
2364 }
2365
2366 return false;
2367 }
2368
2369 /* Implement the movmisalign patterns. One of the operands is a memory
2370 that is not naturally aligned. Emit instructions to load it. */
2371
2372 void
2373 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2374 {
2375 /* Honor misaligned loads for the cases we promised to handle. */
2376 if (MEM_P (operands[1]))
2377 {
2378 rtx tmp;
2379
2380 if (register_operand (operands[0], mode))
2381 tmp = operands[0];
2382 else
2383 tmp = gen_reg_rtx (mode);
2384
2385 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2386 if (tmp != operands[0])
2387 emit_move_insn (operands[0], tmp);
2388 }
2389 else if (MEM_P (operands[0]))
2390 {
2391 if (!reg_or_0_operand (operands[1], mode))
2392 operands[1] = force_reg (mode, operands[1]);
2393 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2394 }
2395 else
2396 gcc_unreachable ();
2397 }
2398
2399 /* Generate an unsigned DImode to FP conversion. This is the same code
2400 optabs would emit if we didn't have TFmode patterns.
2401
2402 For SFmode, this is the only construction I've found that can pass
2403 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2404 intermediates will work, because you'll get intermediate rounding
2405 that ruins the end result. Some of this could be fixed by turning
2406 on round-to-positive-infinity, but that requires diddling the fpsr,
2407 which kills performance. I tried turning this around and converting
2408 to a negative number, so that I could turn on /m, but either I did
2409 it wrong or there's something else, because I wound up with the exact
2410 same single-bit error. There is a branch-less form of this same code:
2411
2412 srl $16,1,$1
2413 and $16,1,$2
2414 cmplt $16,0,$3
2415 or $1,$2,$2
2416 cmovge $16,$16,$2
2417 itoft $3,$f10
2418 itoft $2,$f11
2419 cvtqs $f11,$f11
2420 adds $f11,$f11,$f0
2421 fcmoveq $f10,$f11,$f0
2422
2423 I'm not using it because it's the same number of instructions as
2424 this branch-full form, and it has more serialized long latency
2425 instructions on the critical path.
2426
2427 For DFmode, we can avoid rounding errors by breaking up the word
2428 into two pieces, converting them separately, and adding them back:
2429
2430 LC0: .long 0,0x5f800000
2431
2432 itoft $16,$f11
2433 lda $2,LC0
2434 cmplt $16,0,$1
2435 cpyse $f11,$f31,$f10
2436 cpyse $f31,$f11,$f11
2437 s4addq $1,$2,$1
2438 lds $f12,0($1)
2439 cvtqt $f10,$f10
2440 cvtqt $f11,$f11
2441 addt $f12,$f10,$f0
2442 addt $f0,$f11,$f0
2443
2444 This doesn't seem to be a clear-cut win over the optabs form.
2445 It probably all depends on the distribution of numbers being
2446 converted -- in the optabs form, all but the high-bit-set case have a
2447 much lower minimum execution time. */
2448
2449 void
2450 alpha_emit_floatuns (rtx operands[2])
2451 {
2452 rtx neglab, donelab, i0, i1, f0, in, out;
2453 enum machine_mode mode;
2454
2455 out = operands[0];
2456 in = force_reg (DImode, operands[1]);
2457 mode = GET_MODE (out);
2458 neglab = gen_label_rtx ();
2459 donelab = gen_label_rtx ();
2460 i0 = gen_reg_rtx (DImode);
2461 i1 = gen_reg_rtx (DImode);
2462 f0 = gen_reg_rtx (mode);
2463
2464 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2465
2466 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2467 emit_jump_insn (gen_jump (donelab));
2468 emit_barrier ();
2469
2470 emit_label (neglab);
2471
2472 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2473 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2474 emit_insn (gen_iordi3 (i0, i0, i1));
2475 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2476 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2477
2478 emit_label (donelab);
2479 }
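
/* Expressed in C, the branch-full sequence emitted above computes
   (a sketch, with FLT standing for the output float type):

	if ((long) x >= 0)
	  result = (FLT) (long) x;
	else
	  {
	    unsigned long half = (x >> 1) | (x & 1);
	    result = (FLT) (long) half;
	    result = result + result;
	  }

   The (x & 1) keeps the discarded low bit sticky so that the rounding of
   result + result matches a direct unsigned conversion. */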
2480
2481 /* Generate the comparison for a conditional branch. */
2482
2483 rtx
2484 alpha_emit_conditional_branch (enum rtx_code code)
2485 {
2486 enum rtx_code cmp_code, branch_code;
2487 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2488 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2489 rtx tem;
2490
2491 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2492 {
2493 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2494 op1 = const0_rtx;
2495 alpha_compare.fp_p = 0;
2496 }
2497
2498 /* The general case: fold the comparison code to the types of compares
2499 that we have, choosing the branch as necessary. */
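      /* For instance (an illustrative sketch): an integer A > B is folded to
	 cmp_code = LE with branch_code = EQ, which comes out as roughly
	 "cmple $a,$b,$t ; beq $t,Ldest" -- the branch is taken exactly when
	 the reversed compare is false. */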
2500 switch (code)
2501 {
2502 case EQ: case LE: case LT: case LEU: case LTU:
2503 case UNORDERED:
2504 /* We have these compares: */
2505 cmp_code = code, branch_code = NE;
2506 break;
2507
2508 case NE:
2509 case ORDERED:
2510 /* These must be reversed. */
2511 cmp_code = reverse_condition (code), branch_code = EQ;
2512 break;
2513
2514 case GE: case GT: case GEU: case GTU:
2515 /* For FP, we swap them, for INT, we reverse them. */
2516 if (alpha_compare.fp_p)
2517 {
2518 cmp_code = swap_condition (code);
2519 branch_code = NE;
2520 tem = op0, op0 = op1, op1 = tem;
2521 }
2522 else
2523 {
2524 cmp_code = reverse_condition (code);
2525 branch_code = EQ;
2526 }
2527 break;
2528
2529 default:
2530 gcc_unreachable ();
2531 }
2532
2533 if (alpha_compare.fp_p)
2534 {
2535 cmp_mode = DFmode;
2536 if (flag_unsafe_math_optimizations)
2537 {
2538 /* When we are not as concerned about non-finite values, and we
2539 are comparing against zero, we can branch directly. */
2540 if (op1 == CONST0_RTX (DFmode))
2541 cmp_code = UNKNOWN, branch_code = code;
2542 else if (op0 == CONST0_RTX (DFmode))
2543 {
2544 /* Undo the swap we probably did just above. */
2545 tem = op0, op0 = op1, op1 = tem;
2546 branch_code = swap_condition (cmp_code);
2547 cmp_code = UNKNOWN;
2548 }
2549 }
2550 else
2551 {
2552 /* ??? We mark the branch mode to be CCmode to prevent the
2553 compare and branch from being combined, since the compare
2554 insn follows IEEE rules that the branch does not. */
2555 branch_mode = CCmode;
2556 }
2557 }
2558 else
2559 {
2560 cmp_mode = DImode;
2561
2562 /* The following optimizations are only for signed compares. */
2563 if (code != LEU && code != LTU && code != GEU && code != GTU)
2564 {
2565 /* Whee. Compare and branch against 0 directly. */
2566 if (op1 == const0_rtx)
2567 cmp_code = UNKNOWN, branch_code = code;
2568
2569 /* If the constant doesn't fit into an immediate, but can
2570 be generated by lda/ldah, we adjust the argument and
2571 compare against zero, so we can use beq/bne directly. */
2572 /* ??? Don't do this when comparing against symbols, otherwise
2573 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2574 be declared false out of hand (at least for non-weak). */
2575 else if (GET_CODE (op1) == CONST_INT
2576 && (code == EQ || code == NE)
2577 && !(symbolic_operand (op0, VOIDmode)
2578 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2579 {
2580 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2581
2582 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2583 && (CONST_OK_FOR_LETTER_P (n, 'K')
2584 || CONST_OK_FOR_LETTER_P (n, 'L')))
2585 {
2586 cmp_code = PLUS, branch_code = code;
2587 op1 = GEN_INT (n);
2588 }
2589 }
2590 }
2591
2592 if (!reg_or_0_operand (op0, DImode))
2593 op0 = force_reg (DImode, op0);
2594 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2595 op1 = force_reg (DImode, op1);
2596 }
2597
2598 /* Emit an initial compare instruction, if necessary. */
2599 tem = op0;
2600 if (cmp_code != UNKNOWN)
2601 {
2602 tem = gen_reg_rtx (cmp_mode);
2603 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2604 }
2605
2606 /* Zero the operands. */
2607 memset (&alpha_compare, 0, sizeof (alpha_compare));
2608
2609 /* Return the branch comparison. */
2610 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2611 }
2612
2613 /* Certain simplifications can be done to make invalid setcc operations
2614 valid. Return the final comparison, or NULL if we can't work. */
2615
2616 rtx
2617 alpha_emit_setcc (enum rtx_code code)
2618 {
2619 enum rtx_code cmp_code;
2620 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2621 int fp_p = alpha_compare.fp_p;
2622 rtx tmp;
2623
2624 /* Zero the operands. */
2625 memset (&alpha_compare, 0, sizeof (alpha_compare));
2626
2627 if (fp_p && GET_MODE (op0) == TFmode)
2628 {
2629 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2630 op1 = const0_rtx;
2631 fp_p = 0;
2632 }
2633
2634 if (fp_p && !TARGET_FIX)
2635 return NULL_RTX;
2636
2637 /* The general case: fold the comparison code to the types of compares
2638 that we have, choosing the branch as necessary. */
2639
2640 cmp_code = UNKNOWN;
2641 switch (code)
2642 {
2643 case EQ: case LE: case LT: case LEU: case LTU:
2644 case UNORDERED:
2645 /* We have these compares. */
2646 if (fp_p)
2647 cmp_code = code, code = NE;
2648 break;
2649
2650 case NE:
2651 if (!fp_p && op1 == const0_rtx)
2652 break;
2653 /* FALLTHRU */
2654
2655 case ORDERED:
2656 cmp_code = reverse_condition (code);
2657 code = EQ;
2658 break;
2659
2660 case GE: case GT: case GEU: case GTU:
2661 /* These normally need swapping, but for integer zero we have
2662 special patterns that recognize swapped operands. */
2663 if (!fp_p && op1 == const0_rtx)
2664 break;
2665 code = swap_condition (code);
2666 if (fp_p)
2667 cmp_code = code, code = NE;
2668 tmp = op0, op0 = op1, op1 = tmp;
2669 break;
2670
2671 default:
2672 gcc_unreachable ();
2673 }
2674
2675 if (!fp_p)
2676 {
2677 if (!register_operand (op0, DImode))
2678 op0 = force_reg (DImode, op0);
2679 if (!reg_or_8bit_operand (op1, DImode))
2680 op1 = force_reg (DImode, op1);
2681 }
2682
2683 /* Emit an initial compare instruction, if necessary. */
2684 if (cmp_code != UNKNOWN)
2685 {
2686 enum machine_mode mode = fp_p ? DFmode : DImode;
2687
2688 tmp = gen_reg_rtx (mode);
2689 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2690 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2691
2692 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2693 op1 = const0_rtx;
2694 }
2695
2696 /* Return the setcc comparison. */
2697 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2698 }
2699
2700
2701 /* Rewrite a comparison against zero CMP of the form
2702 (CODE (cc0) (const_int 0)) so it can be written validly in
2703 a conditional move (if_then_else CMP ...).
2704 If both of the operands that set cc0 are nonzero we must emit
2705 an insn to perform the compare (it can't be done within
2706 the conditional move). */
2707
2708 rtx
2709 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2710 {
2711 enum rtx_code code = GET_CODE (cmp);
2712 enum rtx_code cmov_code = NE;
2713 rtx op0 = alpha_compare.op0;
2714 rtx op1 = alpha_compare.op1;
2715 int fp_p = alpha_compare.fp_p;
2716 enum machine_mode cmp_mode
2717 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2718 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2719 enum machine_mode cmov_mode = VOIDmode;
2720 int local_fast_math = flag_unsafe_math_optimizations;
2721 rtx tem;
2722
2723 /* Zero the operands. */
2724 memset (&alpha_compare, 0, sizeof (alpha_compare));
2725
2726 if (fp_p != FLOAT_MODE_P (mode))
2727 {
2728 enum rtx_code cmp_code;
2729
2730 if (! TARGET_FIX)
2731 return 0;
2732
2733 /* If we have fp<->int register move instructions, do a cmov by
2734 performing the comparison in fp registers, and move the
2735 zero/nonzero value to integer registers, where we can then
2736 use a normal cmov, or vice-versa. */
2737
2738 switch (code)
2739 {
2740 case EQ: case LE: case LT: case LEU: case LTU:
2741 /* We have these compares. */
2742 cmp_code = code, code = NE;
2743 break;
2744
2745 case NE:
2746 /* This must be reversed. */
2747 cmp_code = EQ, code = EQ;
2748 break;
2749
2750 case GE: case GT: case GEU: case GTU:
2751 /* These normally need swapping, but for integer zero we have
2752 special patterns that recognize swapped operands. */
2753 if (!fp_p && op1 == const0_rtx)
2754 cmp_code = code, code = NE;
2755 else
2756 {
2757 cmp_code = swap_condition (code);
2758 code = NE;
2759 tem = op0, op0 = op1, op1 = tem;
2760 }
2761 break;
2762
2763 default:
2764 gcc_unreachable ();
2765 }
2766
2767 tem = gen_reg_rtx (cmp_op_mode);
2768 emit_insn (gen_rtx_SET (VOIDmode, tem,
2769 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2770 op0, op1)));
2771
2772 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2773 op0 = gen_lowpart (cmp_op_mode, tem);
2774 op1 = CONST0_RTX (cmp_op_mode);
2775 fp_p = !fp_p;
2776 local_fast_math = 1;
2777 }
2778
2779 /* We may be able to use a conditional move directly.
2780 This avoids emitting spurious compares. */
2781 if (signed_comparison_operator (cmp, VOIDmode)
2782 && (!fp_p || local_fast_math)
2783 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2784 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2785
2786 /* We can't put the comparison inside the conditional move;
2787 emit a compare instruction and put that inside the
2788 conditional move. Make sure we emit only comparisons we have;
2789 swap or reverse as necessary. */
2790
2791 if (no_new_pseudos)
2792 return NULL_RTX;
2793
2794 switch (code)
2795 {
2796 case EQ: case LE: case LT: case LEU: case LTU:
2797 /* We have these compares: */
2798 break;
2799
2800 case NE:
2801 /* This must be reversed. */
2802 code = reverse_condition (code);
2803 cmov_code = EQ;
2804 break;
2805
2806 case GE: case GT: case GEU: case GTU:
2807 /* These must be swapped. */
2808 if (op1 != CONST0_RTX (cmp_mode))
2809 {
2810 code = swap_condition (code);
2811 tem = op0, op0 = op1, op1 = tem;
2812 }
2813 break;
2814
2815 default:
2816 gcc_unreachable ();
2817 }
2818
2819 if (!fp_p)
2820 {
2821 if (!reg_or_0_operand (op0, DImode))
2822 op0 = force_reg (DImode, op0);
2823 if (!reg_or_8bit_operand (op1, DImode))
2824 op1 = force_reg (DImode, op1);
2825 }
2826
2827 /* ??? We mark the branch mode to be CCmode to prevent the compare
2828 and cmov from being combined, since the compare insn follows IEEE
2829 rules that the cmov does not. */
2830 if (fp_p && !local_fast_math)
2831 cmov_mode = CCmode;
2832
2833 tem = gen_reg_rtx (cmp_op_mode);
2834 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2835 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2836 }
2837
2838 /* Simplify a conditional move of two constants into a setcc with
2839 arithmetic. This is done with a splitter since combine would
2840 just undo the work if done during code generation. It also catches
2841 cases we wouldn't have before cse. */
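/* For example (sketch): "x = cond ? 8 : 0" is handled by the first case
   below -- emit the setcc "t = (cond op 0)" and then "x = t << 3" --
   rather than materializing both constants for a cmov. Similarly
   "x = cond ? 5 : 1" (diff == 4) becomes "x = t*4 + 1", i.e. an s4addq. */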
2842
2843 int
2844 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2845 rtx t_rtx, rtx f_rtx)
2846 {
2847 HOST_WIDE_INT t, f, diff;
2848 enum machine_mode mode;
2849 rtx target, subtarget, tmp;
2850
2851 mode = GET_MODE (dest);
2852 t = INTVAL (t_rtx);
2853 f = INTVAL (f_rtx);
2854 diff = t - f;
2855
2856 if (((code == NE || code == EQ) && diff < 0)
2857 || (code == GE || code == GT))
2858 {
2859 code = reverse_condition (code);
2860 diff = t, t = f, f = diff;
2861 diff = t - f;
2862 }
2863
2864 subtarget = target = dest;
2865 if (mode != DImode)
2866 {
2867 target = gen_lowpart (DImode, dest);
2868 if (! no_new_pseudos)
2869 subtarget = gen_reg_rtx (DImode);
2870 else
2871 subtarget = target;
2872 }
2873 /* Below, we must be careful to use copy_rtx on target and subtarget
2874 in intermediate insns, as they may be a subreg rtx, which may not
2875 be shared. */
2876
2877 if (f == 0 && exact_log2 (diff) > 0
2878 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2879 viable over a longer latency cmove. On EV5, the E0 slot is a
2880 scarce resource, and on EV4 shift has the same latency as a cmove. */
2881 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2882 {
2883 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2884 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2885
2886 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2887 GEN_INT (exact_log2 (t)));
2888 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2889 }
2890 else if (f == 0 && t == -1)
2891 {
2892 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2893 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2894
2895 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2896 }
2897 else if (diff == 1 || diff == 4 || diff == 8)
2898 {
2899 rtx add_op;
2900
2901 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2902 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2903
2904 if (diff == 1)
2905 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2906 else
2907 {
2908 add_op = GEN_INT (f);
2909 if (sext_add_operand (add_op, mode))
2910 {
2911 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2912 GEN_INT (diff));
2913 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2914 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2915 }
2916 else
2917 return 0;
2918 }
2919 }
2920 else
2921 return 0;
2922
2923 return 1;
2924 }
2925 \f
2926 /* Look up the function X_floating library function name for the
2927 given operation. */
2928
2929 struct xfloating_op GTY(())
2930 {
2931 const enum rtx_code code;
2932 const char *const GTY((skip)) osf_func;
2933 const char *const GTY((skip)) vms_func;
2934 rtx libcall;
2935 };
2936
2937 static GTY(()) struct xfloating_op xfloating_ops[] =
2938 {
2939 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2940 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2941 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2942 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2943 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2944 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2945 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2946 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2947 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2948 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2949 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2950 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2951 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2952 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2953 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2954 };
2955
2956 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2957 {
2958 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2959 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2960 };
2961
2962 static rtx
2963 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2964 {
2965 struct xfloating_op *ops = xfloating_ops;
2966 long n = ARRAY_SIZE (xfloating_ops);
2967 long i;
2968
2969 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2970
2971 /* How irritating. Nothing to key off for the main table. */
2972 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2973 {
2974 ops = vax_cvt_ops;
2975 n = ARRAY_SIZE (vax_cvt_ops);
2976 }
2977
2978 for (i = 0; i < n; ++i, ++ops)
2979 if (ops->code == code)
2980 {
2981 rtx func = ops->libcall;
2982 if (!func)
2983 {
2984 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2985 ? ops->vms_func : ops->osf_func);
2986 ops->libcall = func;
2987 }
2988 return func;
2989 }
2990
2991 gcc_unreachable ();
2992 }
2993
2994 /* Most X_floating operations take the rounding mode as an argument.
2995 Compute that here. */
2996
2997 static int
2998 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2999 enum alpha_fp_rounding_mode round)
3000 {
3001 int mode;
3002
3003 switch (round)
3004 {
3005 case ALPHA_FPRM_NORM:
3006 mode = 2;
3007 break;
3008 case ALPHA_FPRM_MINF:
3009 mode = 1;
3010 break;
3011 case ALPHA_FPRM_CHOP:
3012 mode = 0;
3013 break;
3014 case ALPHA_FPRM_DYN:
3015 mode = 4;
3016 break;
3017 default:
3018 gcc_unreachable ();
3019
3020 /* XXX For reference, round to +inf is mode = 3. */
3021 }
3022
3023 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3024 mode |= 0x10000;
3025
3026 return mode;
3027 }
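
/* For example, the default round-to-normal setting yields 2, and a
   FLOAT_TRUNCATE under the /n trap mode yields 0x10002; the result is
   simply passed as the final DImode argument of the libcall. */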
3028
3029 /* Emit an X_floating library function call.
3030
3031 Note that these functions do not follow normal calling conventions:
3032 TFmode arguments are passed in two integer registers (as opposed to
3033 indirect); TFmode return values appear in R16+R17.
3034
3035 FUNC is the function to call.
3036 TARGET is where the output belongs.
3037 OPERANDS are the inputs.
3038 NOPERANDS is the count of inputs.
3039 EQUIV is the expression equivalent for the function.
3040 */
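/* As a sketch of the resulting register assignment (using the OSF entry
   point names for concreteness): an X_floating add _OtsAddX(a, b, round)
   gets A in $16/$17, B in $18/$19 and the DImode rounding argument in
   $20, and the TFmode result comes back in $16/$17 rather than in the
   usual value return registers. */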
3041
3042 static void
3043 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3044 int noperands, rtx equiv)
3045 {
3046 rtx usage = NULL_RTX, tmp, reg;
3047 int regno = 16, i;
3048
3049 start_sequence ();
3050
3051 for (i = 0; i < noperands; ++i)
3052 {
3053 switch (GET_MODE (operands[i]))
3054 {
3055 case TFmode:
3056 reg = gen_rtx_REG (TFmode, regno);
3057 regno += 2;
3058 break;
3059
3060 case DFmode:
3061 reg = gen_rtx_REG (DFmode, regno + 32);
3062 regno += 1;
3063 break;
3064
3065 case VOIDmode:
3066 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3067 /* FALLTHRU */
3068 case DImode:
3069 reg = gen_rtx_REG (DImode, regno);
3070 regno += 1;
3071 break;
3072
3073 default:
3074 gcc_unreachable ();
3075 }
3076
3077 emit_move_insn (reg, operands[i]);
3078 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3079 }
3080
3081 switch (GET_MODE (target))
3082 {
3083 case TFmode:
3084 reg = gen_rtx_REG (TFmode, 16);
3085 break;
3086 case DFmode:
3087 reg = gen_rtx_REG (DFmode, 32);
3088 break;
3089 case DImode:
3090 reg = gen_rtx_REG (DImode, 0);
3091 break;
3092 default:
3093 gcc_unreachable ();
3094 }
3095
3096 tmp = gen_rtx_MEM (QImode, func);
3097 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3098 const0_rtx, const0_rtx));
3099 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3100 CONST_OR_PURE_CALL_P (tmp) = 1;
3101
3102 tmp = get_insns ();
3103 end_sequence ();
3104
3105 emit_libcall_block (tmp, target, reg, equiv);
3106 }
3107
3108 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3109
3110 void
3111 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3112 {
3113 rtx func;
3114 int mode;
3115 rtx out_operands[3];
3116
3117 func = alpha_lookup_xfloating_lib_func (code);
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119
3120 out_operands[0] = operands[1];
3121 out_operands[1] = operands[2];
3122 out_operands[2] = GEN_INT (mode);
3123 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3124 gen_rtx_fmt_ee (code, TFmode, operands[1],
3125 operands[2]));
3126 }
3127
3128 /* Emit an X_floating library function call for a comparison. */
3129
3130 static rtx
3131 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3132 {
3133 enum rtx_code cmp_code, res_code;
3134 rtx func, out, operands[2];
3135
3136 /* X_floating library comparison functions return
3137 -1 unordered
3138 0 false
3139 1 true
3140 Convert the compare against the raw return value. */
3141
3142 cmp_code = *pcode;
3143 switch (cmp_code)
3144 {
3145 case UNORDERED:
3146 cmp_code = EQ;
3147 res_code = LT;
3148 break;
3149 case ORDERED:
3150 cmp_code = EQ;
3151 res_code = GE;
3152 break;
3153 case NE:
3154 res_code = NE;
3155 break;
3156 case EQ:
3157 case LT:
3158 case GT:
3159 case LE:
3160 case GE:
3161 res_code = GT;
3162 break;
3163 default:
3164 gcc_unreachable ();
3165 }
3166 *pcode = res_code;
3167
3168 func = alpha_lookup_xfloating_lib_func (cmp_code);
3169
3170 operands[0] = op0;
3171 operands[1] = op1;
3172 out = gen_reg_rtx (DImode);
3173
3174 /* ??? Strange mode for equiv because what's actually returned
3175 is -1,0,1, not a proper boolean value. */
3176 alpha_emit_xfloating_libcall (func, out, operands, 2,
3177 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3178
3179 return out;
3180 }
3181
3182 /* Emit an X_floating library function call for a conversion. */
3183
3184 void
3185 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3186 {
3187 int noperands = 1, mode;
3188 rtx out_operands[2];
3189 rtx func;
3190 enum rtx_code code = orig_code;
3191
3192 if (code == UNSIGNED_FIX)
3193 code = FIX;
3194
3195 func = alpha_lookup_xfloating_lib_func (code);
3196
3197 out_operands[0] = operands[1];
3198
3199 switch (code)
3200 {
3201 case FIX:
3202 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3203 out_operands[1] = GEN_INT (mode);
3204 noperands = 2;
3205 break;
3206 case FLOAT_TRUNCATE:
3207 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3208 out_operands[1] = GEN_INT (mode);
3209 noperands = 2;
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3216 gen_rtx_fmt_e (orig_code,
3217 GET_MODE (operands[0]),
3218 operands[1]));
3219 }
3220
3221 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3222 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3223 guarantee that the sequence
3224 set (OP[0] OP[2])
3225 set (OP[1] OP[3])
3226 is valid. Naturally, output operand ordering is little-endian.
3227 This is used by *movtf_internal and *movti_internal. */
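/* For example (sketch): splitting a TImode register move whose source
   pair is ($3,$4) and whose destination pair is ($4,$5) gives
   OP[0] = $4, OP[1] = $5, OP[2] = $3, OP[3] = $4; OP[0] overlaps OP[3],
   so with FIXUP_OVERLAP the two sets are swapped and $5 is written from
   $4 before $4 is overwritten from $3. */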
3228
3229 void
3230 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3231 bool fixup_overlap)
3232 {
3233 switch (GET_CODE (operands[1]))
3234 {
3235 case REG:
3236 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3237 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3238 break;
3239
3240 case MEM:
3241 operands[3] = adjust_address (operands[1], DImode, 8);
3242 operands[2] = adjust_address (operands[1], DImode, 0);
3243 break;
3244
3245 case CONST_INT:
3246 case CONST_DOUBLE:
3247 gcc_assert (operands[1] == CONST0_RTX (mode));
3248 operands[2] = operands[3] = const0_rtx;
3249 break;
3250
3251 default:
3252 gcc_unreachable ();
3253 }
3254
3255 switch (GET_CODE (operands[0]))
3256 {
3257 case REG:
3258 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3259 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3260 break;
3261
3262 case MEM:
3263 operands[1] = adjust_address (operands[0], DImode, 8);
3264 operands[0] = adjust_address (operands[0], DImode, 0);
3265 break;
3266
3267 default:
3268 gcc_unreachable ();
3269 }
3270
3271 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3272 {
3273 rtx tmp;
3274 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3275 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3276 }
3277 }
3278
3279 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3280 op2 is a register containing the sign bit, operation is the
3281 logical operation to be performed. */
3282
3283 void
3284 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3285 {
3286 rtx high_bit = operands[2];
3287 rtx scratch;
3288 int move;
3289
3290 alpha_split_tmode_pair (operands, TFmode, false);
3291
3292 /* Detect three flavors of operand overlap. */
3293 move = 1;
3294 if (rtx_equal_p (operands[0], operands[2]))
3295 move = 0;
3296 else if (rtx_equal_p (operands[1], operands[2]))
3297 {
3298 if (rtx_equal_p (operands[0], high_bit))
3299 move = 2;
3300 else
3301 move = -1;
3302 }
3303
3304 if (move < 0)
3305 emit_move_insn (operands[0], operands[2]);
3306
3307 /* ??? If the destination overlaps both source tf and high_bit, then
3308 assume source tf is dead in its entirety and use the other half
3309 for a scratch register. Otherwise "scratch" is just the proper
3310 destination register. */
3311 scratch = operands[move < 2 ? 1 : 3];
3312
3313 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3314
3315 if (move > 0)
3316 {
3317 emit_move_insn (operands[0], operands[2]);
3318 if (move > 1)
3319 emit_move_insn (operands[1], scratch);
3320 }
3321 }
3322 \f
3323 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3324 unaligned data:
3325
3326 unsigned: signed:
3327 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3328 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3329 lda r3,X(r11) lda r3,X+2(r11)
3330 extwl r1,r3,r1 extql r1,r3,r1
3331 extwh r2,r3,r2 extqh r2,r3,r2
3332 or r1,r2,r1 or r1,r2,r1
3333 sra r1,48,r1
3334
3335 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3336 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3337 lda r3,X(r11) lda r3,X(r11)
3338 extll r1,r3,r1 extll r1,r3,r1
3339 extlh r2,r3,r2 extlh r2,r3,r2
3340 or r1,r2,r1 addl r1,r2,r1
3341
3342 quad: ldq_u r1,X(r11)
3343 ldq_u r2,X+7(r11)
3344 lda r3,X(r11)
3345 extql r1,r3,r1
3346 extqh r2,r3,r2
3347 or r1,r2,r1
3348 */
3349
3350 void
3351 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3352 HOST_WIDE_INT ofs, int sign)
3353 {
3354 rtx meml, memh, addr, extl, exth, tmp, mema;
3355 enum machine_mode mode;
3356
3357 if (TARGET_BWX && size == 2)
3358 {
3359 meml = adjust_address (mem, QImode, ofs);
3360 memh = adjust_address (mem, QImode, ofs+1);
3361 if (BYTES_BIG_ENDIAN)
3362 tmp = meml, meml = memh, memh = tmp;
3363 extl = gen_reg_rtx (DImode);
3364 exth = gen_reg_rtx (DImode);
3365 emit_insn (gen_zero_extendqidi2 (extl, meml));
3366 emit_insn (gen_zero_extendqidi2 (exth, memh));
3367 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3368 NULL, 1, OPTAB_LIB_WIDEN);
3369 addr = expand_simple_binop (DImode, IOR, extl, exth,
3370 NULL, 1, OPTAB_LIB_WIDEN);
3371
3372 if (sign && GET_MODE (tgt) != HImode)
3373 {
3374 addr = gen_lowpart (HImode, addr);
3375 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3376 }
3377 else
3378 {
3379 if (GET_MODE (tgt) != DImode)
3380 addr = gen_lowpart (GET_MODE (tgt), addr);
3381 emit_move_insn (tgt, addr);
3382 }
3383 return;
3384 }
3385
3386 meml = gen_reg_rtx (DImode);
3387 memh = gen_reg_rtx (DImode);
3388 addr = gen_reg_rtx (DImode);
3389 extl = gen_reg_rtx (DImode);
3390 exth = gen_reg_rtx (DImode);
3391
3392 mema = XEXP (mem, 0);
3393 if (GET_CODE (mema) == LO_SUM)
3394 mema = force_reg (Pmode, mema);
3395
3396 /* AND addresses cannot be in any alias set, since they may implicitly
3397 alias surrounding code. Ideally we'd have some alias set that
3398 covered all types except those with alignment 8 or higher. */
3399
3400 tmp = change_address (mem, DImode,
3401 gen_rtx_AND (DImode,
3402 plus_constant (mema, ofs),
3403 GEN_INT (-8)));
3404 set_mem_alias_set (tmp, 0);
3405 emit_move_insn (meml, tmp);
3406
3407 tmp = change_address (mem, DImode,
3408 gen_rtx_AND (DImode,
3409 plus_constant (mema, ofs + size - 1),
3410 GEN_INT (-8)));
3411 set_mem_alias_set (tmp, 0);
3412 emit_move_insn (memh, tmp);
3413
3414 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3415 {
3416 emit_move_insn (addr, plus_constant (mema, -1));
3417
3418 emit_insn (gen_extqh_be (extl, meml, addr));
3419 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3420
3421 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3422 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3423 addr, 1, OPTAB_WIDEN);
3424 }
3425 else if (sign && size == 2)
3426 {
3427 emit_move_insn (addr, plus_constant (mema, ofs+2));
3428
3429 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3430 emit_insn (gen_extqh_le (exth, memh, addr));
3431
3432 /* We must use tgt here for the target. The Alpha/VMS port fails if we use
3433 addr for the target, because addr is marked as a pointer and combine
3434 knows that pointers are always sign-extended 32-bit values. */
3435 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3436 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3437 addr, 1, OPTAB_WIDEN);
3438 }
3439 else
3440 {
3441 if (WORDS_BIG_ENDIAN)
3442 {
3443 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3444 switch ((int) size)
3445 {
3446 case 2:
3447 emit_insn (gen_extwh_be (extl, meml, addr));
3448 mode = HImode;
3449 break;
3450
3451 case 4:
3452 emit_insn (gen_extlh_be (extl, meml, addr));
3453 mode = SImode;
3454 break;
3455
3456 case 8:
3457 emit_insn (gen_extqh_be (extl, meml, addr));
3458 mode = DImode;
3459 break;
3460
3461 default:
3462 gcc_unreachable ();
3463 }
3464 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3465 }
3466 else
3467 {
3468 emit_move_insn (addr, plus_constant (mema, ofs));
3469 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3470 switch ((int) size)
3471 {
3472 case 2:
3473 emit_insn (gen_extwh_le (exth, memh, addr));
3474 mode = HImode;
3475 break;
3476
3477 case 4:
3478 emit_insn (gen_extlh_le (exth, memh, addr));
3479 mode = SImode;
3480 break;
3481
3482 case 8:
3483 emit_insn (gen_extqh_le (exth, memh, addr));
3484 mode = DImode;
3485 break;
3486
3487 default:
3488 gcc_unreachable ();
3489 }
3490 }
3491
3492 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3493 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3494 sign, OPTAB_WIDEN);
3495 }
3496
3497 if (addr != tgt)
3498 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3499 }
3500
3501 /* Similarly, use ins and msk instructions to perform unaligned stores. */
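/* For reference, a sketch of what the little-endian longword case below
   expands to (register numbers arbitrary), mirroring the Handbook-style
   listing given above for loads:

	ldq_u r2,X+3(r11)
	ldq_u r1,X(r11)
	lda r3,X(r11)
	inslh r4,r3,r5
	insll r4,r3,r6
	msklh r2,r3,r2
	mskll r1,r3,r1
	or r2,r5,r2
	or r1,r6,r1
	stq_u r2,X+3(r11)
	stq_u r1,X(r11)

   i.e. load both quadwords, position the source bytes, clear the target
   bytes, merge, and store the high quadword before the low one. */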
3502
3503 void
3504 alpha_expand_unaligned_store (rtx dst, rtx src,
3505 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3506 {
3507 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3508
3509 if (TARGET_BWX && size == 2)
3510 {
3511 if (src != const0_rtx)
3512 {
3513 dstl = gen_lowpart (QImode, src);
3514 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3515 NULL, 1, OPTAB_LIB_WIDEN);
3516 dsth = gen_lowpart (QImode, dsth);
3517 }
3518 else
3519 dstl = dsth = const0_rtx;
3520
3521 meml = adjust_address (dst, QImode, ofs);
3522 memh = adjust_address (dst, QImode, ofs+1);
3523 if (BYTES_BIG_ENDIAN)
3524 addr = meml, meml = memh, memh = addr;
3525
3526 emit_move_insn (meml, dstl);
3527 emit_move_insn (memh, dsth);
3528 return;
3529 }
3530
3531 dstl = gen_reg_rtx (DImode);
3532 dsth = gen_reg_rtx (DImode);
3533 insl = gen_reg_rtx (DImode);
3534 insh = gen_reg_rtx (DImode);
3535
3536 dsta = XEXP (dst, 0);
3537 if (GET_CODE (dsta) == LO_SUM)
3538 dsta = force_reg (Pmode, dsta);
3539
3540 /* AND addresses cannot be in any alias set, since they may implicitly
3541 alias surrounding code. Ideally we'd have some alias set that
3542 covered all types except those with alignment 8 or higher. */
3543
3544 meml = change_address (dst, DImode,
3545 gen_rtx_AND (DImode,
3546 plus_constant (dsta, ofs),
3547 GEN_INT (-8)));
3548 set_mem_alias_set (meml, 0);
3549
3550 memh = change_address (dst, DImode,
3551 gen_rtx_AND (DImode,
3552 plus_constant (dsta, ofs + size - 1),
3553 GEN_INT (-8)));
3554 set_mem_alias_set (memh, 0);
3555
3556 emit_move_insn (dsth, memh);
3557 emit_move_insn (dstl, meml);
3558 if (WORDS_BIG_ENDIAN)
3559 {
3560 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3561
3562 if (src != const0_rtx)
3563 {
3564 switch ((int) size)
3565 {
3566 case 2:
3567 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3568 break;
3569 case 4:
3570 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3571 break;
3572 case 8:
3573 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3574 break;
3575 }
3576 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3577 GEN_INT (size*8), addr));
3578 }
3579
3580 switch ((int) size)
3581 {
3582 case 2:
3583 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3584 break;
3585 case 4:
3586 {
3587 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3588 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3589 break;
3590 }
3591 case 8:
3592 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3593 break;
3594 }
3595
3596 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3597 }
3598 else
3599 {
3600 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3601
3602 if (src != CONST0_RTX (GET_MODE (src)))
3603 {
3604 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3605 GEN_INT (size*8), addr));
3606
3607 switch ((int) size)
3608 {
3609 case 2:
3610 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3611 break;
3612 case 4:
3613 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3614 break;
3615 case 8:
3616 emit_insn (gen_insql_le (insl, src, addr));
3617 break;
3618 }
3619 }
3620
3621 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3622
3623 switch ((int) size)
3624 {
3625 case 2:
3626 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3627 break;
3628 case 4:
3629 {
3630 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3631 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3632 break;
3633 }
3634 case 8:
3635 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3636 break;
3637 }
3638 }
3639
3640 if (src != CONST0_RTX (GET_MODE (src)))
3641 {
3642 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3643 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3644 }
3645
3646 if (WORDS_BIG_ENDIAN)
3647 {
3648 emit_move_insn (meml, dstl);
3649 emit_move_insn (memh, dsth);
3650 }
3651 else
3652 {
3653 /* Must store high before low for degenerate case of aligned. */
3654 emit_move_insn (memh, dsth);
3655 emit_move_insn (meml, dstl);
3656 }
3657 }
3658
3659 /* The block move code tries to maximize speed by separating loads and
3660 stores at the expense of register pressure: we load all of the data
3661 before we store it back out. There are two secondary effects worth
3662 mentioning: this speeds copying to/from aligned and unaligned
3663 buffers, and it makes the code significantly easier to write. */
3664
3665 #define MAX_MOVE_WORDS 8
3666
3667 /* Load an integral number of consecutive unaligned quadwords. */
3668
3669 static void
3670 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3671 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3672 {
3673 rtx const im8 = GEN_INT (-8);
3674 rtx const i64 = GEN_INT (64);
3675 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3676 rtx sreg, areg, tmp, smema;
3677 HOST_WIDE_INT i;
3678
3679 smema = XEXP (smem, 0);
3680 if (GET_CODE (smema) == LO_SUM)
3681 smema = force_reg (Pmode, smema);
3682
3683 /* Generate all the tmp registers we need. */
3684 for (i = 0; i < words; ++i)
3685 {
3686 data_regs[i] = out_regs[i];
3687 ext_tmps[i] = gen_reg_rtx (DImode);
3688 }
3689 data_regs[words] = gen_reg_rtx (DImode);
3690
3691 if (ofs != 0)
3692 smem = adjust_address (smem, GET_MODE (smem), ofs);
3693
3694 /* Load up all of the source data. */
3695 for (i = 0; i < words; ++i)
3696 {
3697 tmp = change_address (smem, DImode,
3698 gen_rtx_AND (DImode,
3699 plus_constant (smema, 8*i),
3700 im8));
3701 set_mem_alias_set (tmp, 0);
3702 emit_move_insn (data_regs[i], tmp);
3703 }
3704
3705 tmp = change_address (smem, DImode,
3706 gen_rtx_AND (DImode,
3707 plus_constant (smema, 8*words - 1),
3708 im8));
3709 set_mem_alias_set (tmp, 0);
3710 emit_move_insn (data_regs[words], tmp);
3711
3712 /* Extract the half-word fragments. Unfortunately DEC decided to make
3713 extxh with offset zero a noop instead of zeroing the register, so
3714 we must take care of that edge condition ourselves with cmov. */
3715
3716 sreg = copy_addr_to_reg (smema);
3717 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3718 1, OPTAB_WIDEN);
3719 if (WORDS_BIG_ENDIAN)
3720 emit_move_insn (sreg, plus_constant (sreg, 7));
3721 for (i = 0; i < words; ++i)
3722 {
3723 if (WORDS_BIG_ENDIAN)
3724 {
3725 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3726 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3727 }
3728 else
3729 {
3730 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3731 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3732 }
3733 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3734 gen_rtx_IF_THEN_ELSE (DImode,
3735 gen_rtx_EQ (DImode, areg,
3736 const0_rtx),
3737 const0_rtx, ext_tmps[i])));
3738 }
3739
3740 /* Merge the half-words into whole words. */
3741 for (i = 0; i < words; ++i)
3742 {
3743 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3744 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3745 }
3746 }
3747
3748 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3749 may be NULL to store zeros. */
3750
3751 static void
3752 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3753 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3754 {
3755 rtx const im8 = GEN_INT (-8);
3756 rtx const i64 = GEN_INT (64);
3757 rtx ins_tmps[MAX_MOVE_WORDS];
3758 rtx st_tmp_1, st_tmp_2, dreg;
3759 rtx st_addr_1, st_addr_2, dmema;
3760 HOST_WIDE_INT i;
3761
3762 dmema = XEXP (dmem, 0);
3763 if (GET_CODE (dmema) == LO_SUM)
3764 dmema = force_reg (Pmode, dmema);
3765
3766 /* Generate all the tmp registers we need. */
3767 if (data_regs != NULL)
3768 for (i = 0; i < words; ++i)
3769 ins_tmps[i] = gen_reg_rtx(DImode);
3770 st_tmp_1 = gen_reg_rtx(DImode);
3771 st_tmp_2 = gen_reg_rtx(DImode);
3772
3773 if (ofs != 0)
3774 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3775
3776 st_addr_2 = change_address (dmem, DImode,
3777 gen_rtx_AND (DImode,
3778 plus_constant (dmema, words*8 - 1),
3779 im8));
3780 set_mem_alias_set (st_addr_2, 0);
3781
3782 st_addr_1 = change_address (dmem, DImode,
3783 gen_rtx_AND (DImode, dmema, im8));
3784 set_mem_alias_set (st_addr_1, 0);
3785
3786 /* Load up the destination end bits. */
3787 emit_move_insn (st_tmp_2, st_addr_2);
3788 emit_move_insn (st_tmp_1, st_addr_1);
3789
3790 /* Shift the input data into place. */
3791 dreg = copy_addr_to_reg (dmema);
3792 if (WORDS_BIG_ENDIAN)
3793 emit_move_insn (dreg, plus_constant (dreg, 7));
3794 if (data_regs != NULL)
3795 {
3796 for (i = words-1; i >= 0; --i)
3797 {
3798 if (WORDS_BIG_ENDIAN)
3799 {
3800 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3801 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3802 }
3803 else
3804 {
3805 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3806 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3807 }
3808 }
3809 for (i = words-1; i > 0; --i)
3810 {
3811 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3812 ins_tmps[i-1], ins_tmps[i-1], 1,
3813 OPTAB_WIDEN);
3814 }
3815 }
3816
3817 /* Split and merge the ends with the destination data. */
3818 if (WORDS_BIG_ENDIAN)
3819 {
3820 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3821 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3822 }
3823 else
3824 {
3825 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3826 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3827 }
3828
3829 if (data_regs != NULL)
3830 {
3831 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3832 st_tmp_2, 1, OPTAB_WIDEN);
3833 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3834 st_tmp_1, 1, OPTAB_WIDEN);
3835 }
3836
3837 /* Store it all. */
3838 if (WORDS_BIG_ENDIAN)
3839 emit_move_insn (st_addr_1, st_tmp_1);
3840 else
3841 emit_move_insn (st_addr_2, st_tmp_2);
3842 for (i = words-1; i > 0; --i)
3843 {
3844 rtx tmp = change_address (dmem, DImode,
3845 gen_rtx_AND (DImode,
3846 plus_constant(dmema,
3847 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3848 im8));
3849 set_mem_alias_set (tmp, 0);
3850 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3851 }
3852 if (WORDS_BIG_ENDIAN)
3853 emit_move_insn (st_addr_2, st_tmp_2);
3854 else
3855 emit_move_insn (st_addr_1, st_tmp_1);
3856 }
3857
3858
3859 /* Expand string/block move operations.
3860
3861 operands[0] is the pointer to the destination.
3862 operands[1] is the pointer to the source.
3863 operands[2] is the number of bytes to move.
3864 operands[3] is the alignment. */
3865
3866 int
3867 alpha_expand_block_move (rtx operands[])
3868 {
3869 rtx bytes_rtx = operands[2];
3870 rtx align_rtx = operands[3];
3871 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3872 HOST_WIDE_INT bytes = orig_bytes;
3873 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3874 HOST_WIDE_INT dst_align = src_align;
3875 rtx orig_src = operands[1];
3876 rtx orig_dst = operands[0];
3877 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3878 rtx tmp;
3879 unsigned int i, words, ofs, nregs = 0;
3880
3881 if (orig_bytes <= 0)
3882 return 1;
3883 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3884 return 0;
3885
3886 /* Look for additional alignment information from recorded register info. */
3887
3888 tmp = XEXP (orig_src, 0);
3889 if (GET_CODE (tmp) == REG)
3890 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3891 else if (GET_CODE (tmp) == PLUS
3892 && GET_CODE (XEXP (tmp, 0)) == REG
3893 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3894 {
3895 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3896 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3897
3898 if (a > src_align)
3899 {
3900 if (a >= 64 && c % 8 == 0)
3901 src_align = 64;
3902 else if (a >= 32 && c % 4 == 0)
3903 src_align = 32;
3904 else if (a >= 16 && c % 2 == 0)
3905 src_align = 16;
3906 }
3907 }
3908
3909 tmp = XEXP (orig_dst, 0);
3910 if (GET_CODE (tmp) == REG)
3911 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3912 else if (GET_CODE (tmp) == PLUS
3913 && GET_CODE (XEXP (tmp, 0)) == REG
3914 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3915 {
3916 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3917 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3918
3919 if (a > dst_align)
3920 {
3921 if (a >= 64 && c % 8 == 0)
3922 dst_align = 64;
3923 else if (a >= 32 && c % 4 == 0)
3924 dst_align = 32;
3925 else if (a >= 16 && c % 2 == 0)
3926 dst_align = 16;
3927 }
3928 }
3929
3930 ofs = 0;
3931 if (src_align >= 64 && bytes >= 8)
3932 {
3933 words = bytes / 8;
3934
3935 for (i = 0; i < words; ++i)
3936 data_regs[nregs + i] = gen_reg_rtx (DImode);
3937
3938 for (i = 0; i < words; ++i)
3939 emit_move_insn (data_regs[nregs + i],
3940 adjust_address (orig_src, DImode, ofs + i * 8));
3941
3942 nregs += words;
3943 bytes -= words * 8;
3944 ofs += words * 8;
3945 }
3946
3947 if (src_align >= 32 && bytes >= 4)
3948 {
3949 words = bytes / 4;
3950
3951 for (i = 0; i < words; ++i)
3952 data_regs[nregs + i] = gen_reg_rtx (SImode);
3953
3954 for (i = 0; i < words; ++i)
3955 emit_move_insn (data_regs[nregs + i],
3956 adjust_address (orig_src, SImode, ofs + i * 4));
3957
3958 nregs += words;
3959 bytes -= words * 4;
3960 ofs += words * 4;
3961 }
3962
3963 if (bytes >= 8)
3964 {
3965 words = bytes / 8;
3966
3967 for (i = 0; i < words+1; ++i)
3968 data_regs[nregs + i] = gen_reg_rtx (DImode);
3969
3970 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3971 words, ofs);
3972
3973 nregs += words;
3974 bytes -= words * 8;
3975 ofs += words * 8;
3976 }
3977
3978 if (! TARGET_BWX && bytes >= 4)
3979 {
3980 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3981 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3982 bytes -= 4;
3983 ofs += 4;
3984 }
3985
3986 if (bytes >= 2)
3987 {
3988 if (src_align >= 16)
3989 {
3990 do {
3991 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3992 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3993 bytes -= 2;
3994 ofs += 2;
3995 } while (bytes >= 2);
3996 }
3997 else if (! TARGET_BWX)
3998 {
3999 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4000 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4001 bytes -= 2;
4002 ofs += 2;
4003 }
4004 }
4005
4006 while (bytes > 0)
4007 {
4008 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4009 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4010 bytes -= 1;
4011 ofs += 1;
4012 }
4013
4014 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4015
4016 /* Now save it back out again. */
4017
4018 i = 0, ofs = 0;
4019
4020 /* Write out the data in whatever chunks reading the source allowed. */
4021 if (dst_align >= 64)
4022 {
4023 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4024 {
4025 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4026 data_regs[i]);
4027 ofs += 8;
4028 i++;
4029 }
4030 }
4031
4032 if (dst_align >= 32)
4033 {
4034 /* If the source has remaining DImode regs, write them out in
4035 two pieces. */
4036 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4037 {
4038 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4039 NULL_RTX, 1, OPTAB_WIDEN);
4040
4041 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4042 gen_lowpart (SImode, data_regs[i]));
4043 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4044 gen_lowpart (SImode, tmp));
4045 ofs += 8;
4046 i++;
4047 }
4048
4049 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4050 {
4051 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4052 data_regs[i]);
4053 ofs += 4;
4054 i++;
4055 }
4056 }
4057
4058 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4059 {
4060 /* Write out a remaining block of words using unaligned methods. */
4061
4062 for (words = 1; i + words < nregs; words++)
4063 if (GET_MODE (data_regs[i + words]) != DImode)
4064 break;
4065
4066 if (words == 1)
4067 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4068 else
4069 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4070 words, ofs);
4071
4072 i += words;
4073 ofs += words * 8;
4074 }
4075
4076 /* Due to the above, this won't be aligned. */
4077 /* ??? If we have more than one of these, consider constructing full
4078 words in registers and using alpha_expand_unaligned_store_words. */
4079 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4080 {
4081 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4082 ofs += 4;
4083 i++;
4084 }
4085
4086 if (dst_align >= 16)
4087 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4088 {
4089 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4090 i++;
4091 ofs += 2;
4092 }
4093 else
4094 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4095 {
4096 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4097 i++;
4098 ofs += 2;
4099 }
4100
4101 /* The remainder must be byte copies. */
4102 while (i < nregs)
4103 {
4104 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4105 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4106 i++;
4107 ofs += 1;
4108 }
4109
4110 return 1;
4111 }
4112
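/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.

   (operands[2] is unused here; in the setmem-style expander it is
   presumably the fill value, known to be zero on this path.)  */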
4113 int
4114 alpha_expand_block_clear (rtx operands[])
4115 {
4116 rtx bytes_rtx = operands[1];
4117 rtx align_rtx = operands[3];
4118 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4119 HOST_WIDE_INT bytes = orig_bytes;
4120 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4121 HOST_WIDE_INT alignofs = 0;
4122 rtx orig_dst = operands[0];
4123 rtx tmp;
4124 int i, words, ofs = 0;
4125
4126 if (orig_bytes <= 0)
4127 return 1;
4128 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4129 return 0;
4130
4131 /* Look for stricter alignment. */
4132 tmp = XEXP (orig_dst, 0);
4133 if (GET_CODE (tmp) == REG)
4134 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4135 else if (GET_CODE (tmp) == PLUS
4136 && GET_CODE (XEXP (tmp, 0)) == REG
4137 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4138 {
4139 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4140 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4141
4142 if (a > align)
4143 {
4144 if (a >= 64)
4145 align = a, alignofs = 8 - c % 8;
4146 else if (a >= 32)
4147 align = a, alignofs = 4 - c % 4;
4148 else if (a >= 16)
4149 align = a, alignofs = 2 - c % 2;
4150 }
4151 }
4152
4153 /* Handle an unaligned prefix first. */
4154
4155 if (alignofs > 0)
4156 {
4157 #if HOST_BITS_PER_WIDE_INT >= 64
4158 /* Given that alignofs is bounded by align, the only time BWX could
4159 generate three stores is for a 7 byte fill. Prefer two individual
4160 stores over a load/mask/store sequence. */
4161 if ((!TARGET_BWX || alignofs == 7)
4162 && align >= 32
4163 && !(alignofs == 4 && bytes >= 4))
4164 {
4165 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4166 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4167 rtx mem, tmp;
4168 HOST_WIDE_INT mask;
4169
4170 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4171 set_mem_alias_set (mem, 0);
4172
4173 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4174 if (bytes < alignofs)
4175 {
4176 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4177 ofs += bytes;
4178 bytes = 0;
4179 }
4180 else
4181 {
4182 bytes -= alignofs;
4183 ofs += alignofs;
4184 }
4185 alignofs = 0;
4186
4187 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4188 NULL_RTX, 1, OPTAB_WIDEN);
4189
4190 emit_move_insn (mem, tmp);
4191 }
4192 #endif
4193
4194 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4195 {
4196 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4197 bytes -= 1;
4198 ofs += 1;
4199 alignofs -= 1;
4200 }
4201 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4202 {
4203 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4204 bytes -= 2;
4205 ofs += 2;
4206 alignofs -= 2;
4207 }
4208 if (alignofs == 4 && bytes >= 4)
4209 {
4210 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4211 bytes -= 4;
4212 ofs += 4;
4213 alignofs = 0;
4214 }
4215
4216 /* If we've not used the extra lead alignment information by now,
4217 we won't be able to. Downgrade align to match what's left over. */
4218 if (alignofs > 0)
4219 {
4220 alignofs = alignofs & -alignofs;
4221 align = MIN (align, alignofs * BITS_PER_UNIT);
4222 }
4223 }
4224
4225 /* Handle a block of contiguous quadwords.  */
4226
4227 if (align >= 64 && bytes >= 8)
4228 {
4229 words = bytes / 8;
4230
4231 for (i = 0; i < words; ++i)
4232 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4233 const0_rtx);
4234
4235 bytes -= words * 8;
4236 ofs += words * 8;
4237 }
4238
4239 /* If the block is large and appropriately aligned, emit a single
4240 store followed by a sequence of stq_u insns. */
4241
4242 if (align >= 32 && bytes > 16)
4243 {
4244 rtx orig_dsta;
4245
4246 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4247 bytes -= 4;
4248 ofs += 4;
4249
4250 orig_dsta = XEXP (orig_dst, 0);
4251 if (GET_CODE (orig_dsta) == LO_SUM)
4252 orig_dsta = force_reg (Pmode, orig_dsta);
4253
4254 words = bytes / 8;
4255 for (i = 0; i < words; ++i)
4256 {
4257 rtx mem
4258 = change_address (orig_dst, DImode,
4259 gen_rtx_AND (DImode,
4260 plus_constant (orig_dsta, ofs + i*8),
4261 GEN_INT (-8)));
4262 set_mem_alias_set (mem, 0);
4263 emit_move_insn (mem, const0_rtx);
4264 }
4265
4266 /* Depending on the alignment, the first stq_u may have overlapped
4267 with the initial stl, which means that the last stq_u didn't
4268 write as much as it would appear. Leave those questionable bytes
4269 unaccounted for. */
4270 bytes -= words * 8 - 4;
4271 ofs += words * 8 - 4;
4272 }
4273
4274 /* Handle a smaller block of aligned words. */
4275
4276 if ((align >= 64 && bytes == 4)
4277 || (align == 32 && bytes >= 4))
4278 {
4279 words = bytes / 4;
4280
4281 for (i = 0; i < words; ++i)
4282 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4283 const0_rtx);
4284
4285 bytes -= words * 4;
4286 ofs += words * 4;
4287 }
4288
4289 /* An unaligned block uses stq_u stores for as many quadwords as possible.  */
4290
4291 if (bytes >= 8)
4292 {
4293 words = bytes / 8;
4294
4295 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4296
4297 bytes -= words * 8;
4298 ofs += words * 8;
4299 }
4300
4301 /* Next clean up any trailing pieces. */
4302
4303 #if HOST_BITS_PER_WIDE_INT >= 64
4304 /* Count the number of bits in BYTES for which aligned stores could
4305 be emitted. */
4306 words = 0;
4307 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4308 if (bytes & i)
4309 words += 1;
4310
4311 /* If we have appropriate alignment (and it wouldn't take too many
4312 instructions otherwise), mask out the bytes we need. */
4313 if (TARGET_BWX ? words > 2 : bytes > 0)
4314 {
4315 if (align >= 64)
4316 {
4317 rtx mem, tmp;
4318 HOST_WIDE_INT mask;
4319
4320 mem = adjust_address (orig_dst, DImode, ofs);
4321 set_mem_alias_set (mem, 0);
4322
4323 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4324
4325 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4326 NULL_RTX, 1, OPTAB_WIDEN);
4327
4328 emit_move_insn (mem, tmp);
4329 return 1;
4330 }
4331 else if (align >= 32 && bytes < 4)
4332 {
4333 rtx mem, tmp;
4334 HOST_WIDE_INT mask;
4335
4336 mem = adjust_address (orig_dst, SImode, ofs);
4337 set_mem_alias_set (mem, 0);
4338
4339 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4340
4341 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4342 NULL_RTX, 1, OPTAB_WIDEN);
4343
4344 emit_move_insn (mem, tmp);
4345 return 1;
4346 }
4347 }
4348 #endif
4349
4350 if (!TARGET_BWX && bytes >= 4)
4351 {
4352 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4353 bytes -= 4;
4354 ofs += 4;
4355 }
4356
4357 if (bytes >= 2)
4358 {
4359 if (align >= 16)
4360 {
4361 do {
4362 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4363 const0_rtx);
4364 bytes -= 2;
4365 ofs += 2;
4366 } while (bytes >= 2);
4367 }
4368 else if (! TARGET_BWX)
4369 {
4370 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4371 bytes -= 2;
4372 ofs += 2;
4373 }
4374 }
4375
4376 while (bytes > 0)
4377 {
4378 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4379 bytes -= 1;
4380 ofs += 1;
4381 }
4382
4383 return 1;
4384 }
4385
4386 /* Returns a mask so that zap(x, value) == x & mask. */
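/* For example, VALUE == 0x0f selects the low four bytes, so
   zap (x, 0x0f) == x & 0xffffffff00000000: the mask has 0x00 in every
   byte whose bit is set in VALUE and 0xff in every other byte.  */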
4387
4388 rtx
4389 alpha_expand_zap_mask (HOST_WIDE_INT value)
4390 {
4391 rtx result;
4392 int i;
4393
4394 if (HOST_BITS_PER_WIDE_INT >= 64)
4395 {
4396 HOST_WIDE_INT mask = 0;
4397
4398 for (i = 7; i >= 0; --i)
4399 {
4400 mask <<= 8;
4401 if (!((value >> i) & 1))
4402 mask |= 0xff;
4403 }
4404
4405 result = gen_int_mode (mask, DImode);
4406 }
4407 else
4408 {
4409 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4410
4411 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4412
4413 for (i = 7; i >= 4; --i)
4414 {
4415 mask_hi <<= 8;
4416 if (!((value >> i) & 1))
4417 mask_hi |= 0xff;
4418 }
4419
4420 for (i = 3; i >= 0; --i)
4421 {
4422 mask_lo <<= 8;
4423 if (!((value >> i) & 1))
4424 mask_lo |= 0xff;
4425 }
4426
4427 result = immed_double_const (mask_lo, mask_hi, DImode);
4428 }
4429
4430 return result;
4431 }
4432
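/* Expand a builtin vector binary operation.  GEN generates the insn;
   MODE is the vector mode to operate in.  The operands are rewritten
   as lowparts in MODE, with integer zero replaced by the matching
   vector zero constant, before the insn is emitted.  */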
4433 void
4434 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4435 enum machine_mode mode,
4436 rtx op0, rtx op1, rtx op2)
4437 {
4438 op0 = gen_lowpart (mode, op0);
4439
4440 if (op1 == const0_rtx)
4441 op1 = CONST0_RTX (mode);
4442 else
4443 op1 = gen_lowpart (mode, op1);
4444
4445 if (op2 == const0_rtx)
4446 op2 = CONST0_RTX (mode);
4447 else
4448 op2 = gen_lowpart (mode, op2);
4449
4450 emit_insn ((*gen) (op0, op1, op2));
4451 }
4452
4453 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4454 COND is true. Mark the jump as unlikely to be taken. */
4455
4456 static void
4457 emit_unlikely_jump (rtx cond, rtx label)
4458 {
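  /* REG_BR_PROB_BASE represents certainty, so REG_BR_PROB_BASE / 100 - 1
     marks the branch as taken with probability just under 1%.  */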
4459 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4460 rtx x;
4461
4462 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4463 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4464 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4465 }
4466
4467 /* A subroutine of the atomic operation splitters. Emit a load-locked
4468 instruction in MODE. */
4469
4470 static void
4471 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4472 {
4473 rtx (*fn) (rtx, rtx) = NULL;
4474 if (mode == SImode)
4475 fn = gen_load_locked_si;
4476 else if (mode == DImode)
4477 fn = gen_load_locked_di;
4478 emit_insn (fn (reg, mem));
4479 }
4480
4481 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4482 instruction in MODE. */
4483
4484 static void
4485 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4486 {
4487 rtx (*fn) (rtx, rtx, rtx) = NULL;
4488 if (mode == SImode)
4489 fn = gen_store_conditional_si;
4490 else if (mode == DImode)
4491 fn = gen_store_conditional_di;
4492 emit_insn (fn (res, mem, val));
4493 }
4494
4495 /* A subroutine of the atomic operation splitters. Emit an insxl
4496 instruction in MODE. */
4497
4498 static rtx
4499 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4500 {
4501 rtx ret = gen_reg_rtx (DImode);
4502 rtx (*fn) (rtx, rtx, rtx);
4503
4504 if (WORDS_BIG_ENDIAN)
4505 {
4506 if (mode == QImode)
4507 fn = gen_insbl_be;
4508 else
4509 fn = gen_inswl_be;
4510 }
4511 else
4512 {
4513 if (mode == QImode)
4514 fn = gen_insbl_le;
4515 else
4516 fn = gen_inswl_le;
4517 }
4518 emit_insn (fn (ret, op1, op2));
4519
4520 return ret;
4521 }
4522
4523 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4524 to perform. MEM is the memory on which to operate. VAL is the second
4525 operand of the binary operator. BEFORE and AFTER are optional locations to
4526 return the value of MEM either before or after the operation. SCRATCH is
4527 a scratch register. */
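/* As an approximate sketch (for illustration only), a quadword
   fetch-and-add splits into

	mb
   1:	ldq_l	scratch,mem
	addq	scratch,val,scratch
	stq_c	scratch,mem
	beq	scratch,1b		# marked very unlikely
	mb

   with BEFORE/AFTER, when requested, receiving copies of the loaded
   and of the computed value.  */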
4528
4529 void
4530 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4531 rtx before, rtx after, rtx scratch)
4532 {
4533 enum machine_mode mode = GET_MODE (mem);
4534 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4535
4536 emit_insn (gen_memory_barrier ());
4537
4538 label = gen_label_rtx ();
4539 emit_label (label);
4540 label = gen_rtx_LABEL_REF (DImode, label);
4541
4542 if (before == NULL)
4543 before = scratch;
4544 emit_load_locked (mode, before, mem);
4545
4546 if (code == NOT)
4547 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4548 else
4549 x = gen_rtx_fmt_ee (code, mode, before, val);
4550 if (after)
4551 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4552 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4553
4554 emit_store_conditional (mode, cond, mem, scratch);
4555
4556 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4557 emit_unlikely_jump (x, label);
4558
4559 emit_insn (gen_memory_barrier ());
4560 }
4561
4562 /* Expand a compare and swap operation. */
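/* An approximate sketch (illustration only) of the quadword split:

	mb
   1:	ldq_l	retval,mem
	cmpeq	retval,oldval,cond
	beq	cond,2f			# both branches marked unlikely
	mov	newval,scratch
	stq_c	scratch,mem
	beq	scratch,1b
	mb
   2:
*/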
4563
4564 void
4565 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4566 rtx scratch)
4567 {
4568 enum machine_mode mode = GET_MODE (mem);
4569 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4570
4571 emit_insn (gen_memory_barrier ());
4572
4573 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4574 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4575 emit_label (XEXP (label1, 0));
4576
4577 emit_load_locked (mode, retval, mem);
4578
4579 x = gen_lowpart (DImode, retval);
4580 if (oldval == const0_rtx)
4581 x = gen_rtx_NE (DImode, x, const0_rtx);
4582 else
4583 {
4584 x = gen_rtx_EQ (DImode, x, oldval);
4585 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4586 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4587 }
4588 emit_unlikely_jump (x, label2);
4589
4590 emit_move_insn (scratch, newval);
4591 emit_store_conditional (mode, cond, mem, scratch);
4592
4593 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4594 emit_unlikely_jump (x, label1);
4595
4596 emit_insn (gen_memory_barrier ());
4597 emit_label (XEXP (label2, 0));
4598 }
4599
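/* Expand a compare and swap on a byte or halfword (QImode/HImode).
   Alpha has no byte- or word-sized load-locked/store-conditional, so
   the operation is carried out on the containing aligned quadword:
   the split routine below extracts, compares and re-inserts the
   narrow value with ext/msk/ins sequences.  */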
4600 void
4601 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4602 {
4603 enum machine_mode mode = GET_MODE (mem);
4604 rtx addr, align, wdst;
4605 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4606
4607 addr = force_reg (DImode, XEXP (mem, 0));
4608 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4609 NULL_RTX, 1, OPTAB_DIRECT);
4610
4611 oldval = convert_modes (DImode, mode, oldval, 1);
4612 newval = emit_insxl (mode, newval, addr);
4613
4614 wdst = gen_reg_rtx (DImode);
4615 if (mode == QImode)
4616 fn5 = gen_sync_compare_and_swapqi_1;
4617 else
4618 fn5 = gen_sync_compare_and_swaphi_1;
4619 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4620
4621 emit_move_insn (dst, gen_lowpart (mode, wdst));
4622 }
4623
4624 void
4625 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4626 rtx oldval, rtx newval, rtx align,
4627 rtx scratch, rtx cond)
4628 {
4629 rtx label1, label2, mem, width, mask, x;
4630
4631 mem = gen_rtx_MEM (DImode, align);
4632 MEM_VOLATILE_P (mem) = 1;
4633
4634 emit_insn (gen_memory_barrier ());
4635 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4636 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4637 emit_label (XEXP (label1, 0));
4638
4639 emit_load_locked (DImode, scratch, mem);
4640
4641 width = GEN_INT (GET_MODE_BITSIZE (mode));
4642 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4643 if (WORDS_BIG_ENDIAN)
4644 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4645 else
4646 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4647
4648 if (oldval == const0_rtx)
4649 x = gen_rtx_NE (DImode, dest, const0_rtx);
4650 else
4651 {
4652 x = gen_rtx_EQ (DImode, dest, oldval);
4653 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4654 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4655 }
4656 emit_unlikely_jump (x, label2);
4657
4658 if (WORDS_BIG_ENDIAN)
4659 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4660 else
4661 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4662 emit_insn (gen_iordi3 (scratch, scratch, newval));
4663
4664 emit_store_conditional (DImode, scratch, mem, scratch);
4665
4666 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4667 emit_unlikely_jump (x, label1);
4668
4669 emit_insn (gen_memory_barrier ());
4670 emit_label (XEXP (label2, 0));
4671 }
4672
4673 /* Expand an atomic exchange operation. */
4674
4675 void
4676 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4677 {
4678 enum machine_mode mode = GET_MODE (mem);
4679 rtx label, x, cond = gen_lowpart (DImode, scratch);
4680
4681 emit_insn (gen_memory_barrier ());
4682
4683 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4684 emit_label (XEXP (label, 0));
4685
4686 emit_load_locked (mode, retval, mem);
4687 emit_move_insn (scratch, val);
4688 emit_store_conditional (mode, cond, mem, scratch);
4689
4690 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4691 emit_unlikely_jump (x, label);
4692 }
4693
4694 void
4695 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4696 {
4697 enum machine_mode mode = GET_MODE (mem);
4698 rtx addr, align, wdst;
4699 rtx (*fn4) (rtx, rtx, rtx, rtx);
4700
4701 /* Force the address into a register. */
4702 addr = force_reg (DImode, XEXP (mem, 0));
4703
4704 /* Align it to a multiple of 8. */
4705 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4706 NULL_RTX, 1, OPTAB_DIRECT);
4707
4708 /* Insert val into the correct byte location within the word. */
4709 val = emit_insxl (mode, val, addr);
4710
4711 wdst = gen_reg_rtx (DImode);
4712 if (mode == QImode)
4713 fn4 = gen_sync_lock_test_and_setqi_1;
4714 else
4715 fn4 = gen_sync_lock_test_and_sethi_1;
4716 emit_insn (fn4 (wdst, addr, val, align));
4717
4718 emit_move_insn (dst, gen_lowpart (mode, wdst));
4719 }
4720
4721 void
4722 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4723 rtx val, rtx align, rtx scratch)
4724 {
4725 rtx label, mem, width, mask, x;
4726
4727 mem = gen_rtx_MEM (DImode, align);
4728 MEM_VOLATILE_P (mem) = 1;
4729
4730 emit_insn (gen_memory_barrier ());
4731 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4732 emit_label (XEXP (label, 0));
4733
4734 emit_load_locked (DImode, scratch, mem);
4735
4736 width = GEN_INT (GET_MODE_BITSIZE (mode));
4737 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4738 if (WORDS_BIG_ENDIAN)
4739 {
4740 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4741 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4742 }
4743 else
4744 {
4745 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4746 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4747 }
4748 emit_insn (gen_iordi3 (scratch, scratch, val));
4749
4750 emit_store_conditional (DImode, scratch, mem, scratch);
4751
4752 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4753 emit_unlikely_jump (x, label);
4754 }
4755 \f
4756 /* Adjust the cost of a scheduling dependency. Return the new cost of
4757 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4758
4759 static int
4760 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4761 {
4762 enum attr_type insn_type, dep_insn_type;
4763
4764 /* If the dependence is an anti-dependence, there is no cost. For an
4765 output dependence, there is sometimes a cost, but it doesn't seem
4766 worth handling those few cases. */
4767 if (REG_NOTE_KIND (link) != 0)
4768 return cost;
4769
4770 /* If we can't recognize the insns, we can't really do anything. */
4771 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4772 return cost;
4773
4774 insn_type = get_attr_type (insn);
4775 dep_insn_type = get_attr_type (dep_insn);
4776
4777 /* Bring in the user-defined memory latency. */
4778 if (dep_insn_type == TYPE_ILD
4779 || dep_insn_type == TYPE_FLD
4780 || dep_insn_type == TYPE_LDSYM)
4781 cost += alpha_memory_latency-1;
4782
4783 /* Everything else handled in DFA bypasses now. */
4784
4785 return cost;
4786 }
4787
4788 /* The number of instructions that can be issued per cycle. */
4789
4790 static int
4791 alpha_issue_rate (void)
4792 {
4793 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4794 }
4795
4796 /* How many alternative schedules to try. This should be as wide as the
4797 scheduling freedom in the DFA, but no wider. Making this value too
4798 large results extra work for the scheduler.
4799
4800 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4801 alternative schedules. For EV5, we can choose between E0/E1 and
4802 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4803
4804 static int
4805 alpha_multipass_dfa_lookahead (void)
4806 {
4807 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4808 }
4809 \f
4810 /* Machine-specific function data. */
4811
4812 struct machine_function GTY(())
4813 {
4814 /* For unicosmk. */
4815 /* List of call information words for calls from this function. */
4816 struct rtx_def *first_ciw;
4817 struct rtx_def *last_ciw;
4818 int ciw_count;
4819
4820 /* List of deferred case vectors. */
4821 struct rtx_def *addr_list;
4822
4823 /* For OSF. */
4824 const char *some_ld_name;
4825
4826 /* For TARGET_LD_BUGGY_LDGP. */
4827 struct rtx_def *gp_save_rtx;
4828 };
4829
4830 /* How to allocate a 'struct machine_function'. */
4831
4832 static struct machine_function *
4833 alpha_init_machine_status (void)
4834 {
4835 return ((struct machine_function *)
4836 ggc_alloc_cleared (sizeof (struct machine_function)));
4837 }
4838
4839 /* Functions to save and restore alpha_return_addr_rtx. */
4840
4841 /* Start the ball rolling with RETURN_ADDR_RTX. */
4842
4843 rtx
4844 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4845 {
4846 if (count != 0)
4847 return const0_rtx;
4848
4849 return get_hard_reg_initial_val (Pmode, REG_RA);
4850 }
4851
4852 /* Return or create a memory slot containing the gp value for the current
4853 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4854
4855 rtx
4856 alpha_gp_save_rtx (void)
4857 {
4858 rtx seq, m = cfun->machine->gp_save_rtx;
4859
4860 if (m == NULL)
4861 {
4862 start_sequence ();
4863
4864 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4865 m = validize_mem (m);
4866 emit_move_insn (m, pic_offset_table_rtx);
4867
4868 seq = get_insns ();
4869 end_sequence ();
4870 emit_insn_after (seq, entry_of_function ());
4871
4872 cfun->machine->gp_save_rtx = m;
4873 }
4874
4875 return m;
4876 }
4877
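/* Return nonzero if the return address register ($26) is ever set in
   the current function.  If no pseudo holding its incoming value was
   created, fall back on regs_ever_live.  */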
4878 static int
4879 alpha_ra_ever_killed (void)
4880 {
4881 rtx top;
4882
4883 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4884 return regs_ever_live[REG_RA];
4885
4886 push_topmost_sequence ();
4887 top = get_insns ();
4888 pop_topmost_sequence ();
4889
4890 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4891 }
4892
4893 \f
4894 /* Return the trap mode suffix applicable to the current
4895 instruction, or NULL. */
4896
4897 static const char *
4898 get_trap_mode_suffix (void)
4899 {
4900 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4901
4902 switch (s)
4903 {
4904 case TRAP_SUFFIX_NONE:
4905 return NULL;
4906
4907 case TRAP_SUFFIX_SU:
4908 if (alpha_fptm >= ALPHA_FPTM_SU)
4909 return "su";
4910 return NULL;
4911
4912 case TRAP_SUFFIX_SUI:
4913 if (alpha_fptm >= ALPHA_FPTM_SUI)
4914 return "sui";
4915 return NULL;
4916
4917 case TRAP_SUFFIX_V_SV:
4918 switch (alpha_fptm)
4919 {
4920 case ALPHA_FPTM_N:
4921 return NULL;
4922 case ALPHA_FPTM_U:
4923 return "v";
4924 case ALPHA_FPTM_SU:
4925 case ALPHA_FPTM_SUI:
4926 return "sv";
4927 default:
4928 gcc_unreachable ();
4929 }
4930
4931 case TRAP_SUFFIX_V_SV_SVI:
4932 switch (alpha_fptm)
4933 {
4934 case ALPHA_FPTM_N:
4935 return NULL;
4936 case ALPHA_FPTM_U:
4937 return "v";
4938 case ALPHA_FPTM_SU:
4939 return "sv";
4940 case ALPHA_FPTM_SUI:
4941 return "svi";
4942 default:
4943 gcc_unreachable ();
4944 }
4945 break;
4946
4947 case TRAP_SUFFIX_U_SU_SUI:
4948 switch (alpha_fptm)
4949 {
4950 case ALPHA_FPTM_N:
4951 return NULL;
4952 case ALPHA_FPTM_U:
4953 return "u";
4954 case ALPHA_FPTM_SU:
4955 return "su";
4956 case ALPHA_FPTM_SUI:
4957 return "sui";
4958 default:
4959 gcc_unreachable ();
4960 }
4961 break;
4962
4963 default:
4964 gcc_unreachable ();
4965 }
4966 gcc_unreachable ();
4967 }
4968
4969 /* Return the rounding mode suffix applicable to the current
4970 instruction, or NULL. */
4971
4972 static const char *
4973 get_round_mode_suffix (void)
4974 {
4975 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4976
4977 switch (s)
4978 {
4979 case ROUND_SUFFIX_NONE:
4980 return NULL;
4981 case ROUND_SUFFIX_NORMAL:
4982 switch (alpha_fprm)
4983 {
4984 case ALPHA_FPRM_NORM:
4985 return NULL;
4986 case ALPHA_FPRM_MINF:
4987 return "m";
4988 case ALPHA_FPRM_CHOP:
4989 return "c";
4990 case ALPHA_FPRM_DYN:
4991 return "d";
4992 default:
4993 gcc_unreachable ();
4994 }
4995 break;
4996
4997 case ROUND_SUFFIX_C:
4998 return "c";
4999
5000 default:
5001 gcc_unreachable ();
5002 }
5003 gcc_unreachable ();
5004 }
5005
5006 /* Locate some local-dynamic symbol still in use by this function
5007 so that we can print its name in some movdi_er_tlsldm pattern. */
5008
5009 static int
5010 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5011 {
5012 rtx x = *px;
5013
5014 if (GET_CODE (x) == SYMBOL_REF
5015 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5016 {
5017 cfun->machine->some_ld_name = XSTR (x, 0);
5018 return 1;
5019 }
5020
5021 return 0;
5022 }
5023
5024 static const char *
5025 get_some_local_dynamic_name (void)
5026 {
5027 rtx insn;
5028
5029 if (cfun->machine->some_ld_name)
5030 return cfun->machine->some_ld_name;
5031
5032 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5033 if (INSN_P (insn)
5034 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5035 return cfun->machine->some_ld_name;
5036
5037 gcc_unreachable ();
5038 }
5039
5040 /* Print an operand. Recognize special options, documented below. */
5041
5042 void
5043 print_operand (FILE *file, rtx x, int code)
5044 {
5045 int i;
5046
5047 switch (code)
5048 {
5049 case '~':
5050 /* Print the assembler name of the current function. */
5051 assemble_name (file, alpha_fnname);
5052 break;
5053
5054 case '&':
5055 assemble_name (file, get_some_local_dynamic_name ());
5056 break;
5057
5058 case '/':
5059 {
5060 const char *trap = get_trap_mode_suffix ();
5061 const char *round = get_round_mode_suffix ();
5062
5063 if (trap || round)
5064 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5065 (trap ? trap : ""), (round ? round : ""));
5066 break;
5067 }
5068
5069 case ',':
5070 /* Generates the single-precision instruction suffix. */
5071 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5072 break;
5073
5074 case '-':
5075 /* Generates the double-precision instruction suffix. */
5076 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5077 break;
5078
5079 case '+':
5080 /* Generates a nop after a noreturn call at the very end of the
5081 function. */
5082 if (next_real_insn (current_output_insn) == 0)
5083 fprintf (file, "\n\tnop");
5084 break;
5085
5086 case '#':
5087 if (alpha_this_literal_sequence_number == 0)
5088 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5089 fprintf (file, "%d", alpha_this_literal_sequence_number);
5090 break;
5091
5092 case '*':
5093 if (alpha_this_gpdisp_sequence_number == 0)
5094 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5095 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5096 break;
5097
5098 case 'H':
5099 if (GET_CODE (x) == HIGH)
5100 output_addr_const (file, XEXP (x, 0));
5101 else
5102 output_operand_lossage ("invalid %%H value");
5103 break;
5104
5105 case 'J':
5106 {
5107 const char *lituse;
5108
5109 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5110 {
5111 x = XVECEXP (x, 0, 0);
5112 lituse = "lituse_tlsgd";
5113 }
5114 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5115 {
5116 x = XVECEXP (x, 0, 0);
5117 lituse = "lituse_tlsldm";
5118 }
5119 else if (GET_CODE (x) == CONST_INT)
5120 lituse = "lituse_jsr";
5121 else
5122 {
5123 output_operand_lossage ("invalid %%J value");
5124 break;
5125 }
5126
5127 if (x != const0_rtx)
5128 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5129 }
5130 break;
5131
5132 case 'j':
5133 {
5134 const char *lituse;
5135
5136 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5137 lituse = "lituse_jsrdirect";
5138 #else
5139 lituse = "lituse_jsr";
5140 #endif
5141
5142 gcc_assert (INTVAL (x) != 0);
5143 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5144 }
5145 break;
5146 case 'r':
5147 /* If this operand is the constant zero, write it as "$31". */
5148 if (GET_CODE (x) == REG)
5149 fprintf (file, "%s", reg_names[REGNO (x)]);
5150 else if (x == CONST0_RTX (GET_MODE (x)))
5151 fprintf (file, "$31");
5152 else
5153 output_operand_lossage ("invalid %%r value");
5154 break;
5155
5156 case 'R':
5157 /* Similar, but for floating-point. */
5158 if (GET_CODE (x) == REG)
5159 fprintf (file, "%s", reg_names[REGNO (x)]);
5160 else if (x == CONST0_RTX (GET_MODE (x)))
5161 fprintf (file, "$f31");
5162 else
5163 output_operand_lossage ("invalid %%R value");
5164 break;
5165
5166 case 'N':
5167 /* Write the 1's complement of a constant. */
5168 if (GET_CODE (x) != CONST_INT)
5169 output_operand_lossage ("invalid %%N value");
5170
5171 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5172 break;
5173
5174 case 'P':
5175 /* Write 1 << C, for a constant C. */
5176 if (GET_CODE (x) != CONST_INT)
5177 output_operand_lossage ("invalid %%P value");
5178
5179 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5180 break;
5181
5182 case 'h':
5183 /* Write the high-order 16 bits of a constant, sign-extended. */
5184 if (GET_CODE (x) != CONST_INT)
5185 output_operand_lossage ("invalid %%h value");
5186
5187 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5188 break;
5189
5190 case 'L':
5191 /* Write the low-order 16 bits of a constant, sign-extended. */
5192 if (GET_CODE (x) != CONST_INT)
5193 output_operand_lossage ("invalid %%L value");
5194
5195 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5196 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5197 break;
5198
5199 case 'm':
5200 /* Write mask for ZAP insn. */
5201 if (GET_CODE (x) == CONST_DOUBLE)
5202 {
5203 HOST_WIDE_INT mask = 0;
5204 HOST_WIDE_INT value;
5205
5206 value = CONST_DOUBLE_LOW (x);
5207 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5208 i++, value >>= 8)
5209 if (value & 0xff)
5210 mask |= (1 << i);
5211
5212 value = CONST_DOUBLE_HIGH (x);
5213 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5214 i++, value >>= 8)
5215 if (value & 0xff)
5216 mask |= (1 << (i + sizeof (int)));
5217
5218 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5219 }
5220
5221 else if (GET_CODE (x) == CONST_INT)
5222 {
5223 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5224
5225 for (i = 0; i < 8; i++, value >>= 8)
5226 if (value & 0xff)
5227 mask |= (1 << i);
5228
5229 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5230 }
5231 else
5232 output_operand_lossage ("invalid %%m value");
5233 break;
5234
5235 case 'M':
5236 /* Write 'b', 'w', 'l', or 'q' according to the value of the constant (8, 16, 32, or 64). */
5237 if (GET_CODE (x) != CONST_INT
5238 || (INTVAL (x) != 8 && INTVAL (x) != 16
5239 && INTVAL (x) != 32 && INTVAL (x) != 64))
5240 output_operand_lossage ("invalid %%M value");
5241
5242 fprintf (file, "%s",
5243 (INTVAL (x) == 8 ? "b"
5244 : INTVAL (x) == 16 ? "w"
5245 : INTVAL (x) == 32 ? "l"
5246 : "q"));
5247 break;
5248
5249 case 'U':
5250 /* Similar, except do it from the mask. */
5251 if (GET_CODE (x) == CONST_INT)
5252 {
5253 HOST_WIDE_INT value = INTVAL (x);
5254
5255 if (value == 0xff)
5256 {
5257 fputc ('b', file);
5258 break;
5259 }
5260 if (value == 0xffff)
5261 {
5262 fputc ('w', file);
5263 break;
5264 }
5265 if (value == 0xffffffff)
5266 {
5267 fputc ('l', file);
5268 break;
5269 }
5270 if (value == -1)
5271 {
5272 fputc ('q', file);
5273 break;
5274 }
5275 }
5276 else if (HOST_BITS_PER_WIDE_INT == 32
5277 && GET_CODE (x) == CONST_DOUBLE
5278 && CONST_DOUBLE_LOW (x) == 0xffffffff
5279 && CONST_DOUBLE_HIGH (x) == 0)
5280 {
5281 fputc ('l', file);
5282 break;
5283 }
5284 output_operand_lossage ("invalid %%U value");
5285 break;
5286
5287 case 's':
5288 /* Write the constant value divided by 8 for little-endian mode or
5289 (56 - value) / 8 for big-endian mode. */
5290
5291 if (GET_CODE (x) != CONST_INT
5292 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5293 ? 56
5294 : 64)
5295 || (INTVAL (x) & 7) != 0)
5296 output_operand_lossage ("invalid %%s value");
5297
5298 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5299 WORDS_BIG_ENDIAN
5300 ? (56 - INTVAL (x)) / 8
5301 : INTVAL (x) / 8);
5302 break;
5303
5304 case 'S':
5305 /* Same, except compute (64 - c) / 8 */
5306
5307 if (GET_CODE (x) != CONST_INT
5308 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5309 || (INTVAL (x) & 7) != 0)
5310 output_operand_lossage ("invalid %%S value");
5311
5312 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5313 break;
5314
5315 case 't':
5316 {
5317 /* On Unicos/Mk systems: use a DEX expression if the symbol
5318 clashes with a register name. */
5319 int dex = unicosmk_need_dex (x);
5320 if (dex)
5321 fprintf (file, "DEX(%d)", dex);
5322 else
5323 output_addr_const (file, x);
5324 }
5325 break;
5326
5327 case 'C': case 'D': case 'c': case 'd':
5328 /* Write out comparison name. */
5329 {
5330 enum rtx_code c = GET_CODE (x);
5331
5332 if (!COMPARISON_P (x))
5333 output_operand_lossage ("invalid %%C value");
5334
5335 else if (code == 'D')
5336 c = reverse_condition (c);
5337 else if (code == 'c')
5338 c = swap_condition (c);
5339 else if (code == 'd')
5340 c = swap_condition (reverse_condition (c));
5341
5342 if (c == LEU)
5343 fprintf (file, "ule");
5344 else if (c == LTU)
5345 fprintf (file, "ult");
5346 else if (c == UNORDERED)
5347 fprintf (file, "un");
5348 else
5349 fprintf (file, "%s", GET_RTX_NAME (c));
5350 }
5351 break;
5352
5353 case 'E':
5354 /* Write the divide or modulus operator. */
5355 switch (GET_CODE (x))
5356 {
5357 case DIV:
5358 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5359 break;
5360 case UDIV:
5361 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5362 break;
5363 case MOD:
5364 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5365 break;
5366 case UMOD:
5367 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5368 break;
5369 default:
5370 output_operand_lossage ("invalid %%E value");
5371 break;
5372 }
5373 break;
5374
5375 case 'A':
5376 /* Write "_u" for unaligned access. */
5377 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5378 fprintf (file, "_u");
5379 break;
5380
5381 case 0:
5382 if (GET_CODE (x) == REG)
5383 fprintf (file, "%s", reg_names[REGNO (x)]);
5384 else if (GET_CODE (x) == MEM)
5385 output_address (XEXP (x, 0));
5386 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5387 {
5388 switch (XINT (XEXP (x, 0), 1))
5389 {
5390 case UNSPEC_DTPREL:
5391 case UNSPEC_TPREL:
5392 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5393 break;
5394 default:
5395 output_operand_lossage ("unknown relocation unspec");
5396 break;
5397 }
5398 }
5399 else
5400 output_addr_const (file, x);
5401 break;
5402
5403 default:
5404 output_operand_lossage ("invalid %%xn code");
5405 }
5406 }
5407
5408 void
5409 print_operand_address (FILE *file, rtx addr)
5410 {
5411 int basereg = 31;
5412 HOST_WIDE_INT offset = 0;
5413
5414 if (GET_CODE (addr) == AND)
5415 addr = XEXP (addr, 0);
5416
5417 if (GET_CODE (addr) == PLUS
5418 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5419 {
5420 offset = INTVAL (XEXP (addr, 1));
5421 addr = XEXP (addr, 0);
5422 }
5423
5424 if (GET_CODE (addr) == LO_SUM)
5425 {
5426 const char *reloc16, *reloclo;
5427 rtx op1 = XEXP (addr, 1);
5428
5429 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5430 {
5431 op1 = XEXP (op1, 0);
5432 switch (XINT (op1, 1))
5433 {
5434 case UNSPEC_DTPREL:
5435 reloc16 = NULL;
5436 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5437 break;
5438 case UNSPEC_TPREL:
5439 reloc16 = NULL;
5440 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5441 break;
5442 default:
5443 output_operand_lossage ("unknown relocation unspec");
5444 return;
5445 }
5446
5447 output_addr_const (file, XVECEXP (op1, 0, 0));
5448 }
5449 else
5450 {
5451 reloc16 = "gprel";
5452 reloclo = "gprellow";
5453 output_addr_const (file, op1);
5454 }
5455
5456 if (offset)
5457 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5458
5459 addr = XEXP (addr, 0);
5460 switch (GET_CODE (addr))
5461 {
5462 case REG:
5463 basereg = REGNO (addr);
5464 break;
5465
5466 case SUBREG:
5467 basereg = subreg_regno (addr);
5468 break;
5469
5470 default:
5471 gcc_unreachable ();
5472 }
5473
5474 fprintf (file, "($%d)\t\t!%s", basereg,
5475 (basereg == 29 ? reloc16 : reloclo));
5476 return;
5477 }
5478
5479 switch (GET_CODE (addr))
5480 {
5481 case REG:
5482 basereg = REGNO (addr);
5483 break;
5484
5485 case SUBREG:
5486 basereg = subreg_regno (addr);
5487 break;
5488
5489 case CONST_INT:
5490 offset = INTVAL (addr);
5491 break;
5492
5493 #if TARGET_ABI_OPEN_VMS
5494 case SYMBOL_REF:
5495 fprintf (file, "%s", XSTR (addr, 0));
5496 return;
5497
5498 case CONST:
5499 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5500 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5501 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5502 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5503 INTVAL (XEXP (XEXP (addr, 0), 1)));
5504 return;
5505
5506 #endif
5507 default:
5508 gcc_unreachable ();
5509 }
5510
5511 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5512 }
5513 \f
5514 /* Emit RTL insns to initialize the variable parts of a trampoline at
5515 TRAMP. FNADDR is an RTX for the address of the function's pure
5516 code. CXT is an RTX for the static chain value for the function.
5517
5518 The three offset parameters are for the individual template's
5519 layout. A JMPOFS < 0 indicates that the trampoline does not
5520 contain instructions at all.
5521
5522 We assume here that a function will be called many more times than
5523 its address is taken (e.g., it might be passed to qsort), so we
5524 take the trouble to initialize the "hint" field in the JMP insn.
5525 Note that the hint field is PC (new) + 4 * bits 13:0. */
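/* For reference, the (disabled) hint computation below amounts to

	hint = ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff;

   merged into the low 14 bits of the JMP instruction word at
   TRAMP + JMPOFS.  */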
5526
5527 void
5528 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5529 int fnofs, int cxtofs, int jmpofs)
5530 {
5531 rtx temp, temp1, addr;
5532 /* VMS really uses DImode pointers in memory at this point. */
5533 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5534
5535 #ifdef POINTERS_EXTEND_UNSIGNED
5536 fnaddr = convert_memory_address (mode, fnaddr);
5537 cxt = convert_memory_address (mode, cxt);
5538 #endif
5539
5540 /* Store function address and CXT. */
5541 addr = memory_address (mode, plus_constant (tramp, fnofs));
5542 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5543 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5544 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5545
5546 /* This has been disabled since the hint only has a 32k range, and in
5547 no existing OS is the stack within 32k of the text segment. */
5548 if (0 && jmpofs >= 0)
5549 {
5550 /* Compute hint value. */
5551 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5552 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5553 OPTAB_WIDEN);
5554 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5555 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5556 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5557 GEN_INT (0x3fff), 0);
5558
5559 /* Merge in the hint. */
5560 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5561 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5562 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5563 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5564 OPTAB_WIDEN);
5565 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5566 }
5567
5568 #ifdef ENABLE_EXECUTE_STACK
5569 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5570 0, VOIDmode, 1, tramp, Pmode);
5571 #endif
5572
5573 if (jmpofs >= 0)
5574 emit_insn (gen_imb ());
5575 }
5576 \f
5577 /* Determine where to put an argument to a function.
5578 Value is zero to push the argument on the stack,
5579 or a hard register in which to store the argument.
5580
5581 MODE is the argument's machine mode.
5582 TYPE is the data type of the argument (as a tree).
5583 This is null for libcalls where that information may
5584 not be available.
5585 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5586 the preceding args and about the function being called.
5587 NAMED is nonzero if this argument is a named parameter
5588 (otherwise it is an extra parameter matching an ellipsis).
5589
5590 On Alpha the first 6 words of args are normally in registers
5591 and the rest are pushed. */
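/* For example (a sketch of the OSF/1 case): the third named integer
   argument is passed in $18 (register 16 + 2), a third floating-point
   argument in $f18 (register 48 + 2), and anything beyond the sixth
   argument word gets NULL_RTX here and is passed on the stack.  */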
5592
5593 rtx
5594 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5595 int named ATTRIBUTE_UNUSED)
5596 {
5597 int basereg;
5598 int num_args;
5599
5600 /* Don't get confused and pass small structures in FP registers. */
5601 if (type && AGGREGATE_TYPE_P (type))
5602 basereg = 16;
5603 else
5604 {
5605 #ifdef ENABLE_CHECKING
5606 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5607 values here. */
5608 gcc_assert (!COMPLEX_MODE_P (mode));
5609 #endif
5610
5611 /* Set up defaults for FP operands passed in FP registers, and
5612 integral operands passed in integer registers. */
5613 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5614 basereg = 32 + 16;
5615 else
5616 basereg = 16;
5617 }
5618
5619 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5620 the three platforms, so we can't avoid conditional compilation. */
5621 #if TARGET_ABI_OPEN_VMS
5622 {
5623 if (mode == VOIDmode)
5624 return alpha_arg_info_reg_val (cum);
5625
5626 num_args = cum.num_args;
5627 if (num_args >= 6
5628 || targetm.calls.must_pass_in_stack (mode, type))
5629 return NULL_RTX;
5630 }
5631 #elif TARGET_ABI_UNICOSMK
5632 {
5633 int size;
5634
5635 /* If this is the last argument, generate the call info word (CIW). */
5636 /* ??? We don't include the caller's line number in the CIW because
5637 I don't know how to determine it if debug infos are turned off. */
5638 if (mode == VOIDmode)
5639 {
5640 int i;
5641 HOST_WIDE_INT lo;
5642 HOST_WIDE_INT hi;
5643 rtx ciw;
5644
5645 lo = 0;
5646
5647 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5648 if (cum.reg_args_type[i])
5649 lo |= (1 << (7 - i));
5650
5651 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5652 lo |= 7;
5653 else
5654 lo |= cum.num_reg_words;
5655
5656 #if HOST_BITS_PER_WIDE_INT == 32
5657 hi = (cum.num_args << 20) | cum.num_arg_words;
5658 #else
5659 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5660 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5661 hi = 0;
5662 #endif
5663 ciw = immed_double_const (lo, hi, DImode);
5664
5665 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5666 UNSPEC_UMK_LOAD_CIW);
5667 }
5668
5669 size = ALPHA_ARG_SIZE (mode, type, named);
5670 num_args = cum.num_reg_words;
5671 if (cum.force_stack
5672 || cum.num_reg_words + size > 6
5673 || targetm.calls.must_pass_in_stack (mode, type))
5674 return NULL_RTX;
5675 else if (type && TYPE_MODE (type) == BLKmode)
5676 {
5677 rtx reg1, reg2;
5678
5679 reg1 = gen_rtx_REG (DImode, num_args + 16);
5680 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5681
5682 /* The argument fits in two registers. Note that we still need to
5683 reserve a register for empty structures. */
5684 if (size == 0)
5685 return NULL_RTX;
5686 else if (size == 1)
5687 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5688 else
5689 {
5690 reg2 = gen_rtx_REG (DImode, num_args + 17);
5691 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5692 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5693 }
5694 }
5695 }
5696 #elif TARGET_ABI_OSF
5697 {
5698 if (cum >= 6)
5699 return NULL_RTX;
5700 num_args = cum;
5701
5702 /* VOID is passed as a special flag for "last argument". */
5703 if (type == void_type_node)
5704 basereg = 16;
5705 else if (targetm.calls.must_pass_in_stack (mode, type))
5706 return NULL_RTX;
5707 }
5708 #else
5709 #error Unhandled ABI
5710 #endif
5711
5712 return gen_rtx_REG (mode, num_args + basereg);
5713 }
5714
5715 static int
5716 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5717 enum machine_mode mode ATTRIBUTE_UNUSED,
5718 tree type ATTRIBUTE_UNUSED,
5719 bool named ATTRIBUTE_UNUSED)
5720 {
5721 int words = 0;
5722
5723 #if TARGET_ABI_OPEN_VMS
5724 if (cum->num_args < 6
5725 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5726 words = 6 - cum->num_args;
5727 #elif TARGET_ABI_UNICOSMK
5728 /* Never any split arguments. */
5729 #elif TARGET_ABI_OSF
5730 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5731 words = 6 - *cum;
5732 #else
5733 #error Unhandled ABI
5734 #endif
5735
5736 return words * UNITS_PER_WORD;
5737 }
5738
5739
5740 /* Return true if TYPE must be returned in memory, instead of in registers. */
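/* For instance, every aggregate and every float vector goes back in
   memory, while a complex double does not: it is judged by its 8-byte
   element size and is returned in a floating-point register pair by
   function_value below.  */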
5741
5742 static bool
5743 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5744 {
5745 enum machine_mode mode = VOIDmode;
5746 int size;
5747
5748 if (type)
5749 {
5750 mode = TYPE_MODE (type);
5751
5752 /* All aggregates are returned in memory. */
5753 if (AGGREGATE_TYPE_P (type))
5754 return true;
5755 }
5756
5757 size = GET_MODE_SIZE (mode);
5758 switch (GET_MODE_CLASS (mode))
5759 {
5760 case MODE_VECTOR_FLOAT:
5761 /* Pass all float vectors in memory, like an aggregate. */
5762 return true;
5763
5764 case MODE_COMPLEX_FLOAT:
5765 /* We judge complex floats on the size of their element,
5766 not the size of the whole type. */
5767 size = GET_MODE_UNIT_SIZE (mode);
5768 break;
5769
5770 case MODE_INT:
5771 case MODE_FLOAT:
5772 case MODE_COMPLEX_INT:
5773 case MODE_VECTOR_INT:
5774 break;
5775
5776 default:
5777 /* ??? We get called on all sorts of random stuff from
5778 aggregate_value_p. We must return something, but it's not
5779 clear what's safe to return. Pretend it's a struct I
5780 guess. */
5781 return true;
5782 }
5783
5784 /* Otherwise types must fit in one register. */
5785 return size > UNITS_PER_WORD;
5786 }
5787
5788 /* Return true if TYPE should be passed by invisible reference. */
5789
5790 static bool
5791 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5792 enum machine_mode mode,
5793 tree type ATTRIBUTE_UNUSED,
5794 bool named ATTRIBUTE_UNUSED)
5795 {
5796 return mode == TFmode || mode == TCmode;
5797 }
5798
5799 /* Define how to find the value returned by a function. VALTYPE is the
5800 data type of the value (as a tree). If the precise function being
5801 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5802 MODE is set instead of VALTYPE for libcalls.
5803
5804 On Alpha the value is found in $0 for integer functions and
5805 $f0 for floating-point functions. */
5806
5807 rtx
5808 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5809 enum machine_mode mode)
5810 {
5811 unsigned int regnum, dummy;
5812 enum mode_class class;
5813
5814 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5815
5816 if (valtype)
5817 mode = TYPE_MODE (valtype);
5818
5819 class = GET_MODE_CLASS (mode);
5820 switch (class)
5821 {
5822 case MODE_INT:
5823 PROMOTE_MODE (mode, dummy, valtype);
5824 /* FALLTHRU */
5825
5826 case MODE_COMPLEX_INT:
5827 case MODE_VECTOR_INT:
5828 regnum = 0;
5829 break;
5830
5831 case MODE_FLOAT:
5832 regnum = 32;
5833 break;
5834
5835 case MODE_COMPLEX_FLOAT:
5836 {
5837 enum machine_mode cmode = GET_MODE_INNER (mode);
5838
5839 return gen_rtx_PARALLEL
5840 (VOIDmode,
5841 gen_rtvec (2,
5842 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5843 const0_rtx),
5844 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5845 GEN_INT (GET_MODE_SIZE (cmode)))));
5846 }
5847
5848 default:
5849 gcc_unreachable ();
5850 }
5851
5852 return gen_rtx_REG (mode, regnum);
5853 }
5854
5855 /* TCmode complex values are passed by invisible reference. We
5856 should not split these values. */
5857
5858 static bool
5859 alpha_split_complex_arg (tree type)
5860 {
5861 return TYPE_MODE (type) != TCmode;
5862 }
5863
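/* Build the builtin va_list type.  For OSF the record constructed
   below corresponds roughly to

	struct __va_list_tag
	{
	  void *__base;		-- pointer into the argument area
	  int __offset;		-- byte offset of the next argument
	};

   plus an artificial padding field to silence alignment warnings.
   VMS and Unicos/Mk simply use a plain pointer.  */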
5864 static tree
5865 alpha_build_builtin_va_list (void)
5866 {
5867 tree base, ofs, space, record, type_decl;
5868
5869 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5870 return ptr_type_node;
5871
5872 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5873 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5874 TREE_CHAIN (record) = type_decl;
5875 TYPE_NAME (record) = type_decl;
5876
5877 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5878
5879 /* Dummy field to prevent alignment warnings. */
5880 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5881 DECL_FIELD_CONTEXT (space) = record;
5882 DECL_ARTIFICIAL (space) = 1;
5883 DECL_IGNORED_P (space) = 1;
5884
5885 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5886 integer_type_node);
5887 DECL_FIELD_CONTEXT (ofs) = record;
5888 TREE_CHAIN (ofs) = space;
5889
5890 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5891 ptr_type_node);
5892 DECL_FIELD_CONTEXT (base) = record;
5893 TREE_CHAIN (base) = ofs;
5894
5895 TYPE_FIELDS (record) = base;
5896 layout_type (record);
5897
5898 va_list_gpr_counter_field = ofs;
5899 return record;
5900 }
5901
5902 #if TARGET_ABI_OSF
5903 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5904 and constant additions. */
5905
5906 static tree
5907 va_list_skip_additions (tree lhs)
5908 {
5909 tree rhs, stmt;
5910
5911 if (TREE_CODE (lhs) != SSA_NAME)
5912 return lhs;
5913
5914 for (;;)
5915 {
5916 stmt = SSA_NAME_DEF_STMT (lhs);
5917
5918 if (TREE_CODE (stmt) == PHI_NODE)
5919 return stmt;
5920
5921 if (TREE_CODE (stmt) != MODIFY_EXPR
5922 || TREE_OPERAND (stmt, 0) != lhs)
5923 return lhs;
5924
5925 rhs = TREE_OPERAND (stmt, 1);
5926 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5927 rhs = TREE_OPERAND (rhs, 0);
5928
5929 if ((TREE_CODE (rhs) != NOP_EXPR
5930 && TREE_CODE (rhs) != CONVERT_EXPR
5931 && (TREE_CODE (rhs) != PLUS_EXPR
5932 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5933 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5934 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5935 return rhs;
5936
5937 lhs = TREE_OPERAND (rhs, 0);
5938 }
5939 }
5940
5941 /* Check if LHS = RHS statement is
5942 LHS = *(ap.__base + ap.__offset + cst)
5943 or
5944 LHS = *(ap.__base
5945 + ((ap.__offset + cst <= 47)
5946 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5947 If the former, indicate that GPR registers are needed,
5948 if the latter, indicate that FPR registers are needed.
5949 On alpha, cfun->va_list_gpr_size is used as size of the needed
5950 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5951 GPR registers are needed and bit 1 set if FPR registers are needed.
5952 Return true if va_list references should not be scanned for the current
5953 statement. */
5954
5955 static bool
5956 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5957 {
5958 tree base, offset, arg1, arg2;
5959 int offset_arg = 1;
5960
5961 if (TREE_CODE (rhs) != INDIRECT_REF
5962 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5963 return false;
5964
5965 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5966 if (lhs == NULL_TREE
5967 || TREE_CODE (lhs) != PLUS_EXPR)
5968 return false;
5969
5970 base = TREE_OPERAND (lhs, 0);
5971 if (TREE_CODE (base) == SSA_NAME)
5972 base = va_list_skip_additions (base);
5973
5974 if (TREE_CODE (base) != COMPONENT_REF
5975 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5976 {
5977 base = TREE_OPERAND (lhs, 0);
5978 if (TREE_CODE (base) == SSA_NAME)
5979 base = va_list_skip_additions (base);
5980
5981 if (TREE_CODE (base) != COMPONENT_REF
5982 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5983 return false;
5984
5985 offset_arg = 0;
5986 }
5987
5988 base = get_base_address (base);
5989 if (TREE_CODE (base) != VAR_DECL
5990 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5991 return false;
5992
5993 offset = TREE_OPERAND (lhs, offset_arg);
5994 if (TREE_CODE (offset) == SSA_NAME)
5995 offset = va_list_skip_additions (offset);
5996
5997 if (TREE_CODE (offset) == PHI_NODE)
5998 {
5999 HOST_WIDE_INT sub;
6000
6001 if (PHI_NUM_ARGS (offset) != 2)
6002 goto escapes;
6003
6004 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
6005 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
6006 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6007 {
6008 tree tem = arg1;
6009 arg1 = arg2;
6010 arg2 = tem;
6011
6012 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6013 goto escapes;
6014 }
6015 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
6016 goto escapes;
6017
6018 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
6019 if (TREE_CODE (arg2) == MINUS_EXPR)
6020 sub = -sub;
6021 if (sub < -48 || sub > -32)
6022 goto escapes;
6023
6024 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6025 if (arg1 != arg2)
6026 goto escapes;
6027
6028 if (TREE_CODE (arg1) == SSA_NAME)
6029 arg1 = va_list_skip_additions (arg1);
6030
6031 if (TREE_CODE (arg1) != COMPONENT_REF
6032 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6033 || get_base_address (arg1) != base)
6034 goto escapes;
6035
6036 /* Need floating point regs. */
6037 cfun->va_list_fpr_size |= 2;
6038 }
6039 else if (TREE_CODE (offset) != COMPONENT_REF
6040 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6041 || get_base_address (offset) != base)
6042 goto escapes;
6043 else
6044 /* Need general regs. */
6045 cfun->va_list_fpr_size |= 1;
6046 return false;
6047
6048 escapes:
6049 si->va_list_escapes = true;
6050 return false;
6051 }
6052 #endif
6053
6054 /* Perform any actions needed for a function that is receiving a
6055    variable number of arguments. */
6056
6057 static void
6058 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6059 tree type, int *pretend_size, int no_rtl)
6060 {
6061 CUMULATIVE_ARGS cum = *pcum;
6062
6063 /* Skip the current argument. */
6064 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6065
6066 #if TARGET_ABI_UNICOSMK
6067 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6068 arguments on the stack. Unfortunately, it doesn't always store the first
6069 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6070 with stdargs as we always have at least one named argument there. */
6071 if (cum.num_reg_words < 6)
6072 {
6073 if (!no_rtl)
6074 {
6075 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6076 emit_insn (gen_arg_home_umk ());
6077 }
6078 *pretend_size = 0;
6079 }
6080 #elif TARGET_ABI_OPEN_VMS
6081 /* For VMS, we allocate space for all 6 arg registers plus a count.
6082
6083 However, if NO registers need to be saved, don't allocate any space.
6084 This is not only because we won't need the space, but because AP
6085 includes the current_pretend_args_size and we don't want to mess up
6086 any ap-relative addresses already made. */
6087 if (cum.num_args < 6)
6088 {
6089 if (!no_rtl)
6090 {
6091 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6092 emit_insn (gen_arg_home ());
6093 }
6094 *pretend_size = 7 * UNITS_PER_WORD;
6095 }
6096 #else
6097 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6098 only push those that are remaining. However, if NO registers need to
6099 be saved, don't allocate any space. This is not only because we won't
6100 need the space, but because AP includes the current_pretend_args_size
6101 and we don't want to mess up any ap-relative addresses already made.
6102
6103 If we are not to use the floating-point registers, save the integer
6104 registers where we would put the floating-point registers. This is
6105 not the most efficient way to implement varargs with just one register
6106 class, but it isn't worth doing anything more efficient in this rare
6107 case. */
6108 if (cum >= 6)
6109 return;
6110
6111 if (!no_rtl)
6112 {
6113 int count, set = get_varargs_alias_set ();
6114 rtx tmp;
6115
6116 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6117 if (count > 6 - cum)
6118 count = 6 - cum;
6119
6120      /* Detect whether integer registers or floating-point registers
6121         are needed by the va_arg statements found above. See
6122         alpha_stdarg_optimize_hook for how these values are computed.
6123         Note that the "escape" value is VA_LIST_MAX_FPR_SIZE (255),
6124         which has both of these bits set. */
6125 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6126
6127 if (cfun->va_list_fpr_size & 1)
6128 {
6129 tmp = gen_rtx_MEM (BLKmode,
6130 plus_constant (virtual_incoming_args_rtx,
6131 (cum + 6) * UNITS_PER_WORD));
6132 MEM_NOTRAP_P (tmp) = 1;
6133 set_mem_alias_set (tmp, set);
6134 move_block_from_reg (16 + cum, tmp, count);
6135 }
6136
6137 if (cfun->va_list_fpr_size & 2)
6138 {
6139 tmp = gen_rtx_MEM (BLKmode,
6140 plus_constant (virtual_incoming_args_rtx,
6141 cum * UNITS_PER_WORD));
6142 MEM_NOTRAP_P (tmp) = 1;
6143 set_mem_alias_set (tmp, set);
6144 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6145 }
6146 }
6147 *pretend_size = 12 * UNITS_PER_WORD;
6148 #endif
6149 }
6150
6151 void
6152 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6153 {
6154 HOST_WIDE_INT offset;
6155 tree t, offset_field, base_field;
6156
6157 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6158 return;
6159
6160   if (TARGET_ABI_UNICOSMK)
6161     {
          std_expand_builtin_va_start (valist, nextarg);
          return;
        }
6162
6163 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6164 up by 48, storing fp arg registers in the first 48 bytes, and the
6165 integer arg registers in the next 48 bytes. This is only done,
6166 however, if any integer registers need to be stored.
6167
6168 If no integer registers need be stored, then we must subtract 48
6169 in order to account for the integer arg registers which are counted
6170 in argsize above, but which are not actually stored on the stack.
6171 Must further be careful here about structures straddling the last
6172 integer argument register; that futzes with pretend_args_size,
6173 which changes the meaning of AP. */
6174
6175 if (NUM_ARGS < 6)
6176 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6177 else
6178 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6179
6180 if (TARGET_ABI_OPEN_VMS)
6181 {
6182 nextarg = plus_constant (nextarg, offset);
6183 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6184 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6185 make_tree (ptr_type_node, nextarg));
6186 TREE_SIDE_EFFECTS (t) = 1;
6187
6188 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6189 }
6190 else
6191 {
6192 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6193 offset_field = TREE_CHAIN (base_field);
6194
6195 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6196 valist, base_field, NULL_TREE);
6197 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6198 valist, offset_field, NULL_TREE);
6199
6200 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6201 t = build2 (PLUS_EXPR, ptr_type_node, t,
6202 build_int_cst (NULL_TREE, offset));
6203 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6204 TREE_SIDE_EFFECTS (t) = 1;
6205 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6206
6207 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6208 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6209 TREE_SIDE_EFFECTS (t) = 1;
6210 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6211 }
6212 }
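/* For illustration (OSF ABI, when fewer than 6 named arguments are
   passed, so the register save area set up by
   alpha_setup_incoming_varargs is present):

       AP +  0 .. AP + 47    slots for FP argument registers  $f16-$f21
       AP + 48 .. AP + 95    slots for integer argument regs  $16-$21
       AP + 96 and up        arguments passed on the stack

   where AP is the virtual incoming-args pointer.  va_start then sets
   __base = AP + 48 and __offset = NUM_ARGS * 8, so integer arguments
   are fetched at __base + __offset and FP arguments 48 bytes lower,
   as arranged by alpha_gimplify_va_arg_1 below.  */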
6213
6214 static tree
6215 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6216 {
6217 tree type_size, ptr_type, addend, t, addr, internal_post;
6218
6219 /* If the type could not be passed in registers, skip the block
6220 reserved for the registers. */
6221 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6222 {
6223 t = build_int_cst (TREE_TYPE (offset), 6*8);
6224 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6225 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6226 gimplify_and_add (t, pre_p);
6227 }
6228
6229 addend = offset;
6230 ptr_type = build_pointer_type (type);
6231
6232 if (TREE_CODE (type) == COMPLEX_TYPE)
6233 {
6234 tree real_part, imag_part, real_temp;
6235
6236 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6237 offset, pre_p);
6238
6239 /* Copy the value into a new temporary, lest the formal temporary
6240 be reused out from under us. */
6241 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6242
6243 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6244 offset, pre_p);
6245
6246 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6247 }
6248 else if (TREE_CODE (type) == REAL_TYPE)
6249 {
6250 tree fpaddend, cond, fourtyeight;
6251
6252 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6253 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6254 addend, fourtyeight);
6255 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6256 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6257 fpaddend, addend);
6258 }
6259
6260 /* Build the final address and force that value into a temporary. */
6261 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6262 fold_convert (ptr_type, addend));
6263 internal_post = NULL;
6264 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6265 append_to_statement_list (internal_post, pre_p);
6266
6267 /* Update the offset field. */
6268 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6269 if (type_size == NULL || TREE_OVERFLOW (type_size))
6270 t = size_zero_node;
6271 else
6272 {
6273 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6274 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6275 t = size_binop (MULT_EXPR, t, size_int (8));
6276 }
6277 t = fold_convert (TREE_TYPE (offset), t);
6278 t = build2 (MODIFY_EXPR, void_type_node, offset,
6279 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6280 gimplify_and_add (t, pre_p);
6281
6282 return build_va_arg_indirect_ref (addr);
6283 }
6284
6285 static tree
6286 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6287 {
6288 tree offset_field, base_field, offset, base, t, r;
6289 bool indirect;
6290
6291 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6292 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6293
6294 base_field = TYPE_FIELDS (va_list_type_node);
6295 offset_field = TREE_CHAIN (base_field);
6296 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6297 valist, base_field, NULL_TREE);
6298 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6299 valist, offset_field, NULL_TREE);
6300
6301 /* Pull the fields of the structure out into temporaries. Since we never
6302 modify the base field, we can use a formal temporary. Sign-extend the
6303 offset field so that it's the proper width for pointer arithmetic. */
6304 base = get_formal_tmp_var (base_field, pre_p);
6305
6306 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6307 offset = get_initialized_tmp_var (t, pre_p, NULL);
6308
6309 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6310 if (indirect)
6311 type = build_pointer_type (type);
6312
6313 /* Find the value. Note that this will be a stable indirection, or
6314 a composite of stable indirections in the case of complex. */
6315 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6316
6317 /* Stuff the offset temporary back into its field. */
6318 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6319 fold_convert (TREE_TYPE (offset_field), offset));
6320 gimplify_and_add (t, pre_p);
6321
6322 if (indirect)
6323 r = build_va_arg_indirect_ref (r);
6324
6325 return r;
6326 }
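/* A rough sketch, for illustration, of what the gimplification above
   produces for va_arg (ap, long) and va_arg (ap, double); the names
   are hypothetical:

       t = (long) ap.__offset;                     -- sign-extended
       p = ap.__base + t;                          -- integer case
       p = ap.__base + (t < 48 ? t - 48 : t);      -- floating-point case
       ap.__offset = t + 8;                        -- size rounded up to 8
       result = *(T *) p;

   Types that must be passed in memory first bump the offset up to at
   least 48, complex types are read as two such loads, and types passed
   by reference get one extra indirection.  */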
6327 \f
6328 /* Builtins. */
6329
6330 enum alpha_builtin
6331 {
6332 ALPHA_BUILTIN_CMPBGE,
6333 ALPHA_BUILTIN_EXTBL,
6334 ALPHA_BUILTIN_EXTWL,
6335 ALPHA_BUILTIN_EXTLL,
6336 ALPHA_BUILTIN_EXTQL,
6337 ALPHA_BUILTIN_EXTWH,
6338 ALPHA_BUILTIN_EXTLH,
6339 ALPHA_BUILTIN_EXTQH,
6340 ALPHA_BUILTIN_INSBL,
6341 ALPHA_BUILTIN_INSWL,
6342 ALPHA_BUILTIN_INSLL,
6343 ALPHA_BUILTIN_INSQL,
6344 ALPHA_BUILTIN_INSWH,
6345 ALPHA_BUILTIN_INSLH,
6346 ALPHA_BUILTIN_INSQH,
6347 ALPHA_BUILTIN_MSKBL,
6348 ALPHA_BUILTIN_MSKWL,
6349 ALPHA_BUILTIN_MSKLL,
6350 ALPHA_BUILTIN_MSKQL,
6351 ALPHA_BUILTIN_MSKWH,
6352 ALPHA_BUILTIN_MSKLH,
6353 ALPHA_BUILTIN_MSKQH,
6354 ALPHA_BUILTIN_UMULH,
6355 ALPHA_BUILTIN_ZAP,
6356 ALPHA_BUILTIN_ZAPNOT,
6357 ALPHA_BUILTIN_AMASK,
6358 ALPHA_BUILTIN_IMPLVER,
6359 ALPHA_BUILTIN_RPCC,
6360 ALPHA_BUILTIN_THREAD_POINTER,
6361 ALPHA_BUILTIN_SET_THREAD_POINTER,
6362
6363 /* TARGET_MAX */
6364 ALPHA_BUILTIN_MINUB8,
6365 ALPHA_BUILTIN_MINSB8,
6366 ALPHA_BUILTIN_MINUW4,
6367 ALPHA_BUILTIN_MINSW4,
6368 ALPHA_BUILTIN_MAXUB8,
6369 ALPHA_BUILTIN_MAXSB8,
6370 ALPHA_BUILTIN_MAXUW4,
6371 ALPHA_BUILTIN_MAXSW4,
6372 ALPHA_BUILTIN_PERR,
6373 ALPHA_BUILTIN_PKLB,
6374 ALPHA_BUILTIN_PKWB,
6375 ALPHA_BUILTIN_UNPKBL,
6376 ALPHA_BUILTIN_UNPKBW,
6377
6378 /* TARGET_CIX */
6379 ALPHA_BUILTIN_CTTZ,
6380 ALPHA_BUILTIN_CTLZ,
6381 ALPHA_BUILTIN_CTPOP,
6382
6383 ALPHA_BUILTIN_max
6384 };
6385
6386 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6387 CODE_FOR_builtin_cmpbge,
6388 CODE_FOR_builtin_extbl,
6389 CODE_FOR_builtin_extwl,
6390 CODE_FOR_builtin_extll,
6391 CODE_FOR_builtin_extql,
6392 CODE_FOR_builtin_extwh,
6393 CODE_FOR_builtin_extlh,
6394 CODE_FOR_builtin_extqh,
6395 CODE_FOR_builtin_insbl,
6396 CODE_FOR_builtin_inswl,
6397 CODE_FOR_builtin_insll,
6398 CODE_FOR_builtin_insql,
6399 CODE_FOR_builtin_inswh,
6400 CODE_FOR_builtin_inslh,
6401 CODE_FOR_builtin_insqh,
6402 CODE_FOR_builtin_mskbl,
6403 CODE_FOR_builtin_mskwl,
6404 CODE_FOR_builtin_mskll,
6405 CODE_FOR_builtin_mskql,
6406 CODE_FOR_builtin_mskwh,
6407 CODE_FOR_builtin_msklh,
6408 CODE_FOR_builtin_mskqh,
6409 CODE_FOR_umuldi3_highpart,
6410 CODE_FOR_builtin_zap,
6411 CODE_FOR_builtin_zapnot,
6412 CODE_FOR_builtin_amask,
6413 CODE_FOR_builtin_implver,
6414 CODE_FOR_builtin_rpcc,
6415 CODE_FOR_load_tp,
6416 CODE_FOR_set_tp,
6417
6418 /* TARGET_MAX */
6419 CODE_FOR_builtin_minub8,
6420 CODE_FOR_builtin_minsb8,
6421 CODE_FOR_builtin_minuw4,
6422 CODE_FOR_builtin_minsw4,
6423 CODE_FOR_builtin_maxub8,
6424 CODE_FOR_builtin_maxsb8,
6425 CODE_FOR_builtin_maxuw4,
6426 CODE_FOR_builtin_maxsw4,
6427 CODE_FOR_builtin_perr,
6428 CODE_FOR_builtin_pklb,
6429 CODE_FOR_builtin_pkwb,
6430 CODE_FOR_builtin_unpkbl,
6431 CODE_FOR_builtin_unpkbw,
6432
6433 /* TARGET_CIX */
6434 CODE_FOR_ctzdi2,
6435 CODE_FOR_clzdi2,
6436 CODE_FOR_popcountdi2
6437 };
6438
6439 struct alpha_builtin_def
6440 {
6441 const char *name;
6442 enum alpha_builtin code;
6443 unsigned int target_mask;
6444 bool is_const;
6445 };
6446
6447 static struct alpha_builtin_def const zero_arg_builtins[] = {
6448 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6449 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6450 };
6451
6452 static struct alpha_builtin_def const one_arg_builtins[] = {
6453 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6454 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6455 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6456 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6457 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6458 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6459 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6460 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6461 };
6462
6463 static struct alpha_builtin_def const two_arg_builtins[] = {
6464 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6465 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6466 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6467 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6468 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6469 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6470 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6471 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6472 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6473 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6474 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6475 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6476 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6477 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6478 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6479 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6480 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6481 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6482 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6483 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6484 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6485 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6486 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6487 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6488 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6489 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6490 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6491 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6492 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6493 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6494 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6495 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6496 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6497 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6498 };
6499
6500 static GTY(()) tree alpha_v8qi_u;
6501 static GTY(()) tree alpha_v8qi_s;
6502 static GTY(()) tree alpha_v4hi_u;
6503 static GTY(()) tree alpha_v4hi_s;
6504
6505 static void
6506 alpha_init_builtins (void)
6507 {
6508 const struct alpha_builtin_def *p;
6509 tree dimode_integer_type_node;
6510 tree ftype, attrs[2];
6511 size_t i;
6512
6513 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6514
6515 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6516 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6517
6518 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6519
6520 p = zero_arg_builtins;
6521 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6522 if ((target_flags & p->target_mask) == p->target_mask)
6523 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6524 NULL, attrs[p->is_const]);
6525
6526 ftype = build_function_type_list (dimode_integer_type_node,
6527 dimode_integer_type_node, NULL_TREE);
6528
6529 p = one_arg_builtins;
6530 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6531 if ((target_flags & p->target_mask) == p->target_mask)
6532 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6533 NULL, attrs[p->is_const]);
6534
6535 ftype = build_function_type_list (dimode_integer_type_node,
6536 dimode_integer_type_node,
6537 dimode_integer_type_node, NULL_TREE);
6538
6539 p = two_arg_builtins;
6540 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6541 if ((target_flags & p->target_mask) == p->target_mask)
6542 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6543 NULL, attrs[p->is_const]);
6544
6545 ftype = build_function_type (ptr_type_node, void_list_node);
6546 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6547 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6548 NULL, attrs[0]);
6549
6550 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6551 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6552 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6553 NULL, attrs[0]);
6554
6555 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6556 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6557 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6558 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6559 }
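/* For illustration, user code compiled for Alpha can then call these
   directly, e.g.:

       long lo4  = __builtin_alpha_zapnot (x, 0x0f);   -- keep bytes 0-3
       long high = __builtin_alpha_umulh (a, b);       -- high 64 bits of a*b
       long sad  = __builtin_alpha_perr (a, b);        -- requires -mmax

   The MAX and CIX entries are only registered when the corresponding
   target_flags bits are set, as tested in the loops above.  */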
6560
6561 /* Expand an expression EXP that calls a built-in function,
6562 with result going to TARGET if that's convenient
6563 (and in mode MODE if that's convenient).
6564 SUBTARGET may be used as the target for computing one of EXP's operands.
6565 IGNORE is nonzero if the value is to be ignored. */
6566
6567 static rtx
6568 alpha_expand_builtin (tree exp, rtx target,
6569 rtx subtarget ATTRIBUTE_UNUSED,
6570 enum machine_mode mode ATTRIBUTE_UNUSED,
6571 int ignore ATTRIBUTE_UNUSED)
6572 {
6573 #define MAX_ARGS 2
6574
6575 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6576 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6577 tree arglist = TREE_OPERAND (exp, 1);
6578 enum insn_code icode;
6579 rtx op[MAX_ARGS], pat;
6580 int arity;
6581 bool nonvoid;
6582
6583 if (fcode >= ALPHA_BUILTIN_max)
6584 internal_error ("bad builtin fcode");
6585 icode = code_for_builtin[fcode];
6586 if (icode == 0)
6587 internal_error ("bad builtin fcode");
6588
6589 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6590
6591 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6592 arglist;
6593 arglist = TREE_CHAIN (arglist), arity++)
6594 {
6595 const struct insn_operand_data *insn_op;
6596
6597 tree arg = TREE_VALUE (arglist);
6598 if (arg == error_mark_node)
6599 return NULL_RTX;
6600       if (arity >= MAX_ARGS)
6601 return NULL_RTX;
6602
6603 insn_op = &insn_data[icode].operand[arity + nonvoid];
6604
6605 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6606
6607 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6608 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6609 }
6610
6611 if (nonvoid)
6612 {
6613 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6614 if (!target
6615 || GET_MODE (target) != tmode
6616 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6617 target = gen_reg_rtx (tmode);
6618 }
6619
6620 switch (arity)
6621 {
6622 case 0:
6623 pat = GEN_FCN (icode) (target);
6624 break;
6625 case 1:
6626 if (nonvoid)
6627 pat = GEN_FCN (icode) (target, op[0]);
6628 else
6629 pat = GEN_FCN (icode) (op[0]);
6630 break;
6631 case 2:
6632 pat = GEN_FCN (icode) (target, op[0], op[1]);
6633 break;
6634 default:
6635 gcc_unreachable ();
6636 }
6637 if (!pat)
6638 return NULL_RTX;
6639 emit_insn (pat);
6640
6641 if (nonvoid)
6642 return target;
6643 else
6644 return const0_rtx;
6645 }
6646
6647
6648 /* Several bits below assume HWI >= 64 bits. This should be enforced
6649 by config.gcc. */
6650 #if HOST_BITS_PER_WIDE_INT < 64
6651 # error "HOST_WIDE_INT too small"
6652 #endif
6653
6654 /* Fold the builtin for the CMPBGE instruction. This is a byte-wise vector
6655    comparison producing an 8-bit result mask. OPINT contains the integer operands; bit N
6656 of OP_CONST is set if OPINT[N] is valid. */
6657
6658 static tree
6659 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6660 {
6661 if (op_const == 3)
6662 {
6663 int i, val;
6664 for (i = 0, val = 0; i < 8; ++i)
6665 {
6666 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6667 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6668 if (c0 >= c1)
6669 val |= 1 << i;
6670 }
6671 return build_int_cst (long_integer_type_node, val);
6672 }
6673 else if (op_const == 2 && opint[1] == 0)
6674 return build_int_cst (long_integer_type_node, 0xff);
6675 return NULL;
6676 }
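/* Reference semantics, for illustration (cmpbge_ref is a hypothetical
   helper, not part of this file):

       static unsigned long
       cmpbge_ref (unsigned long a, unsigned long b)
       {
         unsigned long res = 0;
         int i;
         for (i = 0; i < 8; i++)
           if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
             res |= 1ul << i;
         return res;
       }

   The op_const == 2 case above relies on every byte being >= 0, hence
   the constant 0xff result when the second operand is zero.  */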
6677
6678 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6679 specialized form of an AND operation. Other byte manipulation instructions
6680 are defined in terms of this instruction, so this is also used as a
6681 subroutine for other builtins.
6682
6683 OP contains the tree operands; OPINT contains the extracted integer values.
6684    Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6685 OPINT may be considered. */
6686
6687 static tree
6688 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6689 long op_const)
6690 {
6691 if (op_const & 2)
6692 {
6693 unsigned HOST_WIDE_INT mask = 0;
6694 int i;
6695
6696 for (i = 0; i < 8; ++i)
6697 if ((opint[1] >> i) & 1)
6698 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6699
6700 if (op_const & 1)
6701 return build_int_cst (long_integer_type_node, opint[0] & mask);
6702
6703 if (op)
6704 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6705 build_int_cst (long_integer_type_node, mask)));
6706 }
6707 else if ((op_const & 1) && opint[0] == 0)
6708 return build_int_cst (long_integer_type_node, 0);
6709 return NULL;
6710 }
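/* Reference semantics, for illustration (zapnot_ref is a hypothetical
   helper): bit I of the mask selects byte I of the value, and ZAP is
   the same operation with the low eight mask bits complemented:

       static unsigned long
       zapnot_ref (unsigned long val, unsigned long mask)
       {
         unsigned long res = 0;
         int i;
         for (i = 0; i < 8; i++)
           if ((mask >> i) & 1)
             res |= val & (0xfful << (i * 8));
         return res;
       }
*/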
6711
6712 /* Fold the builtins for the EXT family of instructions. */
6713
6714 static tree
6715 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6716 long op_const, unsigned HOST_WIDE_INT bytemask,
6717 bool is_high)
6718 {
6719 long zap_const = 2;
6720 tree *zap_op = NULL;
6721
6722 if (op_const & 2)
6723 {
6724 unsigned HOST_WIDE_INT loc;
6725
6726 loc = opint[1] & 7;
6727 if (BYTES_BIG_ENDIAN)
6728 loc ^= 7;
6729 loc *= 8;
6730
6731 if (loc != 0)
6732 {
6733 if (op_const & 1)
6734 {
6735 unsigned HOST_WIDE_INT temp = opint[0];
6736 if (is_high)
6737 temp <<= loc;
6738 else
6739 temp >>= loc;
6740 opint[0] = temp;
6741 zap_const = 3;
6742 }
6743 }
6744 else
6745 zap_op = op;
6746 }
6747
6748 opint[1] = bytemask;
6749 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6750 }
6751
6752 /* Fold the builtins for the INS family of instructions. */
6753
6754 static tree
6755 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6756 long op_const, unsigned HOST_WIDE_INT bytemask,
6757 bool is_high)
6758 {
6759 if ((op_const & 1) && opint[0] == 0)
6760 return build_int_cst (long_integer_type_node, 0);
6761
6762 if (op_const & 2)
6763 {
6764 unsigned HOST_WIDE_INT temp, loc, byteloc;
6765 tree *zap_op = NULL;
6766
6767 loc = opint[1] & 7;
6768 if (BYTES_BIG_ENDIAN)
6769 loc ^= 7;
6770 bytemask <<= loc;
6771
6772 temp = opint[0];
6773 if (is_high)
6774 {
6775 byteloc = (64 - (loc * 8)) & 0x3f;
6776 if (byteloc == 0)
6777 zap_op = op;
6778 else
6779 temp >>= byteloc;
6780 bytemask >>= 8;
6781 }
6782 else
6783 {
6784 byteloc = loc * 8;
6785 if (byteloc == 0)
6786 zap_op = op;
6787 else
6788 temp <<= byteloc;
6789 }
6790
6791 opint[0] = temp;
6792 opint[1] = bytemask;
6793 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6794 }
6795
6796 return NULL;
6797 }
6798
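/* Fold the builtins for the MSK family of instructions. */
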
6799 static tree
6800 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6801 long op_const, unsigned HOST_WIDE_INT bytemask,
6802 bool is_high)
6803 {
6804 if (op_const & 2)
6805 {
6806 unsigned HOST_WIDE_INT loc;
6807
6808 loc = opint[1] & 7;
6809 if (BYTES_BIG_ENDIAN)
6810 loc ^= 7;
6811 bytemask <<= loc;
6812
6813 if (is_high)
6814 bytemask >>= 8;
6815
6816 opint[1] = bytemask ^ 0xff;
6817 }
6818
6819 return alpha_fold_builtin_zapnot (op, opint, op_const);
6820 }
6821
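/* Fold the builtin for the UMULH instruction, i.e. the high 64 bits of
   the unsigned 128-bit product of the two operands. */
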
6822 static tree
6823 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6824 {
6825 switch (op_const)
6826 {
6827 case 3:
6828 {
6829 unsigned HOST_WIDE_INT l;
6830 HOST_WIDE_INT h;
6831
6832 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6833
6834 #if HOST_BITS_PER_WIDE_INT > 64
6835 # error fixme
6836 #endif
6837
6838 return build_int_cst (long_integer_type_node, h);
6839 }
6840
6841 case 1:
6842 opint[1] = opint[0];
6843 /* FALLTHRU */
6844 case 2:
6845 /* Note that (X*1) >> 64 == 0. */
6846 if (opint[1] == 0 || opint[1] == 1)
6847 return build_int_cst (long_integer_type_node, 0);
6848 break;
6849 }
6850 return NULL;
6851 }
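/* For illustration: on a host with a 128-bit integer type, the fully
   constant case above computes

       (unsigned long) (((unsigned __int128) a * b) >> 64)

   which mul_double obtains portably via double-word arithmetic.  */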
6852
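/* Fold one of the MAX-extension vector MIN/MAX builtins by performing
   the operation in the corresponding vector type. */
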
6853 static tree
6854 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6855 {
6856 tree op0 = fold_convert (vtype, op[0]);
6857 tree op1 = fold_convert (vtype, op[1]);
6858 tree val = fold (build2 (code, vtype, op0, op1));
6859 return fold_convert (long_integer_type_node, val);
6860 }
6861
6862 static tree
6863 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6864 {
6865 unsigned HOST_WIDE_INT temp = 0;
6866 int i;
6867
6868 if (op_const != 3)
6869 return NULL;
6870
6871 for (i = 0; i < 8; ++i)
6872 {
6873 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6874 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6875 if (a >= b)
6876 temp += a - b;
6877 else
6878 temp += b - a;
6879 }
6880
6881 return build_int_cst (long_integer_type_node, temp);
6882 }
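/* For illustration: PERR is the sum of absolute differences of the
   eight byte lanes, e.g. with operands 0x0005 and 0x0302 the result is
   |0x05 - 0x02| + |0x00 - 0x03| = 6.  */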
6883
6884 static tree
6885 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6886 {
6887 unsigned HOST_WIDE_INT temp;
6888
6889 if (op_const == 0)
6890 return NULL;
6891
6892 temp = opint[0] & 0xff;
6893 temp |= (opint[0] >> 24) & 0xff00;
6894
6895 return build_int_cst (long_integer_type_node, temp);
6896 }
6897
6898 static tree
6899 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6900 {
6901 unsigned HOST_WIDE_INT temp;
6902
6903 if (op_const == 0)
6904 return NULL;
6905
6906 temp = opint[0] & 0xff;
6907 temp |= (opint[0] >> 8) & 0xff00;
6908 temp |= (opint[0] >> 16) & 0xff0000;
6909 temp |= (opint[0] >> 24) & 0xff000000;
6910
6911 return build_int_cst (long_integer_type_node, temp);
6912 }
6913
6914 static tree
6915 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6916 {
6917 unsigned HOST_WIDE_INT temp;
6918
6919 if (op_const == 0)
6920 return NULL;
6921
6922 temp = opint[0] & 0xff;
6923 temp |= (opint[0] & 0xff00) << 24;
6924
6925 return build_int_cst (long_integer_type_node, temp);
6926 }
6927
6928 static tree
6929 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6930 {
6931 unsigned HOST_WIDE_INT temp;
6932
6933 if (op_const == 0)
6934 return NULL;
6935
6936 temp = opint[0] & 0xff;
6937 temp |= (opint[0] & 0x0000ff00) << 8;
6938 temp |= (opint[0] & 0x00ff0000) << 16;
6939 temp |= (opint[0] & 0xff000000) << 24;
6940
6941 return build_int_cst (long_integer_type_node, temp);
6942 }
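/* Concrete values, for illustration:

       __builtin_alpha_pklb   (0x000000bb000000aa) == 0x000000000000bbaa
       __builtin_alpha_unpkbl (0x000000000000bbaa) == 0x000000bb000000aa
       __builtin_alpha_pkwb   (0x00dd00cc00bb00aa) == 0x00000000ddccbbaa
       __builtin_alpha_unpkbw (0x00000000ddccbbaa) == 0x00dd00cc00bb00aa

   i.e. the PK forms keep the low byte of each lane and the UNPK forms
   spread those bytes back out, zero-filling the rest.  */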
6943
6944 static tree
6945 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6946 {
6947 unsigned HOST_WIDE_INT temp;
6948
6949 if (op_const == 0)
6950 return NULL;
6951
6952 if (opint[0] == 0)
6953 temp = 64;
6954 else
6955 temp = exact_log2 (opint[0] & -opint[0]);
6956
6957 return build_int_cst (long_integer_type_node, temp);
6958 }
6959
6960 static tree
6961 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6962 {
6963 unsigned HOST_WIDE_INT temp;
6964
6965 if (op_const == 0)
6966 return NULL;
6967
6968 if (opint[0] == 0)
6969 temp = 64;
6970 else
6971 temp = 64 - floor_log2 (opint[0]) - 1;
6972
6973 return build_int_cst (long_integer_type_node, temp);
6974 }
6975
6976 static tree
6977 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6978 {
6979 unsigned HOST_WIDE_INT temp, op;
6980
6981 if (op_const == 0)
6982 return NULL;
6983
6984 op = opint[0];
6985 temp = 0;
6986 while (op)
6987 temp++, op &= op - 1;
6988
6989 return build_int_cst (long_integer_type_node, temp);
6990 }
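/* Concrete values, for illustration:

       __builtin_alpha_cttz  (0x40) == 6
       __builtin_alpha_ctlz  (0x40) == 57
       __builtin_alpha_ctpop (0x40) == 1

   CTTZ and CTLZ return 64 for a zero operand, while CTPOP returns 0.  */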
6991
6992 /* Fold one of our builtin functions. */
6993
6994 static tree
6995 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6996 {
6997 tree op[MAX_ARGS], t;
6998 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6999 long op_const = 0, arity = 0;
7000
7001 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
7002 {
7003 tree arg = TREE_VALUE (t);
7004 if (arg == error_mark_node)
7005 return NULL;
7006 if (arity >= MAX_ARGS)
7007 return NULL;
7008
7009 op[arity] = arg;
7010 opint[arity] = 0;
7011 if (TREE_CODE (arg) == INTEGER_CST)
7012 {
7013 op_const |= 1L << arity;
7014 opint[arity] = int_cst_value (arg);
7015 }
7016 }
7017
7018 switch (DECL_FUNCTION_CODE (fndecl))
7019 {
7020 case ALPHA_BUILTIN_CMPBGE:
7021 return alpha_fold_builtin_cmpbge (opint, op_const);
7022
7023 case ALPHA_BUILTIN_EXTBL:
7024 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7025 case ALPHA_BUILTIN_EXTWL:
7026 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7027 case ALPHA_BUILTIN_EXTLL:
7028 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7029 case ALPHA_BUILTIN_EXTQL:
7030 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7031 case ALPHA_BUILTIN_EXTWH:
7032 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7033 case ALPHA_BUILTIN_EXTLH:
7034 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7035 case ALPHA_BUILTIN_EXTQH:
7036 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7037
7038 case ALPHA_BUILTIN_INSBL:
7039 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7040 case ALPHA_BUILTIN_INSWL:
7041 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7042 case ALPHA_BUILTIN_INSLL:
7043 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7044 case ALPHA_BUILTIN_INSQL:
7045 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7046 case ALPHA_BUILTIN_INSWH:
7047 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7048 case ALPHA_BUILTIN_INSLH:
7049 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7050 case ALPHA_BUILTIN_INSQH:
7051 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7052
7053 case ALPHA_BUILTIN_MSKBL:
7054 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7055 case ALPHA_BUILTIN_MSKWL:
7056 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7057 case ALPHA_BUILTIN_MSKLL:
7058 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7059 case ALPHA_BUILTIN_MSKQL:
7060 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7061 case ALPHA_BUILTIN_MSKWH:
7062 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7063 case ALPHA_BUILTIN_MSKLH:
7064 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7065 case ALPHA_BUILTIN_MSKQH:
7066 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7067
7068 case ALPHA_BUILTIN_UMULH:
7069 return alpha_fold_builtin_umulh (opint, op_const);
7070
7071 case ALPHA_BUILTIN_ZAP:
7072 opint[1] ^= 0xff;
7073 /* FALLTHRU */
7074 case ALPHA_BUILTIN_ZAPNOT:
7075 return alpha_fold_builtin_zapnot (op, opint, op_const);
7076
7077 case ALPHA_BUILTIN_MINUB8:
7078 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7079 case ALPHA_BUILTIN_MINSB8:
7080 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7081 case ALPHA_BUILTIN_MINUW4:
7082 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7083 case ALPHA_BUILTIN_MINSW4:
7084 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7085 case ALPHA_BUILTIN_MAXUB8:
7086 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7087 case ALPHA_BUILTIN_MAXSB8:
7088 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7089 case ALPHA_BUILTIN_MAXUW4:
7090 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7091 case ALPHA_BUILTIN_MAXSW4:
7092 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7093
7094 case ALPHA_BUILTIN_PERR:
7095 return alpha_fold_builtin_perr (opint, op_const);
7096 case ALPHA_BUILTIN_PKLB:
7097 return alpha_fold_builtin_pklb (opint, op_const);
7098 case ALPHA_BUILTIN_PKWB:
7099 return alpha_fold_builtin_pkwb (opint, op_const);
7100 case ALPHA_BUILTIN_UNPKBL:
7101 return alpha_fold_builtin_unpkbl (opint, op_const);
7102 case ALPHA_BUILTIN_UNPKBW:
7103 return alpha_fold_builtin_unpkbw (opint, op_const);
7104
7105 case ALPHA_BUILTIN_CTTZ:
7106 return alpha_fold_builtin_cttz (opint, op_const);
7107 case ALPHA_BUILTIN_CTLZ:
7108 return alpha_fold_builtin_ctlz (opint, op_const);
7109 case ALPHA_BUILTIN_CTPOP:
7110 return alpha_fold_builtin_ctpop (opint, op_const);
7111
7112 case ALPHA_BUILTIN_AMASK:
7113 case ALPHA_BUILTIN_IMPLVER:
7114 case ALPHA_BUILTIN_RPCC:
7115 case ALPHA_BUILTIN_THREAD_POINTER:
7116 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7117 /* None of these are foldable at compile-time. */
7118 default:
7119 return NULL;
7120 }
7121 }
7122 \f
7123 /* This page contains routines that are used to determine what the function
7124 prologue and epilogue code will do and write them out. */
7125
7126 /* Compute the size of the save area in the stack. */
7127
7128 /* These variables are used for communication between the following functions.
7129 They indicate various things about the current function being compiled
7130 that are used to tell what kind of prologue, epilogue and procedure
7131 descriptor to generate. */
7132
7133 /* Nonzero if we need a stack procedure. */
7134 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7135 static enum alpha_procedure_types alpha_procedure_type;
7136
7137 /* Register number (either FP or SP) that is used to unwind the frame. */
7138 static int vms_unwind_regno;
7139
7140 /* Register number used to save FP. We need not have one for RA since
7141 we don't modify it for register procedures. This is only defined
7142 for register frame procedures. */
7143 static int vms_save_fp_regno;
7144
7145 /* Register number used to reference objects off our PV. */
7146 static int vms_base_regno;
7147
7148 /* Compute register masks for saved registers. */
7149
7150 static void
7151 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7152 {
7153 unsigned long imask = 0;
7154 unsigned long fmask = 0;
7155 unsigned int i;
7156
7157 /* When outputting a thunk, we don't have valid register life info,
7158 but assemble_start_function wants to output .frame and .mask
7159 directives. */
7160 if (current_function_is_thunk)
7161 {
7162 *imaskP = 0;
7163 *fmaskP = 0;
7164 return;
7165 }
7166
7167 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7168 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7169
7170 /* One for every register we have to save. */
7171 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7172 if (! fixed_regs[i] && ! call_used_regs[i]
7173 && regs_ever_live[i] && i != REG_RA
7174 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7175 {
7176 if (i < 32)
7177 imask |= (1UL << i);
7178 else
7179 fmask |= (1UL << (i - 32));
7180 }
7181
7182 /* We need to restore these for the handler. */
7183 if (current_function_calls_eh_return)
7184 {
7185 for (i = 0; ; ++i)
7186 {
7187 unsigned regno = EH_RETURN_DATA_REGNO (i);
7188 if (regno == INVALID_REGNUM)
7189 break;
7190 imask |= 1UL << regno;
7191 }
7192 }
7193
7194 /* If any register spilled, then spill the return address also. */
7195 /* ??? This is required by the Digital stack unwind specification
7196 and isn't needed if we're doing Dwarf2 unwinding. */
7197 if (imask || fmask || alpha_ra_ever_killed ())
7198 imask |= (1UL << REG_RA);
7199
7200 *imaskP = imask;
7201 *fmaskP = fmask;
7202 }
7203
7204 int
7205 alpha_sa_size (void)
7206 {
7207 unsigned long mask[2];
7208 int sa_size = 0;
7209 int i, j;
7210
7211 alpha_sa_mask (&mask[0], &mask[1]);
7212
7213 if (TARGET_ABI_UNICOSMK)
7214 {
7215 if (mask[0] || mask[1])
7216 sa_size = 14;
7217 }
7218 else
7219 {
7220 for (j = 0; j < 2; ++j)
7221 for (i = 0; i < 32; ++i)
7222 if ((mask[j] >> i) & 1)
7223 sa_size++;
7224 }
7225
7226 if (TARGET_ABI_UNICOSMK)
7227 {
7228 /* We might not need to generate a frame if we don't make any calls
7229 (including calls to __T3E_MISMATCH if this is a vararg function),
7230 don't have any local variables which require stack slots, don't
7231 use alloca and have not determined that we need a frame for other
7232 reasons. */
7233
7234 alpha_procedure_type
7235 = (sa_size || get_frame_size() != 0
7236 || current_function_outgoing_args_size
7237 || current_function_stdarg || current_function_calls_alloca
7238 || frame_pointer_needed)
7239 ? PT_STACK : PT_REGISTER;
7240
7241 /* Always reserve space for saving callee-saved registers if we
7242 need a frame as required by the calling convention. */
7243 if (alpha_procedure_type == PT_STACK)
7244 sa_size = 14;
7245 }
7246 else if (TARGET_ABI_OPEN_VMS)
7247 {
7248 /* Start by assuming we can use a register procedure if we don't
7249 make any calls (REG_RA not used) or need to save any
7250 registers and a stack procedure if we do. */
7251 if ((mask[0] >> REG_RA) & 1)
7252 alpha_procedure_type = PT_STACK;
7253 else if (get_frame_size() != 0)
7254 alpha_procedure_type = PT_REGISTER;
7255 else
7256 alpha_procedure_type = PT_NULL;
7257
7258 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7259 made the final decision on stack procedure vs register procedure. */
7260 if (alpha_procedure_type == PT_STACK)
7261 sa_size -= 2;
7262
7263 /* Decide whether to refer to objects off our PV via FP or PV.
7264 If we need FP for something else or if we receive a nonlocal
7265 goto (which expects PV to contain the value), we must use PV.
7266 Otherwise, start by assuming we can use FP. */
7267
7268 vms_base_regno
7269 = (frame_pointer_needed
7270 || current_function_has_nonlocal_label
7271 || alpha_procedure_type == PT_STACK
7272 || current_function_outgoing_args_size)
7273 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7274
7275 /* If we want to copy PV into FP, we need to find some register
7276 in which to save FP. */
7277
7278 vms_save_fp_regno = -1;
7279 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7280 for (i = 0; i < 32; i++)
7281 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7282 vms_save_fp_regno = i;
7283
7284 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7285 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7286 else if (alpha_procedure_type == PT_NULL)
7287 vms_base_regno = REG_PV;
7288
7289 /* Stack unwinding should be done via FP unless we use it for PV. */
7290 vms_unwind_regno = (vms_base_regno == REG_PV
7291 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7292
7293 /* If this is a stack procedure, allow space for saving FP and RA. */
7294 if (alpha_procedure_type == PT_STACK)
7295 sa_size += 2;
7296 }
7297 else
7298 {
7299 /* Our size must be even (multiple of 16 bytes). */
7300 if (sa_size & 1)
7301 sa_size++;
7302 }
7303
7304 return sa_size * 8;
7305 }
7306
7307 /* Define the offset between two registers, one to be eliminated,
7308 and the other its replacement, at the start of a routine. */
7309
7310 HOST_WIDE_INT
7311 alpha_initial_elimination_offset (unsigned int from,
7312 unsigned int to ATTRIBUTE_UNUSED)
7313 {
7314 HOST_WIDE_INT ret;
7315
7316 ret = alpha_sa_size ();
7317 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7318
7319 switch (from)
7320 {
7321 case FRAME_POINTER_REGNUM:
7322 break;
7323
7324 case ARG_POINTER_REGNUM:
7325 ret += (ALPHA_ROUND (get_frame_size ()
7326 + current_function_pretend_args_size)
7327 - current_function_pretend_args_size);
7328 break;
7329
7330 default:
7331 gcc_unreachable ();
7332 }
7333
7334 return ret;
7335 }
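/* A worked example with hypothetical numbers, under the OSF ABI: with a
   16-byte register save area, no outgoing args, a 40-byte frame and no
   pretend args, the FRAME_POINTER_REGNUM offset is 16 and the
   ARG_POINTER_REGNUM offset is 16 + ALPHA_ROUND (40) == 64.  */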
7336
7337 int
7338 alpha_pv_save_size (void)
7339 {
7340 alpha_sa_size ();
7341 return alpha_procedure_type == PT_STACK ? 8 : 0;
7342 }
7343
7344 int
7345 alpha_using_fp (void)
7346 {
7347 alpha_sa_size ();
7348 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7349 }
7350
7351 #if TARGET_ABI_OPEN_VMS
7352
7353 const struct attribute_spec vms_attribute_table[] =
7354 {
7355 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7356 { "overlaid", 0, 0, true, false, false, NULL },
7357 { "global", 0, 0, true, false, false, NULL },
7358 { "initialize", 0, 0, true, false, false, NULL },
7359 { NULL, 0, 0, false, false, false, NULL }
7360 };
7361
7362 #endif
7363
7364 static int
7365 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7366 {
7367 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7368 }
7369
7370 int
7371 alpha_find_lo_sum_using_gp (rtx insn)
7372 {
7373 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7374 }
7375
7376 static int
7377 alpha_does_function_need_gp (void)
7378 {
7379 rtx insn;
7380
7381 /* The GP being variable is an OSF abi thing. */
7382 if (! TARGET_ABI_OSF)
7383 return 0;
7384
7385 /* We need the gp to load the address of __mcount. */
7386 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7387 return 1;
7388
7389 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7390 if (current_function_is_thunk)
7391 return 1;
7392
7393 /* The nonlocal receiver pattern assumes that the gp is valid for
7394 the nested function. Reasonable because it's almost always set
7395 correctly already. For the cases where that's wrong, make sure
7396 the nested function loads its gp on entry. */
7397 if (current_function_has_nonlocal_goto)
7398 return 1;
7399
7400 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7401 Even if we are a static function, we still need to do this in case
7402 our address is taken and passed to something like qsort. */
7403
7404 push_topmost_sequence ();
7405 insn = get_insns ();
7406 pop_topmost_sequence ();
7407
7408 for (; insn; insn = NEXT_INSN (insn))
7409 if (INSN_P (insn)
7410 && GET_CODE (PATTERN (insn)) != USE
7411 && GET_CODE (PATTERN (insn)) != CLOBBER
7412 && get_attr_usegp (insn))
7413 return 1;
7414
7415 return 0;
7416 }
7417
7418 \f
7419 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7420 sequences. */
7421
7422 static rtx
7423 set_frame_related_p (void)
7424 {
7425 rtx seq = get_insns ();
7426 rtx insn;
7427
7428 end_sequence ();
7429
7430 if (!seq)
7431 return NULL_RTX;
7432
7433 if (INSN_P (seq))
7434 {
7435 insn = seq;
7436 while (insn != NULL_RTX)
7437 {
7438 RTX_FRAME_RELATED_P (insn) = 1;
7439 insn = NEXT_INSN (insn);
7440 }
7441 seq = emit_insn (seq);
7442 }
7443 else
7444 {
7445 seq = emit_insn (seq);
7446 RTX_FRAME_RELATED_P (seq) = 1;
7447 }
7448 return seq;
7449 }
7450
7451 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7452
7453 /* Generates a store with the proper unwind info attached. VALUE is
7454 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7455 contains SP+FRAME_BIAS, and that is the unwind info that should be
7456 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7457 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7458
7459 static void
7460 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7461 HOST_WIDE_INT base_ofs, rtx frame_reg)
7462 {
7463 rtx addr, mem, insn;
7464
7465 addr = plus_constant (base_reg, base_ofs);
7466 mem = gen_rtx_MEM (DImode, addr);
7467 set_mem_alias_set (mem, alpha_sr_alias_set);
7468
7469 insn = emit_move_insn (mem, value);
7470 RTX_FRAME_RELATED_P (insn) = 1;
7471
7472 if (frame_bias || value != frame_reg)
7473 {
7474 if (frame_bias)
7475 {
7476 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7477 mem = gen_rtx_MEM (DImode, addr);
7478 }
7479
7480 REG_NOTES (insn)
7481 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7482 gen_rtx_SET (VOIDmode, mem, frame_reg),
7483 REG_NOTES (insn));
7484 }
7485 }
7486
7487 static void
7488 emit_frame_store (unsigned int regno, rtx base_reg,
7489 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7490 {
7491 rtx reg = gen_rtx_REG (DImode, regno);
7492 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7493 }
7494
7495 /* Write function prologue. */
7496
7497 /* On vms we have two kinds of functions:
7498
7499 - stack frame (PROC_STACK)
7500        these are 'normal' functions with local vars and which
7501        call other functions
7502 - register frame (PROC_REGISTER)
7503 keeps all data in registers, needs no stack
7504
7505 We must pass this to the assembler so it can generate the
7506    proper pdsc (procedure descriptor).
7507 This is done with the '.pdesc' command.
7508
7509    On non-VMS targets, we don't really differentiate between the two, as we can
7510 simply allocate stack without saving registers. */
7511
7512 void
7513 alpha_expand_prologue (void)
7514 {
7515 /* Registers to save. */
7516 unsigned long imask = 0;
7517 unsigned long fmask = 0;
7518 /* Stack space needed for pushing registers clobbered by us. */
7519 HOST_WIDE_INT sa_size;
7520 /* Complete stack size needed. */
7521 HOST_WIDE_INT frame_size;
7522 /* Offset from base reg to register save area. */
7523 HOST_WIDE_INT reg_offset;
7524 rtx sa_reg;
7525 int i;
7526
7527 sa_size = alpha_sa_size ();
7528
7529 frame_size = get_frame_size ();
7530 if (TARGET_ABI_OPEN_VMS)
7531 frame_size = ALPHA_ROUND (sa_size
7532 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7533 + frame_size
7534 + current_function_pretend_args_size);
7535 else if (TARGET_ABI_UNICOSMK)
7536 /* We have to allocate space for the DSIB if we generate a frame. */
7537 frame_size = ALPHA_ROUND (sa_size
7538 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7539 + ALPHA_ROUND (frame_size
7540 + current_function_outgoing_args_size);
7541 else
7542 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7543 + sa_size
7544 + ALPHA_ROUND (frame_size
7545 + current_function_pretend_args_size));
7546
7547 if (TARGET_ABI_OPEN_VMS)
7548 reg_offset = 8;
7549 else
7550 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7551
7552 alpha_sa_mask (&imask, &fmask);
7553
7554 /* Emit an insn to reload GP, if needed. */
7555 if (TARGET_ABI_OSF)
7556 {
7557 alpha_function_needs_gp = alpha_does_function_need_gp ();
7558 if (alpha_function_needs_gp)
7559 emit_insn (gen_prologue_ldgp ());
7560 }
7561
7562 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7563 the call to mcount ourselves, rather than having the linker do it
7564 magically in response to -pg. Since _mcount has special linkage,
7565 don't represent the call as a call. */
7566 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7567 emit_insn (gen_prologue_mcount ());
7568
7569 if (TARGET_ABI_UNICOSMK)
7570 unicosmk_gen_dsib (&imask);
7571
7572 /* Adjust the stack by the frame size. If the frame size is > 4096
7573 bytes, we need to be sure we probe somewhere in the first and last
7574 4096 bytes (we can probably get away without the latter test) and
7575 every 8192 bytes in between. If the frame size is > 32768, we
7576 do this in a loop. Otherwise, we generate the explicit probe
7577 instructions.
7578
7579 Note that we are only allowed to adjust sp once in the prologue. */
7580
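/* For example (hypothetical numbers): a frame_size of 50000 takes the
   loop branch below, with blocks == (50000 + 4096) / 8192 == 6 and
   leftover == 4944; the final SP is then
   (SP + 4096 - 6*8192) - 4944 == SP - 50000.  */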
7581 if (frame_size <= 32768)
7582 {
7583 if (frame_size > 4096)
7584 {
7585 int probed;
7586
7587 for (probed = 4096; probed < frame_size; probed += 8192)
7588 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7589 ? -probed + 64
7590 : -probed)));
7591
7592 /* We only have to do this probe if we aren't saving registers. */
7593 if (sa_size == 0 && frame_size > probed - 4096)
7594 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7595 }
7596
7597 if (frame_size != 0)
7598 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7599 GEN_INT (TARGET_ABI_UNICOSMK
7600 ? -frame_size + 64
7601 : -frame_size))));
7602 }
7603 else
7604 {
7605 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7606 number of 8192 byte blocks to probe. We then probe each block
7607 in the loop and then set SP to the proper location. If the
7608 amount remaining is > 4096, we have to do one more probe if we
7609 are not saving any registers. */
7610
7611 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7612 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7613 rtx ptr = gen_rtx_REG (DImode, 22);
7614 rtx count = gen_rtx_REG (DImode, 23);
7615 rtx seq;
7616
7617 emit_move_insn (count, GEN_INT (blocks));
7618 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7619 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7620
7621 /* Because of the difficulty in emitting a new basic block this
7622 late in the compilation, generate the loop as a single insn. */
7623 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7624
7625 if (leftover > 4096 && sa_size == 0)
7626 {
7627 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7628 MEM_VOLATILE_P (last) = 1;
7629 emit_move_insn (last, const0_rtx);
7630 }
7631
7632 if (TARGET_ABI_WINDOWS_NT)
7633 {
7634 /* For NT stack unwind (done by 'reverse execution'), it's
7635 not OK to take the result of a loop, even though the value
7636 is already in ptr, so we reload it via a single operation
7637            and subtract it from sp.
7638
7639 Yes, that's correct -- we have to reload the whole constant
7640 into a temporary via ldah+lda then subtract from sp. */
7641
7642 HOST_WIDE_INT lo, hi;
7643 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7644 hi = frame_size - lo;
7645
7646 emit_move_insn (ptr, GEN_INT (hi));
7647 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7648 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7649 ptr));
7650 }
7651 else
7652 {
7653 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7654 GEN_INT (-leftover)));
7655 }
7656
7657 /* This alternative is special, because the DWARF code cannot
7658         possibly intuit through the loop above. So we invent this
7659         note for it to look at instead. */
7660 RTX_FRAME_RELATED_P (seq) = 1;
7661 REG_NOTES (seq)
7662 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7663 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7664 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7665 GEN_INT (TARGET_ABI_UNICOSMK
7666 ? -frame_size + 64
7667 : -frame_size))),
7668 REG_NOTES (seq));
7669 }
7670
7671 if (!TARGET_ABI_UNICOSMK)
7672 {
7673 HOST_WIDE_INT sa_bias = 0;
7674
7675 /* Cope with very large offsets to the register save area. */
7676 sa_reg = stack_pointer_rtx;
7677 if (reg_offset + sa_size > 0x8000)
7678 {
7679 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7680 rtx sa_bias_rtx;
7681
7682 if (low + sa_size <= 0x8000)
7683 sa_bias = reg_offset - low, reg_offset = low;
7684 else
7685 sa_bias = reg_offset, reg_offset = 0;
7686
7687 sa_reg = gen_rtx_REG (DImode, 24);
7688 sa_bias_rtx = GEN_INT (sa_bias);
7689
7690 if (add_operand (sa_bias_rtx, DImode))
7691 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7692 else
7693 {
7694 emit_move_insn (sa_reg, sa_bias_rtx);
7695 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7696 }
7697 }
7698
7699 /* Save regs in stack order. Beginning with VMS PV. */
7700 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7701 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7702
7703 /* Save register RA next. */
7704 if (imask & (1UL << REG_RA))
7705 {
7706 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7707 imask &= ~(1UL << REG_RA);
7708 reg_offset += 8;
7709 }
7710
7711 /* Now save any other registers required to be saved. */
7712 for (i = 0; i < 31; i++)
7713 if (imask & (1UL << i))
7714 {
7715 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7716 reg_offset += 8;
7717 }
7718
7719 for (i = 0; i < 31; i++)
7720 if (fmask & (1UL << i))
7721 {
7722 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7723 reg_offset += 8;
7724 }
7725 }
7726 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7727 {
7728 /* The standard frame on the T3E includes space for saving registers.
7729 We just have to use it. We don't have to save the return address and
7730 the old frame pointer here - they are saved in the DSIB. */
7731
7732 reg_offset = -56;
7733 for (i = 9; i < 15; i++)
7734 if (imask & (1UL << i))
7735 {
7736 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7737 reg_offset -= 8;
7738 }
7739 for (i = 2; i < 10; i++)
7740 if (fmask & (1UL << i))
7741 {
7742 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7743 reg_offset -= 8;
7744 }
7745 }
7746
7747 if (TARGET_ABI_OPEN_VMS)
7748 {
7749 if (alpha_procedure_type == PT_REGISTER)
7750 /* Register frame procedures save the fp.
7751 ?? Ought to have a dwarf2 save for this. */
7752 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7753 hard_frame_pointer_rtx);
7754
7755 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7756 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7757 gen_rtx_REG (DImode, REG_PV)));
7758
7759 if (alpha_procedure_type != PT_NULL
7760 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7761 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7762
7763 /* If we have to allocate space for outgoing args, do it now. */
7764 if (current_function_outgoing_args_size != 0)
7765 {
7766 rtx seq
7767 = emit_move_insn (stack_pointer_rtx,
7768 plus_constant
7769 (hard_frame_pointer_rtx,
7770 - (ALPHA_ROUND
7771 (current_function_outgoing_args_size))));
7772
7773 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7774 if ! frame_pointer_needed. Setting the bit will change the CFA
7775 computation rule to use sp again, which would be wrong if we had
7776 frame_pointer_needed, as this means sp might move unpredictably
7777 later on.
7778
7779 Also, note that
7780 frame_pointer_needed
7781 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7782 and
7783 current_function_outgoing_args_size != 0
7784 => alpha_procedure_type != PT_NULL,
7785
7786 so when we are not setting the bit here, we are guaranteed to
7787 have emitted an FRP frame pointer update just before. */
7788 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7789 }
7790 }
7791 else if (!TARGET_ABI_UNICOSMK)
7792 {
7793 /* If we need a frame pointer, set it from the stack pointer. */
7794 if (frame_pointer_needed)
7795 {
7796 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7797 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7798 else
7799 /* This must always be the last instruction in the
7800 prologue, thus we emit a special move + clobber. */
7801 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7802 stack_pointer_rtx, sa_reg)));
7803 }
7804 }
7805
7806 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7807 the prologue, for exception handling reasons, we cannot do this for
7808 any insn that might fault. We could prevent this for mems with a
7809 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7810 have to prevent all such scheduling with a blockage.
7811
7812 Linux, on the other hand, never bothered to implement OSF/1's
7813 exception handling, and so doesn't care about such things. Anyone
7814 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7815
7816 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7817 emit_insn (gen_blockage ());
7818 }
7819
7820 /* Count the number of .file directives, so that .loc is up to date. */
7821 int num_source_filenames = 0;
7822
7823 /* Output the textual info surrounding the prologue. */
7824
7825 void
7826 alpha_start_function (FILE *file, const char *fnname,
7827 tree decl ATTRIBUTE_UNUSED)
7828 {
7829 unsigned long imask = 0;
7830 unsigned long fmask = 0;
7831 /* Stack space needed for pushing registers clobbered by us. */
7832 HOST_WIDE_INT sa_size;
7833 /* Complete stack size needed. */
7834 unsigned HOST_WIDE_INT frame_size;
7835 /* Offset from base reg to register save area. */
7836 HOST_WIDE_INT reg_offset;
7837 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7838 int i;
7839
7840 /* Don't emit an extern directive for functions defined in the same file. */
7841 if (TARGET_ABI_UNICOSMK)
7842 {
7843 tree name_tree;
7844 name_tree = get_identifier (fnname);
7845 TREE_ASM_WRITTEN (name_tree) = 1;
7846 }
7847
7848 alpha_fnname = fnname;
7849 sa_size = alpha_sa_size ();
7850
7851 frame_size = get_frame_size ();
7852 if (TARGET_ABI_OPEN_VMS)
7853 frame_size = ALPHA_ROUND (sa_size
7854 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7855 + frame_size
7856 + current_function_pretend_args_size);
7857 else if (TARGET_ABI_UNICOSMK)
7858 frame_size = ALPHA_ROUND (sa_size
7859 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7860 + ALPHA_ROUND (frame_size
7861 + current_function_outgoing_args_size);
7862 else
7863 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7864 + sa_size
7865 + ALPHA_ROUND (frame_size
7866 + current_function_pretend_args_size));
7867
7868 if (TARGET_ABI_OPEN_VMS)
7869 reg_offset = 8;
7870 else
7871 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
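  /* Restating the arithmetic above for the OSF case: from the new SP
     upward lie the rounded outgoing-argument area, then the register
     save area, then the rounded locals and pretend args, so REG_OFFSET
     (the start of the save area) is simply the rounded outgoing-args
     size.  */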
7872
7873 alpha_sa_mask (&imask, &fmask);
7874
7875 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7876 We have to do that before the .ent directive as we cannot switch
7877 files within procedures with native ecoff because line numbers are
7878 linked to procedure descriptors.
7879 Outputting the lineno helps debugging of one line functions as they
7880 would otherwise get no line number at all. Please note that we would
7881 like to put out last_linenum from final.c, but it is not accessible. */
7882
7883 if (write_symbols == SDB_DEBUG)
7884 {
7885 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7886 ASM_OUTPUT_SOURCE_FILENAME (file,
7887 DECL_SOURCE_FILE (current_function_decl));
7888 #endif
7889 #ifdef SDB_OUTPUT_SOURCE_LINE
7890 if (debug_info_level != DINFO_LEVEL_TERSE)
7891 SDB_OUTPUT_SOURCE_LINE (file,
7892 DECL_SOURCE_LINE (current_function_decl));
7893 #endif
7894 }
7895
7896 /* Issue function start and label. */
7897 if (TARGET_ABI_OPEN_VMS
7898 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7899 {
7900 fputs ("\t.ent ", file);
7901 assemble_name (file, fnname);
7902 putc ('\n', file);
7903
7904 /* If the function needs GP, we'll write the "..ng" label there.
7905 Otherwise, do it here. */
7906 if (TARGET_ABI_OSF
7907 && ! alpha_function_needs_gp
7908 && ! current_function_is_thunk)
7909 {
7910 putc ('$', file);
7911 assemble_name (file, fnname);
7912 fputs ("..ng:\n", file);
7913 }
7914 }
7915
7916 strcpy (entry_label, fnname);
7917 if (TARGET_ABI_OPEN_VMS)
7918 strcat (entry_label, "..en");
7919
7920 /* For public functions, the label must be globalized by appending an
7921 additional colon. */
7922 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7923 strcat (entry_label, ":");
7924
7925 ASM_OUTPUT_LABEL (file, entry_label);
7926 inside_function = TRUE;
7927
7928 if (TARGET_ABI_OPEN_VMS)
7929 fprintf (file, "\t.base $%d\n", vms_base_regno);
7930
7931 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7932 && !flag_inhibit_size_directive)
7933 {
7934 /* Set flags in procedure descriptor to request IEEE-conformant
7935 math-library routines. The value we set it to is PDSC_EXC_IEEE
7936 (/usr/include/pdsc.h). */
7937 fputs ("\t.eflag 48\n", file);
7938 }
7939
7940 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7941 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7942 alpha_arg_offset = -frame_size + 48;
7943
7944 /* Describe our frame. If the frame size does not fit in a signed
7945 32-bit integer, print it as zero to avoid an assembler error. We
7946 won't be properly describing such a frame, but that's the best we can do. */
7947 if (TARGET_ABI_UNICOSMK)
7948 ;
7949 else if (TARGET_ABI_OPEN_VMS)
7950 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7951 HOST_WIDE_INT_PRINT_DEC "\n",
7952 vms_unwind_regno,
7953 frame_size >= (1UL << 31) ? 0 : frame_size,
7954 reg_offset);
7955 else if (!flag_inhibit_size_directive)
7956 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7957 (frame_pointer_needed
7958 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7959 frame_size >= (1UL << 31) ? 0 : frame_size,
7960 current_function_pretend_args_size);
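  /* Purely illustrative: for a hypothetical function with a 96-byte frame,
     no frame pointer and no pretend args, the directive emitted just above
     would read ".frame $30,96,$26,0", with $30 being the stack pointer.  */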
7961
7962 /* Describe which registers were spilled. */
7963 if (TARGET_ABI_UNICOSMK)
7964 ;
7965 else if (TARGET_ABI_OPEN_VMS)
7966 {
7967 if (imask)
7968 /* ??? Does VMS care if mask contains ra? The old code didn't
7969 set it, so I don't here. */
7970 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7971 if (fmask)
7972 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7973 if (alpha_procedure_type == PT_REGISTER)
7974 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7975 }
7976 else if (!flag_inhibit_size_directive)
7977 {
7978 if (imask)
7979 {
7980 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7981 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7982
7983 for (i = 0; i < 32; ++i)
7984 if (imask & (1UL << i))
7985 reg_offset += 8;
7986 }
7987
7988 if (fmask)
7989 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7990 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7991 }
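  /* Continuing that illustrative 96-byte-frame example: saving only the
     return address ($26) at save-area offset 0 would produce
     ".mask 0x4000000,-96" from the code above, since 1UL << 26 is
     0x4000000 and the printed offset is reg_offset - frame_size.  */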
7992
7993 #if TARGET_ABI_OPEN_VMS
7994 /* Ifdef'ed because link_section is only available then. */
7995 switch_to_section (readonly_data_section);
7996 fprintf (file, "\t.align 3\n");
7997 assemble_name (file, fnname); fputs ("..na:\n", file);
7998 fputs ("\t.ascii \"", file);
7999 assemble_name (file, fnname);
8000 fputs ("\\0\"\n", file);
8001 alpha_need_linkage (fnname, 1);
8002 switch_to_section (text_section);
8003 #endif
8004 }
8005
8006 /* Emit the .prologue note at the scheduled end of the prologue. */
8007
8008 static void
8009 alpha_output_function_end_prologue (FILE *file)
8010 {
8011 if (TARGET_ABI_UNICOSMK)
8012 ;
8013 else if (TARGET_ABI_OPEN_VMS)
8014 fputs ("\t.prologue\n", file);
8015 else if (TARGET_ABI_WINDOWS_NT)
8016 fputs ("\t.prologue 0\n", file);
8017 else if (!flag_inhibit_size_directive)
8018 fprintf (file, "\t.prologue %d\n",
8019 alpha_function_needs_gp || current_function_is_thunk);
8020 }
8021
8022 /* Write function epilogue. */
8023
8024 /* ??? At some point we will want to support full unwind, and so will
8025 need to mark the epilogue as well. At the moment, we just confuse
8026 dwarf2out. */
8027 #undef FRP
8028 #define FRP(exp) exp
8029
8030 void
8031 alpha_expand_epilogue (void)
8032 {
8033 /* Registers to save. */
8034 unsigned long imask = 0;
8035 unsigned long fmask = 0;
8036 /* Stack space needed for pushing registers clobbered by us. */
8037 HOST_WIDE_INT sa_size;
8038 /* Complete stack size needed. */
8039 HOST_WIDE_INT frame_size;
8040 /* Offset from base reg to register save area. */
8041 HOST_WIDE_INT reg_offset;
8042 int fp_is_frame_pointer, fp_offset;
8043 rtx sa_reg, sa_reg_exp = NULL;
8044 rtx sp_adj1, sp_adj2, mem;
8045 rtx eh_ofs;
8046 int i;
8047
8048 sa_size = alpha_sa_size ();
8049
8050 frame_size = get_frame_size ();
8051 if (TARGET_ABI_OPEN_VMS)
8052 frame_size = ALPHA_ROUND (sa_size
8053 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8054 + frame_size
8055 + current_function_pretend_args_size);
8056 else if (TARGET_ABI_UNICOSMK)
8057 frame_size = ALPHA_ROUND (sa_size
8058 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8059 + ALPHA_ROUND (frame_size
8060 + current_function_outgoing_args_size);
8061 else
8062 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8063 + sa_size
8064 + ALPHA_ROUND (frame_size
8065 + current_function_pretend_args_size));
8066
8067 if (TARGET_ABI_OPEN_VMS)
8068 {
8069 if (alpha_procedure_type == PT_STACK)
8070 reg_offset = 8;
8071 else
8072 reg_offset = 0;
8073 }
8074 else
8075 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8076
8077 alpha_sa_mask (&imask, &fmask);
8078
8079 fp_is_frame_pointer
8080 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8081 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8082 fp_offset = 0;
8083 sa_reg = stack_pointer_rtx;
8084
8085 if (current_function_calls_eh_return)
8086 eh_ofs = EH_RETURN_STACKADJ_RTX;
8087 else
8088 eh_ofs = NULL_RTX;
8089
8090 if (!TARGET_ABI_UNICOSMK && sa_size)
8091 {
8092 /* If we have a frame pointer, restore SP from it. */
8093 if ((TARGET_ABI_OPEN_VMS
8094 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8095 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8096 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8097
8098 /* Cope with very large offsets to the register save area. */
8099 if (reg_offset + sa_size > 0x8000)
8100 {
8101 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8102 HOST_WIDE_INT bias;
8103
8104 if (low + sa_size <= 0x8000)
8105 bias = reg_offset - low, reg_offset = low;
8106 else
8107 bias = reg_offset, reg_offset = 0;
8108
8109 sa_reg = gen_rtx_REG (DImode, 22);
8110 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8111
8112 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8113 }
8114
8115 /* Restore registers in order, excepting a true frame pointer. */
8116
8117 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8118 if (! eh_ofs)
8119 set_mem_alias_set (mem, alpha_sr_alias_set);
8120 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8121
8122 reg_offset += 8;
8123 imask &= ~(1UL << REG_RA);
8124
8125 for (i = 0; i < 31; ++i)
8126 if (imask & (1UL << i))
8127 {
8128 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8129 fp_offset = reg_offset;
8130 else
8131 {
8132 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8133 set_mem_alias_set (mem, alpha_sr_alias_set);
8134 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8135 }
8136 reg_offset += 8;
8137 }
8138
8139 for (i = 0; i < 31; ++i)
8140 if (fmask & (1UL << i))
8141 {
8142 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
8143 set_mem_alias_set (mem, alpha_sr_alias_set);
8144 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8145 reg_offset += 8;
8146 }
8147 }
8148 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8149 {
8150 /* Restore callee-saved general-purpose registers. */
8151
8152 reg_offset = -56;
8153
8154 for (i = 9; i < 15; i++)
8155 if (imask & (1UL << i))
8156 {
8157 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx,
8158 reg_offset));
8159 set_mem_alias_set (mem, alpha_sr_alias_set);
8160 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8161 reg_offset -= 8;
8162 }
8163
8164 for (i = 2; i < 10; i++)
8165 if (fmask & (1UL << i))
8166 {
8167 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
8168 reg_offset));
8169 set_mem_alias_set (mem, alpha_sr_alias_set);
8170 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8171 reg_offset -= 8;
8172 }
8173
8174 /* Restore the return address from the DSIB. */
8175
8176 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8177 set_mem_alias_set (mem, alpha_sr_alias_set);
8178 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8179 }
8180
8181 if (frame_size || eh_ofs)
8182 {
8183 sp_adj1 = stack_pointer_rtx;
8184
8185 if (eh_ofs)
8186 {
8187 sp_adj1 = gen_rtx_REG (DImode, 23);
8188 emit_move_insn (sp_adj1,
8189 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8190 }
8191
8192 /* If the stack size is large, begin computation into a temporary
8193 register so as not to interfere with a potential fp restore,
8194 which must be consecutive with an SP restore. */
8195 if (frame_size < 32768
8196 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8197 sp_adj2 = GEN_INT (frame_size);
8198 else if (TARGET_ABI_UNICOSMK)
8199 {
8200 sp_adj1 = gen_rtx_REG (DImode, 23);
8201 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8202 sp_adj2 = const0_rtx;
8203 }
8204 else if (frame_size < 0x40007fffL)
8205 {
8206 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8207
8208 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8209 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8210 sp_adj1 = sa_reg;
8211 else
8212 {
8213 sp_adj1 = gen_rtx_REG (DImode, 23);
8214 FRP (emit_move_insn (sp_adj1, sp_adj2));
8215 }
8216 sp_adj2 = GEN_INT (low);
8217 }
8218 else
8219 {
8220 rtx tmp = gen_rtx_REG (DImode, 23);
8221 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8222 3, false));
8223 if (!sp_adj2)
8224 {
8225 /* We can't drop new constants to memory this late, as far as we
8226 know, so build the value up by pieces. */
8227 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8228 -(frame_size < 0)));
8229 gcc_assert (sp_adj2);
8230 }
8231 }
8232
8233 /* From now on, things must be in order. So emit blockages. */
8234
8235 /* Restore the frame pointer. */
8236 if (TARGET_ABI_UNICOSMK)
8237 {
8238 emit_insn (gen_blockage ());
8239 mem = gen_rtx_MEM (DImode,
8240 plus_constant (hard_frame_pointer_rtx, -16));
8241 set_mem_alias_set (mem, alpha_sr_alias_set);
8242 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8243 }
8244 else if (fp_is_frame_pointer)
8245 {
8246 emit_insn (gen_blockage ());
8247 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8248 set_mem_alias_set (mem, alpha_sr_alias_set);
8249 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8250 }
8251 else if (TARGET_ABI_OPEN_VMS)
8252 {
8253 emit_insn (gen_blockage ());
8254 FRP (emit_move_insn (hard_frame_pointer_rtx,
8255 gen_rtx_REG (DImode, vms_save_fp_regno)));
8256 }
8257
8258 /* Restore the stack pointer. */
8259 emit_insn (gen_blockage ());
8260 if (sp_adj2 == const0_rtx)
8261 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8262 else
8263 FRP (emit_move_insn (stack_pointer_rtx,
8264 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8265 }
8266 else
8267 {
8268 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8269 {
8270 emit_insn (gen_blockage ());
8271 FRP (emit_move_insn (hard_frame_pointer_rtx,
8272 gen_rtx_REG (DImode, vms_save_fp_regno)));
8273 }
8274 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8275 {
8276 /* Decrement the frame pointer if the function does not have a
8277 frame. */
8278
8279 emit_insn (gen_blockage ());
8280 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8281 hard_frame_pointer_rtx, constm1_rtx)));
8282 }
8283 }
8284 }
8285 \f
8286 /* Output the rest of the textual info surrounding the epilogue. */
8287
8288 void
8289 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8290 {
8291 #if TARGET_ABI_OPEN_VMS
8292 alpha_write_linkage (file, fnname, decl);
8293 #endif
8294
8295 /* End the function. */
8296 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8297 {
8298 fputs ("\t.end ", file);
8299 assemble_name (file, fnname);
8300 putc ('\n', file);
8301 }
8302 inside_function = FALSE;
8303
8304 /* Output jump tables and the static subroutine information block. */
8305 if (TARGET_ABI_UNICOSMK)
8306 {
8307 unicosmk_output_ssib (file, fnname);
8308 unicosmk_output_deferred_case_vectors (file);
8309 }
8310 }
8311
8312 #if TARGET_ABI_OSF
8313 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8314
8315 In order to avoid the hordes of differences between generated code
8316 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8317 lots of code loading up large constants, generate rtl and emit it
8318 instead of going straight to text.
8319
8320 Not sure why this idea hasn't been explored before... */
8321
8322 static void
8323 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8324 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8325 tree function)
8326 {
8327 HOST_WIDE_INT hi, lo;
8328 rtx this, insn, funexp;
8329
8330 reset_block_changes ();
8331
8332 /* We always require a valid GP. */
8333 emit_insn (gen_prologue_ldgp ());
8334 emit_note (NOTE_INSN_PROLOGUE_END);
8335
8336 /* Find the "this" pointer. If the function returns a structure,
8337 the structure return pointer is in $16. */
8338 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8339 this = gen_rtx_REG (Pmode, 17);
8340 else
8341 this = gen_rtx_REG (Pmode, 16);
8342
8343 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8344 entire constant for the add. */
8345 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8346 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
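  /* HI + LO equals DELTA roughly when DELTA is within the +/-2^31 range
     reachable by an ldah/lda pair; a 64-bit-only delta such as
     ((HOST_WIDE_INT) 1 << 40) fails the test and is built up by
     alpha_emit_set_long_const in the else arm instead.  */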
8347 if (hi + lo == delta)
8348 {
8349 if (hi)
8350 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8351 if (lo)
8352 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8353 }
8354 else
8355 {
8356 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8357 delta, -(delta < 0));
8358 emit_insn (gen_adddi3 (this, this, tmp));
8359 }
8360
8361 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8362 if (vcall_offset)
8363 {
8364 rtx tmp, tmp2;
8365
8366 tmp = gen_rtx_REG (Pmode, 0);
8367 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8368
8369 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8370 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8371 if (hi + lo == vcall_offset)
8372 {
8373 if (hi)
8374 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8375 }
8376 else
8377 {
8378 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8379 vcall_offset, -(vcall_offset < 0));
8380 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8381 lo = 0;
8382 }
8383 if (lo)
8384 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8385 else
8386 tmp2 = tmp;
8387 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8388
8389 emit_insn (gen_adddi3 (this, this, tmp));
8390 }
8391
8392 /* Generate a tail call to the target function. */
8393 if (! TREE_USED (function))
8394 {
8395 assemble_external (function);
8396 TREE_USED (function) = 1;
8397 }
8398 funexp = XEXP (DECL_RTL (function), 0);
8399 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8400 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8401 SIBLING_CALL_P (insn) = 1;
8402
8403 /* Run just enough of rest_of_compilation to get the insns emitted.
8404 There's not really enough bulk here to make other passes such as
8405 instruction scheduling worth while. Note that use_thunk calls
8406 assemble_start_function and assemble_end_function. */
8407 insn = get_insns ();
8408 insn_locators_initialize ();
8409 shorten_branches (insn);
8410 final_start_function (insn, file, 1);
8411 final (insn, file, 1);
8412 final_end_function ();
8413 }
8414 #endif /* TARGET_ABI_OSF */
8415 \f
8416 /* Debugging support. */
8417
8418 #include "gstab.h"
8419
8420 /* Count the number of sdb-related labels that are generated (to find
8421 block start and end boundaries). */
8422
8423 int sdb_label_count = 0;
8424
8425 /* Name of the file containing the current function. */
8426
8427 static const char *current_function_file = "";
8428
8429 /* Offsets to alpha virtual arg/local debugging pointers. */
8430
8431 long alpha_arg_offset;
8432 long alpha_auto_offset;
8433 \f
8434 /* Emit a new filename to a stream. */
8435
8436 void
8437 alpha_output_filename (FILE *stream, const char *name)
8438 {
8439 static int first_time = TRUE;
8440
8441 if (first_time)
8442 {
8443 first_time = FALSE;
8444 ++num_source_filenames;
8445 current_function_file = name;
8446 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8447 output_quoted_string (stream, name);
8448 fprintf (stream, "\n");
8449 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8450 fprintf (stream, "\t#@stabs\n");
8451 }
8452
8453 else if (write_symbols == DBX_DEBUG)
8454 /* dbxout.c will emit an appropriate .stabs directive. */
8455 return;
8456
8457 else if (name != current_function_file
8458 && strcmp (name, current_function_file) != 0)
8459 {
8460 if (inside_function && ! TARGET_GAS)
8461 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8462 else
8463 {
8464 ++num_source_filenames;
8465 current_function_file = name;
8466 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8467 }
8468
8469 output_quoted_string (stream, name);
8470 fprintf (stream, "\n");
8471 }
8472 }
8473 \f
8474 /* Structure to show the current status of registers and memory. */
8475
8476 struct shadow_summary
8477 {
8478 struct {
8479 unsigned int i : 31; /* Mask of int regs */
8480 unsigned int fp : 31; /* Mask of fp regs */
8481 unsigned int mem : 1; /* mem == imem | fpmem */
8482 } used, defd;
8483 };
8484
8485 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8486 to the summary structure. SET is nonzero if the insn is setting the
8487 object, otherwise zero. */
8488
8489 static void
8490 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8491 {
8492 const char *format_ptr;
8493 int i, j;
8494
8495 if (x == 0)
8496 return;
8497
8498 switch (GET_CODE (x))
8499 {
8500 /* ??? Note that this case would be incorrect if the Alpha had a
8501 ZERO_EXTRACT in SET_DEST. */
8502 case SET:
8503 summarize_insn (SET_SRC (x), sum, 0);
8504 summarize_insn (SET_DEST (x), sum, 1);
8505 break;
8506
8507 case CLOBBER:
8508 summarize_insn (XEXP (x, 0), sum, 1);
8509 break;
8510
8511 case USE:
8512 summarize_insn (XEXP (x, 0), sum, 0);
8513 break;
8514
8515 case ASM_OPERANDS:
8516 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8517 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8518 break;
8519
8520 case PARALLEL:
8521 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8522 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8523 break;
8524
8525 case SUBREG:
8526 summarize_insn (SUBREG_REG (x), sum, 0);
8527 break;
8528
8529 case REG:
8530 {
8531 int regno = REGNO (x);
8532 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8533
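	/* Register 31 is $31 and register 63 is $f31; both read as zero
	   and ignore writes on Alpha, so they never carry live values
	   into or out of a trap shadow.  */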
8534 if (regno == 31 || regno == 63)
8535 break;
8536
8537 if (set)
8538 {
8539 if (regno < 32)
8540 sum->defd.i |= mask;
8541 else
8542 sum->defd.fp |= mask;
8543 }
8544 else
8545 {
8546 if (regno < 32)
8547 sum->used.i |= mask;
8548 else
8549 sum->used.fp |= mask;
8550 }
8551 }
8552 break;
8553
8554 case MEM:
8555 if (set)
8556 sum->defd.mem = 1;
8557 else
8558 sum->used.mem = 1;
8559
8560 /* Find the regs used in memory address computation: */
8561 summarize_insn (XEXP (x, 0), sum, 0);
8562 break;
8563
8564 case CONST_INT: case CONST_DOUBLE:
8565 case SYMBOL_REF: case LABEL_REF: case CONST:
8566 case SCRATCH: case ASM_INPUT:
8567 break;
8568
8569 /* Handle common unary and binary ops for efficiency. */
8570 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8571 case MOD: case UDIV: case UMOD: case AND: case IOR:
8572 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8573 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8574 case NE: case EQ: case GE: case GT: case LE:
8575 case LT: case GEU: case GTU: case LEU: case LTU:
8576 summarize_insn (XEXP (x, 0), sum, 0);
8577 summarize_insn (XEXP (x, 1), sum, 0);
8578 break;
8579
8580 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8581 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8582 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8583 case SQRT: case FFS:
8584 summarize_insn (XEXP (x, 0), sum, 0);
8585 break;
8586
8587 default:
8588 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8589 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8590 switch (format_ptr[i])
8591 {
8592 case 'e':
8593 summarize_insn (XEXP (x, i), sum, 0);
8594 break;
8595
8596 case 'E':
8597 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8598 summarize_insn (XVECEXP (x, i, j), sum, 0);
8599 break;
8600
8601 case 'i':
8602 break;
8603
8604 default:
8605 gcc_unreachable ();
8606 }
8607 }
8608 }
8609
8610 /* Ensure a sufficient number of `trapb' insns are in the code when
8611 the user requests code with a trap precision of functions or
8612 instructions.
8613
8614 In naive mode, when the user requests a trap-precision of
8615 "instruction", a trapb is needed after every instruction that may
8616 generate a trap. This ensures that the code is resumption safe but
8617 it is also slow.
8618
8619 When optimizations are turned on, we delay issuing a trapb as long
8620 as possible. In this context, a trap shadow is the sequence of
8621 instructions that starts with a (potentially) trap generating
8622 instruction and extends to the next trapb or call_pal instruction
8623 (but GCC never generates call_pal by itself). We can delay (and
8624 therefore sometimes omit) a trapb subject to the following
8625 conditions:
8626
8627 (a) On entry to the trap shadow, if any Alpha register or memory
8628 location contains a value that is used as an operand value by some
8629 instruction in the trap shadow (live on entry), then no instruction
8630 in the trap shadow may modify the register or memory location.
8631
8632 (b) Within the trap shadow, the computation of the base register
8633 for a memory load or store instruction may not involve using the
8634 result of an instruction that might generate an UNPREDICTABLE
8635 result.
8636
8637 (c) Within the trap shadow, no register may be used more than once
8638 as a destination register. (This is to make life easier for the
8639 trap-handler.)
8640
8641 (d) The trap shadow may not include any branch instructions. */
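/* A hypothetical illustration of condition (c): within a single shadow,
	addt $f1,$f2,$f0
	mult $f3,$f4,$f0
   writes $f0 twice, so a trapb would have to be emitted between the two
   instructions for the trap handler to still identify the faulting
   destination.  (Illustrative only, not output produced by this pass.)  */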
8642
8643 static void
8644 alpha_handle_trap_shadows (void)
8645 {
8646 struct shadow_summary shadow;
8647 int trap_pending, exception_nesting;
8648 rtx i, n;
8649
8650 trap_pending = 0;
8651 exception_nesting = 0;
8652 shadow.used.i = 0;
8653 shadow.used.fp = 0;
8654 shadow.used.mem = 0;
8655 shadow.defd = shadow.used;
8656
8657 for (i = get_insns (); i ; i = NEXT_INSN (i))
8658 {
8659 if (GET_CODE (i) == NOTE)
8660 {
8661 switch (NOTE_LINE_NUMBER (i))
8662 {
8663 case NOTE_INSN_EH_REGION_BEG:
8664 exception_nesting++;
8665 if (trap_pending)
8666 goto close_shadow;
8667 break;
8668
8669 case NOTE_INSN_EH_REGION_END:
8670 exception_nesting--;
8671 if (trap_pending)
8672 goto close_shadow;
8673 break;
8674
8675 case NOTE_INSN_EPILOGUE_BEG:
8676 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8677 goto close_shadow;
8678 break;
8679 }
8680 }
8681 else if (trap_pending)
8682 {
8683 if (alpha_tp == ALPHA_TP_FUNC)
8684 {
8685 if (GET_CODE (i) == JUMP_INSN
8686 && GET_CODE (PATTERN (i)) == RETURN)
8687 goto close_shadow;
8688 }
8689 else if (alpha_tp == ALPHA_TP_INSN)
8690 {
8691 if (optimize > 0)
8692 {
8693 struct shadow_summary sum;
8694
8695 sum.used.i = 0;
8696 sum.used.fp = 0;
8697 sum.used.mem = 0;
8698 sum.defd = sum.used;
8699
8700 switch (GET_CODE (i))
8701 {
8702 case INSN:
8703 /* Annoyingly, get_attr_trap will die on these. */
8704 if (GET_CODE (PATTERN (i)) == USE
8705 || GET_CODE (PATTERN (i)) == CLOBBER)
8706 break;
8707
8708 summarize_insn (PATTERN (i), &sum, 0);
8709
8710 if ((sum.defd.i & shadow.defd.i)
8711 || (sum.defd.fp & shadow.defd.fp))
8712 {
8713 /* (c) would be violated */
8714 goto close_shadow;
8715 }
8716
8717 /* Combine shadow with summary of current insn: */
8718 shadow.used.i |= sum.used.i;
8719 shadow.used.fp |= sum.used.fp;
8720 shadow.used.mem |= sum.used.mem;
8721 shadow.defd.i |= sum.defd.i;
8722 shadow.defd.fp |= sum.defd.fp;
8723 shadow.defd.mem |= sum.defd.mem;
8724
8725 if ((sum.defd.i & shadow.used.i)
8726 || (sum.defd.fp & shadow.used.fp)
8727 || (sum.defd.mem & shadow.used.mem))
8728 {
8729 /* (a) would be violated (also takes care of (b)) */
8730 gcc_assert (get_attr_trap (i) != TRAP_YES
8731 || (!(sum.defd.i & sum.used.i)
8732 && !(sum.defd.fp & sum.used.fp)));
8733
8734 goto close_shadow;
8735 }
8736 break;
8737
8738 case JUMP_INSN:
8739 case CALL_INSN:
8740 case CODE_LABEL:
8741 goto close_shadow;
8742
8743 default:
8744 gcc_unreachable ();
8745 }
8746 }
8747 else
8748 {
8749 close_shadow:
8750 n = emit_insn_before (gen_trapb (), i);
8751 PUT_MODE (n, TImode);
8752 PUT_MODE (i, TImode);
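	      /* TImode is the marker the grouping code below treats as an
		 issue-group boundary (see alphaev4_next_group and
		 alphaev5_next_group), so marking both the trapb and the
		 trapping insn presumably keeps the later alignment pass
		 from merging groups across the barrier.  */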
8753 trap_pending = 0;
8754 shadow.used.i = 0;
8755 shadow.used.fp = 0;
8756 shadow.used.mem = 0;
8757 shadow.defd = shadow.used;
8758 }
8759 }
8760 }
8761
8762 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8763 && GET_CODE (i) == INSN
8764 && GET_CODE (PATTERN (i)) != USE
8765 && GET_CODE (PATTERN (i)) != CLOBBER
8766 && get_attr_trap (i) == TRAP_YES)
8767 {
8768 if (optimize && !trap_pending)
8769 summarize_insn (PATTERN (i), &shadow, 0);
8770 trap_pending = 1;
8771 }
8772 }
8773 }
8774 \f
8775 /* Alpha can only issue instruction groups simultaneously if they are
8776 suitably aligned. This is very processor-specific. */
8777 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8778 that are marked "fake". These instructions do not exist on that target,
8779 but it is possible to see these insns with deranged combinations of
8780 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8781 choose a result at random. */
8782
8783 enum alphaev4_pipe {
8784 EV4_STOP = 0,
8785 EV4_IB0 = 1,
8786 EV4_IB1 = 2,
8787 EV4_IBX = 4
8788 };
8789
8790 enum alphaev5_pipe {
8791 EV5_STOP = 0,
8792 EV5_NONE = 1,
8793 EV5_E01 = 2,
8794 EV5_E0 = 4,
8795 EV5_E1 = 8,
8796 EV5_FAM = 16,
8797 EV5_FA = 32,
8798 EV5_FM = 64
8799 };
8800
8801 static enum alphaev4_pipe
8802 alphaev4_insn_pipe (rtx insn)
8803 {
8804 if (recog_memoized (insn) < 0)
8805 return EV4_STOP;
8806 if (get_attr_length (insn) != 4)
8807 return EV4_STOP;
8808
8809 switch (get_attr_type (insn))
8810 {
8811 case TYPE_ILD:
8812 case TYPE_LDSYM:
8813 case TYPE_FLD:
8814 case TYPE_LD_L:
8815 return EV4_IBX;
8816
8817 case TYPE_IADD:
8818 case TYPE_ILOG:
8819 case TYPE_ICMOV:
8820 case TYPE_ICMP:
8821 case TYPE_FST:
8822 case TYPE_SHIFT:
8823 case TYPE_IMUL:
8824 case TYPE_FBR:
8825 case TYPE_MVI: /* fake */
8826 return EV4_IB0;
8827
8828 case TYPE_IST:
8829 case TYPE_MISC:
8830 case TYPE_IBR:
8831 case TYPE_JSR:
8832 case TYPE_CALLPAL:
8833 case TYPE_FCPYS:
8834 case TYPE_FCMOV:
8835 case TYPE_FADD:
8836 case TYPE_FDIV:
8837 case TYPE_FMUL:
8838 case TYPE_ST_C:
8839 case TYPE_MB:
8840 case TYPE_FSQRT: /* fake */
8841 case TYPE_FTOI: /* fake */
8842 case TYPE_ITOF: /* fake */
8843 return EV4_IB1;
8844
8845 default:
8846 gcc_unreachable ();
8847 }
8848 }
8849
8850 static enum alphaev5_pipe
8851 alphaev5_insn_pipe (rtx insn)
8852 {
8853 if (recog_memoized (insn) < 0)
8854 return EV5_STOP;
8855 if (get_attr_length (insn) != 4)
8856 return EV5_STOP;
8857
8858 switch (get_attr_type (insn))
8859 {
8860 case TYPE_ILD:
8861 case TYPE_FLD:
8862 case TYPE_LDSYM:
8863 case TYPE_IADD:
8864 case TYPE_ILOG:
8865 case TYPE_ICMOV:
8866 case TYPE_ICMP:
8867 return EV5_E01;
8868
8869 case TYPE_IST:
8870 case TYPE_FST:
8871 case TYPE_SHIFT:
8872 case TYPE_IMUL:
8873 case TYPE_MISC:
8874 case TYPE_MVI:
8875 case TYPE_LD_L:
8876 case TYPE_ST_C:
8877 case TYPE_MB:
8878 case TYPE_FTOI: /* fake */
8879 case TYPE_ITOF: /* fake */
8880 return EV5_E0;
8881
8882 case TYPE_IBR:
8883 case TYPE_JSR:
8884 case TYPE_CALLPAL:
8885 return EV5_E1;
8886
8887 case TYPE_FCPYS:
8888 return EV5_FAM;
8889
8890 case TYPE_FBR:
8891 case TYPE_FCMOV:
8892 case TYPE_FADD:
8893 case TYPE_FDIV:
8894 case TYPE_FSQRT: /* fake */
8895 return EV5_FA;
8896
8897 case TYPE_FMUL:
8898 return EV5_FM;
8899
8900 default:
8901 gcc_unreachable ();
8902 }
8903 }
8904
8905 /* IN_USE is a mask of the slots currently filled within the insn group.
8906 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8907 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8908
8909 LEN is, of course, the length of the group in bytes. */
8910
8911 static rtx
8912 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8913 {
8914 int len, in_use;
8915
8916 len = in_use = 0;
8917
8918 if (! INSN_P (insn)
8919 || GET_CODE (PATTERN (insn)) == CLOBBER
8920 || GET_CODE (PATTERN (insn)) == USE)
8921 goto next_and_done;
8922
8923 while (1)
8924 {
8925 enum alphaev4_pipe pipe;
8926
8927 pipe = alphaev4_insn_pipe (insn);
8928 switch (pipe)
8929 {
8930 case EV4_STOP:
8931 /* Force complex instructions to start new groups. */
8932 if (in_use)
8933 goto done;
8934
8935 /* If this is a completely unrecognized insn, it's an asm.
8936 We don't know how long it is, so record length as -1 to
8937 signal a needed realignment. */
8938 if (recog_memoized (insn) < 0)
8939 len = -1;
8940 else
8941 len = get_attr_length (insn);
8942 goto next_and_done;
8943
8944 case EV4_IBX:
8945 if (in_use & EV4_IB0)
8946 {
8947 if (in_use & EV4_IB1)
8948 goto done;
8949 in_use |= EV4_IB1;
8950 }
8951 else
8952 in_use |= EV4_IB0 | EV4_IBX;
8953 break;
8954
8955 case EV4_IB0:
8956 if (in_use & EV4_IB0)
8957 {
8958 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8959 goto done;
8960 in_use |= EV4_IB1;
8961 }
8962 in_use |= EV4_IB0;
8963 break;
8964
8965 case EV4_IB1:
8966 if (in_use & EV4_IB1)
8967 goto done;
8968 in_use |= EV4_IB1;
8969 break;
8970
8971 default:
8972 gcc_unreachable ();
8973 }
8974 len += 4;
8975
8976 /* Haifa doesn't do well scheduling branches. */
8977 if (GET_CODE (insn) == JUMP_INSN)
8978 goto next_and_done;
8979
8980 next:
8981 insn = next_nonnote_insn (insn);
8982
8983 if (!insn || ! INSN_P (insn))
8984 goto done;
8985
8986 /* Let Haifa tell us where it thinks insn group boundaries are. */
8987 if (GET_MODE (insn) == TImode)
8988 goto done;
8989
8990 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8991 goto next;
8992 }
8993
8994 next_and_done:
8995 insn = next_nonnote_insn (insn);
8996
8997 done:
8998 *plen = len;
8999 *pin_use = in_use;
9000 return insn;
9001 }
9002
9003 /* IN_USE is a mask of the slots currently filled within the insn group.
9004 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9005 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9006
9007 LEN is, of course, the length of the group in bytes. */
9008
9009 static rtx
9010 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9011 {
9012 int len, in_use;
9013
9014 len = in_use = 0;
9015
9016 if (! INSN_P (insn)
9017 || GET_CODE (PATTERN (insn)) == CLOBBER
9018 || GET_CODE (PATTERN (insn)) == USE)
9019 goto next_and_done;
9020
9021 while (1)
9022 {
9023 enum alphaev5_pipe pipe;
9024
9025 pipe = alphaev5_insn_pipe (insn);
9026 switch (pipe)
9027 {
9028 case EV5_STOP:
9029 /* Force complex instructions to start new groups. */
9030 if (in_use)
9031 goto done;
9032
9033 /* If this is a completely unrecognized insn, it's an asm.
9034 We don't know how long it is, so record length as -1 to
9035 signal a needed realignment. */
9036 if (recog_memoized (insn) < 0)
9037 len = -1;
9038 else
9039 len = get_attr_length (insn);
9040 goto next_and_done;
9041
9042 /* ??? In most of the cases below, we would like to assert that this
9043 never happens, as it would indicate an error either in Haifa or
9044 in the scheduling description. Unfortunately, Haifa never
9045 schedules the last instruction of the BB, so we don't have
9046 an accurate TI bit to go off of.  */
9047 case EV5_E01:
9048 if (in_use & EV5_E0)
9049 {
9050 if (in_use & EV5_E1)
9051 goto done;
9052 in_use |= EV5_E1;
9053 }
9054 else
9055 in_use |= EV5_E0 | EV5_E01;
9056 break;
9057
9058 case EV5_E0:
9059 if (in_use & EV5_E0)
9060 {
9061 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9062 goto done;
9063 in_use |= EV5_E1;
9064 }
9065 in_use |= EV5_E0;
9066 break;
9067
9068 case EV5_E1:
9069 if (in_use & EV5_E1)
9070 goto done;
9071 in_use |= EV5_E1;
9072 break;
9073
9074 case EV5_FAM:
9075 if (in_use & EV5_FA)
9076 {
9077 if (in_use & EV5_FM)
9078 goto done;
9079 in_use |= EV5_FM;
9080 }
9081 else
9082 in_use |= EV5_FA | EV5_FAM;
9083 break;
9084
9085 case EV5_FA:
9086 if (in_use & EV5_FA)
9087 goto done;
9088 in_use |= EV5_FA;
9089 break;
9090
9091 case EV5_FM:
9092 if (in_use & EV5_FM)
9093 goto done;
9094 in_use |= EV5_FM;
9095 break;
9096
9097 case EV5_NONE:
9098 break;
9099
9100 default:
9101 gcc_unreachable ();
9102 }
9103 len += 4;
9104
9105 /* Haifa doesn't do well scheduling branches. */
9106 /* ??? If this is predicted not-taken, slotting continues, except
9107 that no more IBR, FBR, or JSR insns may be slotted. */
9108 if (GET_CODE (insn) == JUMP_INSN)
9109 goto next_and_done;
9110
9111 next:
9112 insn = next_nonnote_insn (insn);
9113
9114 if (!insn || ! INSN_P (insn))
9115 goto done;
9116
9117 /* Let Haifa tell us where it thinks insn group boundaries are. */
9118 if (GET_MODE (insn) == TImode)
9119 goto done;
9120
9121 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9122 goto next;
9123 }
9124
9125 next_and_done:
9126 insn = next_nonnote_insn (insn);
9127
9128 done:
9129 *plen = len;
9130 *pin_use = in_use;
9131 return insn;
9132 }
9133
9134 static rtx
9135 alphaev4_next_nop (int *pin_use)
9136 {
9137 int in_use = *pin_use;
9138 rtx nop;
9139
9140 if (!(in_use & EV4_IB0))
9141 {
9142 in_use |= EV4_IB0;
9143 nop = gen_nop ();
9144 }
9145 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9146 {
9147 in_use |= EV4_IB1;
9148 nop = gen_nop ();
9149 }
9150 else if (TARGET_FP && !(in_use & EV4_IB1))
9151 {
9152 in_use |= EV4_IB1;
9153 nop = gen_fnop ();
9154 }
9155 else
9156 nop = gen_unop ();
9157
9158 *pin_use = in_use;
9159 return nop;
9160 }
9161
9162 static rtx
9163 alphaev5_next_nop (int *pin_use)
9164 {
9165 int in_use = *pin_use;
9166 rtx nop;
9167
9168 if (!(in_use & EV5_E1))
9169 {
9170 in_use |= EV5_E1;
9171 nop = gen_nop ();
9172 }
9173 else if (TARGET_FP && !(in_use & EV5_FA))
9174 {
9175 in_use |= EV5_FA;
9176 nop = gen_fnop ();
9177 }
9178 else if (TARGET_FP && !(in_use & EV5_FM))
9179 {
9180 in_use |= EV5_FM;
9181 nop = gen_fnop ();
9182 }
9183 else
9184 nop = gen_unop ();
9185
9186 *pin_use = in_use;
9187 return nop;
9188 }
9189
9190 /* The instruction group alignment main loop. */
9191
9192 static void
9193 alpha_align_insns (unsigned int max_align,
9194 rtx (*next_group) (rtx, int *, int *),
9195 rtx (*next_nop) (int *))
9196 {
9197 /* ALIGN is the known alignment for the insn group. */
9198 unsigned int align;
9199 /* OFS is the offset of the current insn in the insn group. */
9200 int ofs;
9201 int prev_in_use, in_use, len, ldgp;
9202 rtx i, next;
9203
9204 /* Let shorten_branches take care of assigning alignments to code labels. */
9205 shorten_branches (get_insns ());
9206
9207 if (align_functions < 4)
9208 align = 4;
9209 else if ((unsigned int) align_functions < max_align)
9210 align = align_functions;
9211 else
9212 align = max_align;
9213
9214 ofs = prev_in_use = 0;
9215 i = get_insns ();
9216 if (GET_CODE (i) == NOTE)
9217 i = next_nonnote_insn (i);
9218
9219 ldgp = alpha_function_needs_gp ? 8 : 0;
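  /* The prologue ldgp sequence expands to an ldah/lda pair computing the
     GP -- two 4-byte instructions -- hence the 8 bytes that must stay
     free of padding.  */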
9220
9221 while (i)
9222 {
9223 next = (*next_group) (i, &in_use, &len);
9224
9225 /* When we see a label, resync alignment etc. */
9226 if (GET_CODE (i) == CODE_LABEL)
9227 {
9228 unsigned int new_align = 1 << label_to_alignment (i);
9229
9230 if (new_align >= align)
9231 {
9232 align = new_align < max_align ? new_align : max_align;
9233 ofs = 0;
9234 }
9235
9236 else if (ofs & (new_align-1))
9237 ofs = (ofs | (new_align-1)) + 1;
9238 gcc_assert (!len);
9239 }
9240
9241 /* Handle complex instructions specially. */
9242 else if (in_use == 0)
9243 {
9244 /* Asms will have length < 0. This is a signal that we have
9245 lost alignment knowledge. Assume, however, that the asm
9246 will not mis-align instructions. */
9247 if (len < 0)
9248 {
9249 ofs = 0;
9250 align = 4;
9251 len = 0;
9252 }
9253 }
9254
9255 /* If the known alignment is smaller than the recognized insn group,
9256 realign the output. */
9257 else if ((int) align < len)
9258 {
9259 unsigned int new_log_align = len > 8 ? 4 : 3;
9260 rtx prev, where;
9261
9262 where = prev = prev_nonnote_insn (i);
9263 if (!where || GET_CODE (where) != CODE_LABEL)
9264 where = i;
9265
9266 /* Can't realign between a call and its gp reload. */
9267 if (! (TARGET_EXPLICIT_RELOCS
9268 && prev && GET_CODE (prev) == CALL_INSN))
9269 {
9270 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9271 align = 1 << new_log_align;
9272 ofs = 0;
9273 }
9274 }
9275
9276 /* We may not insert padding inside the initial ldgp sequence. */
9277 else if (ldgp > 0)
9278 ldgp -= len;
9279
9280 /* If the group won't fit in the same aligned block as the previous group,
9281 we need to add padding to keep the group together. Rather
9282 than simply leaving the insn filling to the assembler, we
9283 can make use of the knowledge of what sorts of instructions
9284 were issued in the previous group to make sure that all of
9285 the added nops are really free. */
9286 else if (ofs + len > (int) align)
9287 {
9288 int nop_count = (align - ofs) / 4;
9289 rtx where;
9290
9291 /* Insert nops before labels, branches, and calls to truly merge
9292 the execution of the nops with the previous instruction group. */
9293 where = prev_nonnote_insn (i);
9294 if (where)
9295 {
9296 if (GET_CODE (where) == CODE_LABEL)
9297 {
9298 rtx where2 = prev_nonnote_insn (where);
9299 if (where2 && GET_CODE (where2) == JUMP_INSN)
9300 where = where2;
9301 }
9302 else if (GET_CODE (where) == INSN)
9303 where = i;
9304 }
9305 else
9306 where = i;
9307
9308 do
9309 emit_insn_before ((*next_nop)(&prev_in_use), where);
9310 while (--nop_count);
9311 ofs = 0;
9312 }
9313
9314 ofs = (ofs + len) & (align - 1);
9315 prev_in_use = in_use;
9316 i = next;
9317 }
9318 }
9319 \f
9320 /* Machine dependent reorg pass. */
9321
9322 static void
9323 alpha_reorg (void)
9324 {
9325 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9326 alpha_handle_trap_shadows ();
9327
9328 /* Due to the number of extra trapb insns, don't bother fixing up
9329 alignment when trap precision is instruction. Moreover, we can
9330 only do our job when sched2 is run. */
9331 if (optimize && !optimize_size
9332 && alpha_tp != ALPHA_TP_INSN
9333 && flag_schedule_insns_after_reload)
9334 {
9335 if (alpha_tune == PROCESSOR_EV4)
9336 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9337 else if (alpha_tune == PROCESSOR_EV5)
9338 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9339 }
9340 }
9341 \f
9342 #if !TARGET_ABI_UNICOSMK
9343
9344 #ifdef HAVE_STAMP_H
9345 #include <stamp.h>
9346 #endif
9347
9348 static void
9349 alpha_file_start (void)
9350 {
9351 #ifdef OBJECT_FORMAT_ELF
9352 /* If emitting dwarf2 debug information, we cannot generate a .file
9353 directive to start the file, as it will conflict with dwarf2out
9354 file numbers. So it's only useful when emitting mdebug output. */
9355 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9356 #endif
9357
9358 default_file_start ();
9359 #ifdef MS_STAMP
9360 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9361 #endif
9362
9363 fputs ("\t.set noreorder\n", asm_out_file);
9364 fputs ("\t.set volatile\n", asm_out_file);
9365 if (!TARGET_ABI_OPEN_VMS)
9366 fputs ("\t.set noat\n", asm_out_file);
9367 if (TARGET_EXPLICIT_RELOCS)
9368 fputs ("\t.set nomacro\n", asm_out_file);
9369 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9370 {
9371 const char *arch;
9372
9373 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9374 arch = "ev6";
9375 else if (TARGET_MAX)
9376 arch = "pca56";
9377 else if (TARGET_BWX)
9378 arch = "ev56";
9379 else if (alpha_cpu == PROCESSOR_EV5)
9380 arch = "ev5";
9381 else
9382 arch = "ev4";
9383
9384 fprintf (asm_out_file, "\t.arch %s\n", arch);
9385 }
9386 }
9387 #endif
9388
9389 #ifdef OBJECT_FORMAT_ELF
9390
9391 /* Return a section for X. The only special thing we do here is to
9392 honor small data. */
9393
9394 static section *
9395 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9396 unsigned HOST_WIDE_INT align)
9397 {
9398 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9399 /* ??? Consider using mergeable sdata sections. */
9400 return sdata_section;
9401 else
9402 return default_elf_select_rtx_section (mode, x, align);
9403 }
9404
9405 #endif /* OBJECT_FORMAT_ELF */
9406 \f
9407 /* Structure to collect function names for final output in link section. */
9408 /* Note that items marked with GTY can't be ifdef'ed out. */
9409
9410 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9411 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9412
9413 struct alpha_links GTY(())
9414 {
9415 int num;
9416 rtx linkage;
9417 enum links_kind lkind;
9418 enum reloc_kind rkind;
9419 };
9420
9421 struct alpha_funcs GTY(())
9422 {
9423 int num;
9424 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9425 links;
9426 };
9427
9428 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9429 splay_tree alpha_links_tree;
9430 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9431 splay_tree alpha_funcs_tree;
9432
9433 static GTY(()) int alpha_funcs_num;
9434
9435 #if TARGET_ABI_OPEN_VMS
9436
9437 /* Return the VMS argument type corresponding to MODE. */
9438
9439 enum avms_arg_type
9440 alpha_arg_type (enum machine_mode mode)
9441 {
9442 switch (mode)
9443 {
9444 case SFmode:
9445 return TARGET_FLOAT_VAX ? FF : FS;
9446 case DFmode:
9447 return TARGET_FLOAT_VAX ? FD : FT;
9448 default:
9449 return I64;
9450 }
9451 }
9452
9453 /* Return an rtx for an integer representing the VMS Argument Information
9454 register value. */
9455
9456 rtx
9457 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9458 {
9459 unsigned HOST_WIDE_INT regval = cum.num_args;
9460 int i;
9461
9462 for (i = 0; i < 6; i++)
9463 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
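  /* Illustratively, a two-argument call thus yields
     regval == 2 | (atypes[0] << 8) | (atypes[1] << 11): the argument
     count in the low bits and one 3-bit type code per argument register
     above them.  */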
9464
9465 return GEN_INT (regval);
9466 }
9467 \f
9468 /* Make (or fake) a .linkage entry for a function call.
9469
9470 IS_LOCAL is 0 if the name is used in a call, 1 if it is used in a definition.
9471
9472 Return a SYMBOL_REF rtx for the linkage. */
9473
9474 rtx
9475 alpha_need_linkage (const char *name, int is_local)
9476 {
9477 splay_tree_node node;
9478 struct alpha_links *al;
9479
9480 if (name[0] == '*')
9481 name++;
9482
9483 if (is_local)
9484 {
9485 struct alpha_funcs *cfaf;
9486
9487 if (!alpha_funcs_tree)
9488 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9489 splay_tree_compare_pointers);
9490
9491 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9492
9493 cfaf->links = 0;
9494 cfaf->num = ++alpha_funcs_num;
9495
9496 splay_tree_insert (alpha_funcs_tree,
9497 (splay_tree_key) current_function_decl,
9498 (splay_tree_value) cfaf);
9499 }
9500
9501 if (alpha_links_tree)
9502 {
9503 /* Is this name already defined? */
9504
9505 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9506 if (node)
9507 {
9508 al = (struct alpha_links *) node->value;
9509 if (is_local)
9510 {
9511 /* Defined here but external assumed. */
9512 if (al->lkind == KIND_EXTERN)
9513 al->lkind = KIND_LOCAL;
9514 }
9515 else
9516 {
9517 /* Used here but unused assumed. */
9518 if (al->lkind == KIND_UNUSED)
9519 al->lkind = KIND_LOCAL;
9520 }
9521 return al->linkage;
9522 }
9523 }
9524 else
9525 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9526
9527 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9528 name = ggc_strdup (name);
9529
9530 /* Assume external if no definition. */
9531 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9532
9533 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9534 get_identifier (name);
9535
9536 /* Construct a SYMBOL_REF for us to call. */
9537 {
9538 size_t name_len = strlen (name);
9539 char *linksym = alloca (name_len + 6);
9540 linksym[0] = '$';
9541 memcpy (linksym + 1, name, name_len);
9542 memcpy (linksym + 1 + name_len, "..lk", 5);
9543 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9544 ggc_alloc_string (linksym, name_len + 5));
9545 }
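  /* E.g. a reference to "foo" yields the SYMBOL_REF "$foo..lk" here; the
     per-function variant built in alpha_use_linkage below additionally
     encodes the function number, as in "$3..foo..lk".  */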
9546
9547 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9548 (splay_tree_value) al);
9549
9550 return al->linkage;
9551 }
9552
9553 rtx
9554 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9555 {
9556 splay_tree_node cfunnode;
9557 struct alpha_funcs *cfaf;
9558 struct alpha_links *al;
9559 const char *name = XSTR (linkage, 0);
9560
9561 cfaf = (struct alpha_funcs *) 0;
9562 al = (struct alpha_links *) 0;
9563
9564 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9565 cfaf = (struct alpha_funcs *) cfunnode->value;
9566
9567 if (cfaf->links)
9568 {
9569 splay_tree_node lnode;
9570
9571 /* Is this name already defined? */
9572
9573 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9574 if (lnode)
9575 al = (struct alpha_links *) lnode->value;
9576 }
9577 else
9578 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9579
9580 if (!al)
9581 {
9582 size_t name_len;
9583 size_t buflen;
9584 char buf [512];
9585 char *linksym;
9586 splay_tree_node node = 0;
9587 struct alpha_links *anl;
9588
9589 if (name[0] == '*')
9590 name++;
9591
9592 name_len = strlen (name);
9593
9594 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9595 al->num = cfaf->num;
9596
9597 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9598 if (node)
9599 {
9600 anl = (struct alpha_links *) node->value;
9601 al->lkind = anl->lkind;
9602 }
9603
9604 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9605 buflen = strlen (buf);
9606 linksym = alloca (buflen + 1);
9607 memcpy (linksym, buf, buflen + 1);
9608
9609 al->linkage = gen_rtx_SYMBOL_REF
9610 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9611
9612 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9613 (splay_tree_value) al);
9614 }
9615
9616 if (rflag)
9617 al->rkind = KIND_CODEADDR;
9618 else
9619 al->rkind = KIND_LINKAGE;
9620
9621 if (lflag)
9622 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9623 else
9624 return al->linkage;
9625 }
9626
9627 static int
9628 alpha_write_one_linkage (splay_tree_node node, void *data)
9629 {
9630 const char *const name = (const char *) node->key;
9631 struct alpha_links *link = (struct alpha_links *) node->value;
9632 FILE *stream = (FILE *) data;
9633
9634 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9635 if (link->rkind == KIND_CODEADDR)
9636 {
9637 if (link->lkind == KIND_LOCAL)
9638 {
9639 /* Local and used */
9640 fprintf (stream, "\t.quad %s..en\n", name);
9641 }
9642 else
9643 {
9644 /* External and used, request code address. */
9645 fprintf (stream, "\t.code_address %s\n", name);
9646 }
9647 }
9648 else
9649 {
9650 if (link->lkind == KIND_LOCAL)
9651 {
9652 /* Local and used, build linkage pair. */
9653 fprintf (stream, "\t.quad %s..en\n", name);
9654 fprintf (stream, "\t.quad %s\n", name);
9655 }
9656 else
9657 {
9658 /* External and used, request linkage pair. */
9659 fprintf (stream, "\t.linkage %s\n", name);
9660 }
9661 }
9662
9663 return 0;
9664 }
9665
9666 static void
9667 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9668 {
9669 splay_tree_node node;
9670 struct alpha_funcs *func;
9671
9672 fprintf (stream, "\t.link\n");
9673 fprintf (stream, "\t.align 3\n");
9674 in_section = NULL;
9675
9676 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9677 func = (struct alpha_funcs *) node->value;
9678
9679 fputs ("\t.name ", stream);
9680 assemble_name (stream, funname);
9681 fputs ("..na\n", stream);
9682 ASM_OUTPUT_LABEL (stream, funname);
9683 fprintf (stream, "\t.pdesc ");
9684 assemble_name (stream, funname);
9685 fprintf (stream, "..en,%s\n",
9686 alpha_procedure_type == PT_STACK ? "stack"
9687 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9688
9689 if (func->links)
9690 {
9691 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9692 /* splay_tree_delete (func->links); */
9693 }
9694 }
9695
9696 /* Given a decl, a section name, and whether the decl initializer
9697 has relocs, choose attributes for the section. */
9698
9699 #define SECTION_VMS_OVERLAY SECTION_FORGET
9700 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9701 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9702
9703 static unsigned int
9704 vms_section_type_flags (tree decl, const char *name, int reloc)
9705 {
9706 unsigned int flags = default_section_type_flags (decl, name, reloc);
9707
9708 if (decl && DECL_ATTRIBUTES (decl)
9709 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9710 flags |= SECTION_VMS_OVERLAY;
9711 if (decl && DECL_ATTRIBUTES (decl)
9712 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9713 flags |= SECTION_VMS_GLOBAL;
9714 if (decl && DECL_ATTRIBUTES (decl)
9715 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9716 flags |= SECTION_VMS_INITIALIZE;
9717
9718 return flags;
9719 }
9720
9721 /* Switch to an arbitrary section NAME with attributes as specified
9722 by FLAGS. (This hook receives no separate alignment argument.) */
9724
9725 static void
9726 vms_asm_named_section (const char *name, unsigned int flags,
9727 tree decl ATTRIBUTE_UNUSED)
9728 {
9729 fputc ('\n', asm_out_file);
9730 fprintf (asm_out_file, ".section\t%s", name);
9731
9732 if (flags & SECTION_VMS_OVERLAY)
9733 fprintf (asm_out_file, ",OVR");
9734 if (flags & SECTION_VMS_GLOBAL)
9735 fprintf (asm_out_file, ",GBL");
9736 if (flags & SECTION_VMS_INITIALIZE)
9737 fprintf (asm_out_file, ",NOMOD");
9738 if (flags & SECTION_DEBUG)
9739 fprintf (asm_out_file, ",NOWRT");
9740
9741 fputc ('\n', asm_out_file);
9742 }
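/* Illustrative only: a hypothetical section "mysec" carrying the "global"
   attribute would come out of the routine above as ".section\tmysec,GBL".  */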
9743
9744 /* Record an element in the table of global constructors. SYMBOL is
9745 a SYMBOL_REF of the function to be called; PRIORITY is a number
9746 between 0 and MAX_INIT_PRIORITY.
9747
9748 Differs from default_ctors_section_asm_out_constructor in that the
9749 width of the .ctors entry is always 64 bits, rather than the 32 bits
9750 used by a normal pointer. */
9751
9752 static void
9753 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9754 {
9755 switch_to_section (ctors_section);
9756 assemble_align (BITS_PER_WORD);
9757 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9758 }
9759
9760 static void
9761 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9762 {
9763 switch_to_section (dtors_section);
9764 assemble_align (BITS_PER_WORD);
9765 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9766 }
9767 #else
9768
9769 rtx
9770 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9771 int is_local ATTRIBUTE_UNUSED)
9772 {
9773 return NULL_RTX;
9774 }
9775
9776 rtx
9777 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9778 tree cfundecl ATTRIBUTE_UNUSED,
9779 int lflag ATTRIBUTE_UNUSED,
9780 int rflag ATTRIBUTE_UNUSED)
9781 {
9782 return NULL_RTX;
9783 }
9784
9785 #endif /* TARGET_ABI_OPEN_VMS */
9786 \f
9787 #if TARGET_ABI_UNICOSMK
9788
9789 /* This evaluates to true if we do not know how to pass TYPE solely in
9790 registers. This is the case for all arguments that do not fit in two
9791 registers. */
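
/* For instance, a 24-byte structure needs three 8-byte argument words and
   is therefore forced onto the stack, while a 16-byte one can still be
   passed in two registers, assuming it is neither variable-sized nor
   TREE_ADDRESSABLE.  */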
9792
9793 static bool
9794 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9795 {
9796 if (type == NULL)
9797 return false;
9798
9799 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9800 return true;
9801 if (TREE_ADDRESSABLE (type))
9802 return true;
9803
9804 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9805 }
9806
9807 /* Define the offset between two registers, one to be eliminated, and the
9808 other its replacement, at the start of a routine. */
9809
9810 int
9811 unicosmk_initial_elimination_offset (int from, int to)
9812 {
9813 int fixed_size;
9814
9815 fixed_size = alpha_sa_size();
9816 if (fixed_size != 0)
9817 fixed_size += 48;
9818
9819 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9820 return -fixed_size;
9821 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9822 return 0;
9823 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9824 return (ALPHA_ROUND (current_function_outgoing_args_size)
9825 + ALPHA_ROUND (get_frame_size()));
9826 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9827 return (ALPHA_ROUND (fixed_size)
9828 + ALPHA_ROUND (get_frame_size()
9829 + current_function_outgoing_args_size));
9830 else
9831 gcc_unreachable ();
9832 }
9833
9834 /* Output the module name for .ident and .end directives. We have to strip
9835 directories and make sure that the module name starts with a letter
9836 or '$'. */
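
/* E.g. a main input file named "386-tune.c" gets a '$' prefix because its
   basename does not start with a letter; clean_symbol_name then rewrites
   any remaining characters that are not valid in an assembler symbol.  */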
9837
9838 static void
9839 unicosmk_output_module_name (FILE *file)
9840 {
9841 const char *name = lbasename (main_input_filename);
9842 unsigned len = strlen (name);
9843 char *clean_name = alloca (len + 2);
9844 char *ptr = clean_name;
9845
9846 /* CAM only accepts module names that start with a letter or '$'. We
9847 prefix the module name with a '$' if necessary. */
9848
9849 if (!ISALPHA (*name))
9850 *ptr++ = '$';
9851 memcpy (ptr, name, len + 1);
9852 clean_symbol_name (clean_name);
9853 fputs (clean_name, file);
9854 }
9855
9856 /* Output the definition of a common variable. */
9857
9858 void
9859 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9860 {
9861 tree name_tree;
9862 printf ("T3E__: common %s\n", name);
9863
9864 in_section = NULL;
9865 fputs("\t.endp\n\n\t.psect ", file);
9866 assemble_name(file, name);
9867 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9868 fprintf(file, "\t.byte\t0:%d\n", size);
9869
9870 /* Mark the symbol as defined in this module. */
9871 name_tree = get_identifier (name);
9872 TREE_ASM_WRITTEN (name_tree) = 1;
9873 }
9874
9875 #define SECTION_PUBLIC SECTION_MACH_DEP
9876 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9877 static int current_section_align;
9878
9879 /* A get_unnamed_section callback for switching to the text section. */
9880
9881 static void
9882 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9883 {
9884 static int count = 0;
9885 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9886 }
9887
9888 /* A get_unnamed_section callback for switching to the data section. */
9889
9890 static void
9891 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9892 {
9893 static int count = 1;
9894 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9895 }
9896
9897 /* Implement TARGET_ASM_INIT_SECTIONS.
9898
9899 The Cray assembler is really weird with respect to sections. It has only
9900 named sections and you can't reopen a section once it has been closed.
9901 This means that we have to generate unique names whenever we want to
9902 reenter the text or the data section. */
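
/* The callbacks above therefore emit a fresh psect name each time the text
   or data section is re-entered: gcc@text___0, gcc@text___1, ... and
   gcc@data___1, gcc@data___2, ...  */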
9903
9904 static void
9905 unicosmk_init_sections (void)
9906 {
9907 text_section = get_unnamed_section (SECTION_CODE,
9908 unicosmk_output_text_section_asm_op,
9909 NULL);
9910 data_section = get_unnamed_section (SECTION_WRITE,
9911 unicosmk_output_data_section_asm_op,
9912 NULL);
9913 readonly_data_section = data_section;
9914 }
9915
9916 static unsigned int
9917 unicosmk_section_type_flags (tree decl, const char *name,
9918 int reloc ATTRIBUTE_UNUSED)
9919 {
9920 unsigned int flags = default_section_type_flags (decl, name, reloc);
9921
9922 if (!decl)
9923 return flags;
9924
9925 if (TREE_CODE (decl) == FUNCTION_DECL)
9926 {
9927 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9928 if (align_functions_log > current_section_align)
9929 current_section_align = align_functions_log;
9930
9931 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9932 flags |= SECTION_MAIN;
9933 }
9934 else
9935 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9936
9937 if (TREE_PUBLIC (decl))
9938 flags |= SECTION_PUBLIC;
9939
9940 return flags;
9941 }
9942
9943 /* Generate a section name for decl and associate it with the
9944 declaration. */
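
/* A function foo ends up in section "code@foo" and a non-public variable
   bar in "data@bar", while a public variable simply uses its own name as
   the section name.  */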
9945
9946 static void
9947 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9948 {
9949 const char *name;
9950 int len;
9951
9952 gcc_assert (decl);
9953
9954 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9955 name = default_strip_name_encoding (name);
9956 len = strlen (name);
9957
9958 if (TREE_CODE (decl) == FUNCTION_DECL)
9959 {
9960 char *string;
9961
9962 /* It is essential that we prefix the section name here because
9963 otherwise the section names generated for constructors and
9964 destructors confuse collect2. */
9965
9966 string = alloca (len + 6);
9967 sprintf (string, "code@%s", name);
9968 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9969 }
9970 else if (TREE_PUBLIC (decl))
9971 DECL_SECTION_NAME (decl) = build_string (len, name);
9972 else
9973 {
9974 char *string;
9975
9976 string = alloca (len + 6);
9977 sprintf (string, "data@%s", name);
9978 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9979 }
9980 }
9981
9982 /* Switch to an arbitrary section NAME with attributes as specified
9983 by FLAGS. ALIGN specifies any known alignment requirements for
9984 the section; 0 if the default should be used. */
9985
9986 static void
9987 unicosmk_asm_named_section (const char *name, unsigned int flags,
9988 tree decl ATTRIBUTE_UNUSED)
9989 {
9990 const char *kind;
9991
9992 /* Close the previous section. */
9993
9994 fputs ("\t.endp\n\n", asm_out_file);
9995
9996 /* Find out what kind of section we are opening. */
9997
9998 if (flags & SECTION_MAIN)
9999 fputs ("\t.start\tmain\n", asm_out_file);
10000
10001 if (flags & SECTION_CODE)
10002 kind = "code";
10003 else if (flags & SECTION_PUBLIC)
10004 kind = "common";
10005 else
10006 kind = "data";
10007
10008 if (current_section_align != 0)
10009 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10010 current_section_align, kind);
10011 else
10012 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10013 }
10014
10015 static void
10016 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10017 {
10018 if (DECL_P (decl)
10019 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10020 unicosmk_unique_section (decl, 0);
10021 }
10022
10023 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10024 in code sections because .align fills unused space with zeroes. */
10025
10026 void
10027 unicosmk_output_align (FILE *file, int align)
10028 {
10029 if (inside_function)
10030 fprintf (file, "\tgcc@code@align\t%d\n", align);
10031 else
10032 fprintf (file, "\t.align\t%d\n", align);
10033 }
10034
10035 /* Add a case vector to the current function's list of deferred case
10036 vectors. Case vectors have to be put into a separate section because CAM
10037 does not allow data definitions in code sections. */
10038
10039 void
10040 unicosmk_defer_case_vector (rtx lab, rtx vec)
10041 {
10042 struct machine_function *machine = cfun->machine;
10043
10044 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10045 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10046 machine->addr_list);
10047 }
10048
10049 /* Output a case vector. */
10050
10051 static void
10052 unicosmk_output_addr_vec (FILE *file, rtx vec)
10053 {
10054 rtx lab = XEXP (vec, 0);
10055 rtx body = XEXP (vec, 1);
10056 int vlen = XVECLEN (body, 0);
10057 int idx;
10058
10059 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10060
10061 for (idx = 0; idx < vlen; idx++)
10062 {
10063 ASM_OUTPUT_ADDR_VEC_ELT
10064 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10065 }
10066 }
10067
10068 /* Output current function's deferred case vectors. */
10069
10070 static void
10071 unicosmk_output_deferred_case_vectors (FILE *file)
10072 {
10073 struct machine_function *machine = cfun->machine;
10074 rtx t;
10075
10076 if (machine->addr_list == NULL_RTX)
10077 return;
10078
10079 switch_to_section (data_section);
10080 for (t = machine->addr_list; t; t = XEXP (t, 1))
10081 unicosmk_output_addr_vec (file, XEXP (t, 0));
10082 }
10083
10084 /* Generate the name of the SSIB section for the current function. */
10085
10086 #define SSIB_PREFIX "__SSIB_"
10087 #define SSIB_PREFIX_LEN 7
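
/* For a function foo this yields "__SSIB_foo"; longer function names are
   truncated to 248 characters so that the whole name fits in the buffer
   below.  */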
10088
10089 static const char *
10090 unicosmk_ssib_name (void)
10091 {
10092 /* A fixed-size buffer is fine here since CAM cannot handle names longer
10093 than that anyway. */
10094
10095 static char name[256];
10096
10097 rtx x;
10098 const char *fnname;
10099 int len;
10100
10101 x = DECL_RTL (cfun->decl);
10102 gcc_assert (GET_CODE (x) == MEM);
10103 x = XEXP (x, 0);
10104 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10105 fnname = XSTR (x, 0);
10106
10107 len = strlen (fnname);
10108 if (len + SSIB_PREFIX_LEN > 255)
10109 len = 255 - SSIB_PREFIX_LEN;
10110
10111 strcpy (name, SSIB_PREFIX);
10112 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10113 name[len + SSIB_PREFIX_LEN] = 0;
10114
10115 return name;
10116 }
10117
10118 /* Set up the dynamic subprogram information block (DSIB) and update the
10119 frame pointer register ($15) for subroutines which have a frame. If the
10120 subroutine doesn't have a frame, simply increment $15. */
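
/* When a frame is needed, a 64-byte block is carved out below the stack
   pointer and the slots used are, relative to the new $sp: +56 return
   address, +48 old frame pointer, +32 SSIB address, +24 CIW index (taken
   from $25). $15 then points just past the block, at the original stack
   pointer.  */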
10121
10122 static void
10123 unicosmk_gen_dsib (unsigned long *imaskP)
10124 {
10125 if (alpha_procedure_type == PT_STACK)
10126 {
10127 const char *ssib_name;
10128 rtx mem;
10129
10130 /* Allocate 64 bytes for the DSIB. */
10131
10132 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10133 GEN_INT (-64))));
10134 emit_insn (gen_blockage ());
10135
10136 /* Save the return address. */
10137
10138 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10139 set_mem_alias_set (mem, alpha_sr_alias_set);
10140 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10141 (*imaskP) &= ~(1UL << REG_RA);
10142
10143 /* Save the old frame pointer. */
10144
10145 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10146 set_mem_alias_set (mem, alpha_sr_alias_set);
10147 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10148 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10149
10150 emit_insn (gen_blockage ());
10151
10152 /* Store the SSIB pointer. */
10153
10154 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10155 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10156 set_mem_alias_set (mem, alpha_sr_alias_set);
10157
10158 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10159 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10160 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10161
10162 /* Save the CIW index. */
10163
10164 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10165 set_mem_alias_set (mem, alpha_sr_alias_set);
10166 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10167
10168 emit_insn (gen_blockage ());
10169
10170 /* Set the new frame pointer. */
10171
10172 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10173 stack_pointer_rtx, GEN_INT (64))));
10174
10175 }
10176 else
10177 {
10178 /* Increment the frame pointer register to indicate that we do not
10179 have a frame. */
10180
10181 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10182 hard_frame_pointer_rtx, const1_rtx)));
10183 }
10184 }
10185
10186 /* Output the static subroutine information block for the current
10187 function. */
10188
10189 static void
10190 unicosmk_output_ssib (FILE *file, const char *fnname)
10191 {
10192 int len;
10193 int i;
10194 rtx x;
10195 rtx ciw;
10196 struct machine_function *machine = cfun->machine;
10197
10198 in_section = NULL;
10199 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10200 unicosmk_ssib_name ());
10201
10202 /* Some required stuff and the function name length. */
10203
10204 len = strlen (fnname);
10205 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10206
10207 /* Saved registers
10208 ??? We don't do that yet. */
10209
10210 fputs ("\t.quad\t0\n", file);
10211
10212 /* Function address. */
10213
10214 fputs ("\t.quad\t", file);
10215 assemble_name (file, fnname);
10216 putc ('\n', file);
10217
10218 fputs ("\t.quad\t0\n", file);
10219 fputs ("\t.quad\t0\n", file);
10220
10221 /* Function name.
10222 ??? We do it the same way Cray CC does it but this could be
10223 simplified. */
10224
10225 for (i = 0; i < len; i++)
10226 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10227 if ((len % 8) == 0)
10228 fputs ("\t.quad\t0\n", file);
10229 else
10230 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10231
10232 /* All call information words used in the function. */
10233
10234 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10235 {
10236 ciw = XEXP (x, 0);
10237 #if HOST_BITS_PER_WIDE_INT == 32
10238 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10239 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10240 #else
10241 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10242 #endif
10243 }
10244 }
10245
10246 /* Add a call information word (CIW) to the list of the current function's
10247 CIWs and return its index.
10248
10249 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10250
10251 rtx
10252 unicosmk_add_call_info_word (rtx x)
10253 {
10254 rtx node;
10255 struct machine_function *machine = cfun->machine;
10256
10257 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10258 if (machine->first_ciw == NULL_RTX)
10259 machine->first_ciw = node;
10260 else
10261 XEXP (machine->last_ciw, 1) = node;
10262
10263 machine->last_ciw = node;
10264 ++machine->ciw_count;
10265
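/* The value returned is the CIW's quad-word offset from the start of the
   SSIB: 5 header quads, then strlen (name) / 8 + 1 quads of padded function
   name, then the CIWs in order of addition; this matches the layout written
   by unicosmk_output_ssib above, assuming current_function_name returns the
   same name that ends up in the SSIB.  */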
10266 return GEN_INT (machine->ciw_count
10267 + strlen (current_function_name ())/8 + 5);
10268 }
10269
10270 /* The Cray assembler doesn't accept extern declarations for symbols which
10271 are defined in the same file. We have to keep track of all global
10272 symbols which are referenced and/or defined in a source file and, at the
10273 end of the file, output extern declarations for those which are
10274 referenced but not defined. */
10275
10276 /* List of identifiers for which an extern declaration might have to be
10277 emitted. */
10278 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10279
10280 struct unicosmk_extern_list
10281 {
10282 struct unicosmk_extern_list *next;
10283 const char *name;
10284 };
10285
10286 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10287
10288 /* Output extern declarations which are required for every asm file. */
10289
10290 static void
10291 unicosmk_output_default_externs (FILE *file)
10292 {
10293 static const char *const externs[] =
10294 { "__T3E_MISMATCH" };
10295
10296 int i;
10297 int n;
10298
10299 n = ARRAY_SIZE (externs);
10300
10301 for (i = 0; i < n; i++)
10302 fprintf (file, "\t.extern\t%s\n", externs[i]);
10303 }
10304
10305 /* Output extern declarations for global symbols which have been
10306 referenced but not defined. */
10307
10308 static void
10309 unicosmk_output_externs (FILE *file)
10310 {
10311 struct unicosmk_extern_list *p;
10312 const char *real_name;
10313 int len;
10314 tree name_tree;
10315
10316 len = strlen (user_label_prefix);
10317 for (p = unicosmk_extern_head; p != 0; p = p->next)
10318 {
10319 /* We have to strip the encoding and possibly remove user_label_prefix
10320 from the identifier in order to handle -fleading-underscore and
10321 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10322 real_name = default_strip_name_encoding (p->name);
10323 if (len && p->name[0] == '*'
10324 && !memcmp (real_name, user_label_prefix, len))
10325 real_name += len;
10326
10327 name_tree = get_identifier (real_name);
10328 if (! TREE_ASM_WRITTEN (name_tree))
10329 {
10330 TREE_ASM_WRITTEN (name_tree) = 1;
10331 fputs ("\t.extern\t", file);
10332 assemble_name (file, p->name);
10333 putc ('\n', file);
10334 }
10335 }
10336 }
10337
10338 /* Record an extern. */
10339
10340 void
10341 unicosmk_add_extern (const char *name)
10342 {
10343 struct unicosmk_extern_list *p;
10344
10345 p = (struct unicosmk_extern_list *)
10346 xmalloc (sizeof (struct unicosmk_extern_list));
10347 p->next = unicosmk_extern_head;
10348 p->name = name;
10349 unicosmk_extern_head = p;
10350 }
10351
10352 /* The Cray assembler generates incorrect code if identifiers which
10353 conflict with register names are used as instruction operands. We have
10354 to replace such identifiers with DEX expressions. */
10355
10356 /* Structure to collect identifiers which have been replaced by DEX
10357 expressions. */
10358 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10359
10360 struct unicosmk_dex {
10361 struct unicosmk_dex *next;
10362 const char *name;
10363 };
10364
10365 /* List of identifiers which have been replaced by DEX expressions. An entry's
10366 DEX number is its position counted from the end of the list (oldest = 1). */
10367
10368 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10369
10370 /* The number of elements in the DEX list. */
10371
10372 static int unicosmk_dex_count = 0;
10373
10374 /* Check if NAME must be replaced by a DEX expression. */
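
/* Only the register names r0-r31 and f0-f31, spelled in upper or lower case
   and optionally preceded by '$', are special: e.g. "r5", "$f31" and "R29"
   qualify, while "r32" or "reg" do not.  */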
10375
10376 static int
10377 unicosmk_special_name (const char *name)
10378 {
10379 if (name[0] == '*')
10380 ++name;
10381
10382 if (name[0] == '$')
10383 ++name;
10384
10385 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10386 return 0;
10387
10388 switch (name[1])
10389 {
10390 case '1': case '2':
10391 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10392
10393 case '3':
10394 return (name[2] == '\0'
10395 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10396
10397 default:
10398 return (ISDIGIT (name[1]) && name[2] == '\0');
10399 }
10400 }
10401
10402 /* Return the DEX number if X must be replaced by a DEX expression and 0
10403 otherwise. */
10404
10405 static int
10406 unicosmk_need_dex (rtx x)
10407 {
10408 struct unicosmk_dex *dex;
10409 const char *name;
10410 int i;
10411
10412 if (GET_CODE (x) != SYMBOL_REF)
10413 return 0;
10414
10415 name = XSTR (x,0);
10416 if (! unicosmk_special_name (name))
10417 return 0;
10418
10419 i = unicosmk_dex_count;
10420 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10421 {
10422 if (! strcmp (name, dex->name))
10423 return i;
10424 --i;
10425 }
10426
10427 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10428 dex->name = name;
10429 dex->next = unicosmk_dex_list;
10430 unicosmk_dex_list = dex;
10431
10432 ++unicosmk_dex_count;
10433 return unicosmk_dex_count;
10434 }
10435
10436 /* Output the DEX definitions for this file. */
10437
10438 static void
10439 unicosmk_output_dex (FILE *file)
10440 {
10441 struct unicosmk_dex *dex;
10442 int i;
10443
10444 if (unicosmk_dex_list == NULL)
10445 return;
10446
10447 fprintf (file, "\t.dexstart\n");
10448
10449 i = unicosmk_dex_count;
10450 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10451 {
10452 fprintf (file, "\tDEX (%d) = ", i);
10453 assemble_name (file, dex->name);
10454 putc ('\n', file);
10455 --i;
10456 }
10457
10458 fprintf (file, "\t.dexend\n");
10459 }
10460
10461 /* Output text to appear at the beginning of an assembler file. */
10462
10463 static void
10464 unicosmk_file_start (void)
10465 {
10466 int i;
10467
10468 fputs ("\t.ident\t", asm_out_file);
10469 unicosmk_output_module_name (asm_out_file);
10470 fputs ("\n\n", asm_out_file);
10471
10472 /* CAM uses different register names: rN for the integer register N and fN
10473 for the floating-point register N. Instead of trying to use these in
10474 alpha.md, we emit micro definitions below so that the symbols $N and $fN
10475 refer to the appropriate registers. */
10476
10480 for (i = 0; i < 32; ++i)
10481 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10482
10483 for (i = 0; i < 32; ++i)
10484 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10485
10486 putc ('\n', asm_out_file);
10487
10488 /* The .align directive fills unused space with zeroes, which does not work
10489 in code sections. We define the macro 'gcc@code@align', which uses nops
10490 instead. Note that it assumes that code sections always have the
10491 biggest possible alignment since '.' refers to the current offset from
10492 the beginning of the section. */
10493
10494 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10495 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10496 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10497 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10498 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10499 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10500 fputs ("\t.endr\n", asm_out_file);
10501 fputs ("\t.endif\n", asm_out_file);
10502 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10503
10504 /* Output extern declarations which should always be visible. */
10505 unicosmk_output_default_externs (asm_out_file);
10506
10507 /* Open a dummy section. We always need to be inside a section for the
10508 section-switching code to work correctly.
10509 ??? This should be a module id or something like that. I still have to
10510 figure out what the rules for those are. */
10511 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10512 }
10513
10514 /* Output text to appear at the end of an assembler file. This includes all
10515 pending extern declarations and DEX expressions. */
10516
10517 static void
10518 unicosmk_file_end (void)
10519 {
10520 fputs ("\t.endp\n\n", asm_out_file);
10521
10522 /* Output all pending externs. */
10523
10524 unicosmk_output_externs (asm_out_file);
10525
10526 /* Output dex definitions used for functions whose names conflict with
10527 register names. */
10528
10529 unicosmk_output_dex (asm_out_file);
10530
10531 fputs ("\t.end\t", asm_out_file);
10532 unicosmk_output_module_name (asm_out_file);
10533 putc ('\n', asm_out_file);
10534 }
10535
10536 #else
10537
10538 static void
10539 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10540 {}
10541
10542 static void
10543 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10544 {}
10545
10546 static void
10547 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10548 const char * fnname ATTRIBUTE_UNUSED)
10549 {}
10550
10551 rtx
10552 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10553 {
10554 return NULL_RTX;
10555 }
10556
10557 static int
10558 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10559 {
10560 return 0;
10561 }
10562
10563 #endif /* TARGET_ABI_UNICOSMK */
10564
10565 static void
10566 alpha_init_libfuncs (void)
10567 {
10568 if (TARGET_ABI_UNICOSMK)
10569 {
10570 /* Prevent gcc from generating calls to __divsi3. */
10571 set_optab_libfunc (sdiv_optab, SImode, 0);
10572 set_optab_libfunc (udiv_optab, SImode, 0);
10573
10574 /* Use the functions provided by the system library
10575 for DImode integer division. */
10576 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10577 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10578 }
10579 else if (TARGET_ABI_OPEN_VMS)
10580 {
10581 /* Use the VMS runtime library functions for division and
10582 remainder. */
10583 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10584 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10585 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10586 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10587 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10588 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10589 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10590 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10591 }
10592 }
10593
10594 \f
10595 /* Initialize the GCC target structure. */
10596 #if TARGET_ABI_OPEN_VMS
10597 # undef TARGET_ATTRIBUTE_TABLE
10598 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10599 # undef TARGET_SECTION_TYPE_FLAGS
10600 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10601 #endif
10602
10603 #undef TARGET_IN_SMALL_DATA_P
10604 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10605
10606 #if TARGET_ABI_UNICOSMK
10607 # undef TARGET_INSERT_ATTRIBUTES
10608 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10609 # undef TARGET_SECTION_TYPE_FLAGS
10610 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10611 # undef TARGET_ASM_UNIQUE_SECTION
10612 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10613 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10614 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10615 # undef TARGET_ASM_GLOBALIZE_LABEL
10616 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10617 # undef TARGET_MUST_PASS_IN_STACK
10618 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10619 #endif
10620
10621 #undef TARGET_ASM_ALIGNED_HI_OP
10622 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10623 #undef TARGET_ASM_ALIGNED_DI_OP
10624 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10625
10626 /* Default unaligned ops are provided for ELF systems. To get unaligned
10627 data for non-ELF systems, we have to turn off auto alignment. */
10628 #ifndef OBJECT_FORMAT_ELF
10629 #undef TARGET_ASM_UNALIGNED_HI_OP
10630 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10631 #undef TARGET_ASM_UNALIGNED_SI_OP
10632 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10633 #undef TARGET_ASM_UNALIGNED_DI_OP
10634 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10635 #endif
10636
10637 #ifdef OBJECT_FORMAT_ELF
10638 #undef TARGET_ASM_SELECT_RTX_SECTION
10639 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10640 #endif
10641
10642 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10643 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10644
10645 #undef TARGET_INIT_LIBFUNCS
10646 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10647
10648 #if TARGET_ABI_UNICOSMK
10649 #undef TARGET_ASM_FILE_START
10650 #define TARGET_ASM_FILE_START unicosmk_file_start
10651 #undef TARGET_ASM_FILE_END
10652 #define TARGET_ASM_FILE_END unicosmk_file_end
10653 #else
10654 #undef TARGET_ASM_FILE_START
10655 #define TARGET_ASM_FILE_START alpha_file_start
10656 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10657 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10658 #endif
10659
10660 #undef TARGET_SCHED_ADJUST_COST
10661 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10662 #undef TARGET_SCHED_ISSUE_RATE
10663 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10664 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10665 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10666 alpha_multipass_dfa_lookahead
10667
10668 #undef TARGET_HAVE_TLS
10669 #define TARGET_HAVE_TLS HAVE_AS_TLS
10670
10671 #undef TARGET_INIT_BUILTINS
10672 #define TARGET_INIT_BUILTINS alpha_init_builtins
10673 #undef TARGET_EXPAND_BUILTIN
10674 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10675 #undef TARGET_FOLD_BUILTIN
10676 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10677
10678 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10679 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10680 #undef TARGET_CANNOT_COPY_INSN_P
10681 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10682 #undef TARGET_CANNOT_FORCE_CONST_MEM
10683 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10684
10685 #if TARGET_ABI_OSF
10686 #undef TARGET_ASM_OUTPUT_MI_THUNK
10687 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10688 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10689 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10690 #undef TARGET_STDARG_OPTIMIZE_HOOK
10691 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10692 #endif
10693
10694 #undef TARGET_RTX_COSTS
10695 #define TARGET_RTX_COSTS alpha_rtx_costs
10696 #undef TARGET_ADDRESS_COST
10697 #define TARGET_ADDRESS_COST hook_int_rtx_0
10698
10699 #undef TARGET_MACHINE_DEPENDENT_REORG
10700 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10701
10702 #undef TARGET_PROMOTE_FUNCTION_ARGS
10703 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10704 #undef TARGET_PROMOTE_FUNCTION_RETURN
10705 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10706 #undef TARGET_PROMOTE_PROTOTYPES
10707 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10708 #undef TARGET_RETURN_IN_MEMORY
10709 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10710 #undef TARGET_PASS_BY_REFERENCE
10711 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10712 #undef TARGET_SETUP_INCOMING_VARARGS
10713 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10714 #undef TARGET_STRICT_ARGUMENT_NAMING
10715 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10716 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10717 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10718 #undef TARGET_SPLIT_COMPLEX_ARG
10719 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10720 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10721 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10722 #undef TARGET_ARG_PARTIAL_BYTES
10723 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10724
10725 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10726 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10727 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10728 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10729
10730 #undef TARGET_BUILD_BUILTIN_VA_LIST
10731 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10732
10733 /* The Alpha architecture does not require sequential consistency. See
10734 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10735 for an example of how it can be violated in practice. */
10736 #undef TARGET_RELAXED_ORDERING
10737 #define TARGET_RELAXED_ORDERING true
10738
10739 #undef TARGET_DEFAULT_TARGET_FLAGS
10740 #define TARGET_DEFAULT_TARGET_FLAGS \
10741 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10742 #undef TARGET_HANDLE_OPTION
10743 #define TARGET_HANDLE_OPTION alpha_handle_option
10744
10745 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10746 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
10747 #define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
10748 #endif
10749
10750 struct gcc_target targetm = TARGET_INITIALIZER;
10751
10752 \f
10753 #include "gt-alpha.h"