re PR target/27571 (alpha: ICE in get_attr_usegp, at config/alpha/alpha.md:171)
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
61
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
64
65 static const char * const alpha_cpu_name[] =
66 {
67 "ev4", "ev5", "ev6"
68 };
69
70 /* Specify how accurate floating-point traps need to be. */
71
72 enum alpha_trap_precision alpha_tp;
73
74 /* Specify the floating-point rounding mode. */
75
76 enum alpha_fp_rounding_mode alpha_fprm;
77
78 /* Specify which things cause traps. */
79
80 enum alpha_fp_trap_mode alpha_fptm;
81
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
84
85 struct alpha_compare alpha_compare;
86
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
89
90 static int inside_function = FALSE;
91
92 /* The number of cycles of latency we should assume on memory reads. */
93
94 int alpha_memory_latency = 3;
95
96 /* Whether the function needs the GP. */
97
98 static int alpha_function_needs_gp;
99
100 /* The alias set for prologue/epilogue register save/restore. */
101
102 static GTY(()) int alpha_sr_alias_set;
103
104 /* The assembler name of the current function. */
105
106 static const char *alpha_fnname;
107
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
111
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
118
119 /* Costs of various operations on the different architectures. */
120
121 struct alpha_rtx_cost_data
122 {
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
132 };
133
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
135 {
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
146 },
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
157 },
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
168 },
169 };
170
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
175
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
177 {
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
187 };
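/* As a worked example of the fractional tuning above (assuming the usual
   COSTS_N_INSNS (N) == (N) * 4 definition, which is not spelled out here):
   "COSTS_N_INSNS (1) + 2" for int_mult_di evaluates to 6, between one insn
   (4) and two insns (8).  A lone multiply therefore looks slightly worse
   than a lone shift when optimizing for size, yet never as bad as two
   full instructions.  */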
188
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
195
196 #define REG_PV 27
197 #define REG_RA 26
198
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
202
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
206
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
211 \f
212 /* Implement TARGET_HANDLE_OPTION. */
213
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
216 {
217 switch (code)
218 {
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
223
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
228
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
233 }
234
235 return true;
236 }
237
238 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
239 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
240
241 static const char *
242 alpha_mangle_fundamental_type (tree type)
243 {
244 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
245 && TARGET_LONG_DOUBLE_128)
246 return "g";
247
248 /* For all other types, use normal C++ mangling. */
249 return NULL;
250 }
251 #endif
252
253 /* Parse target option strings. */
254
255 void
256 override_options (void)
257 {
258 static const struct cpu_table {
259 const char *const name;
260 const enum processor_type processor;
261 const int flags;
262 } cpu_table[] = {
263 { "ev4", PROCESSOR_EV4, 0 },
264 { "ev45", PROCESSOR_EV4, 0 },
265 { "21064", PROCESSOR_EV4, 0 },
266 { "ev5", PROCESSOR_EV5, 0 },
267 { "21164", PROCESSOR_EV5, 0 },
268 { "ev56", PROCESSOR_EV5, MASK_BWX },
269 { "21164a", PROCESSOR_EV5, MASK_BWX },
270 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164PC", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164pc", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
276 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { 0, 0, 0 }
278 };
279
280 int i;
281
282 /* Unicos/Mk doesn't have shared libraries. */
283 if (TARGET_ABI_UNICOSMK && flag_pic)
284 {
285 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
286 (flag_pic > 1) ? "PIC" : "pic");
287 flag_pic = 0;
288 }
289
290 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
291 floating-point instructions. Make that the default for this target. */
292 if (TARGET_ABI_UNICOSMK)
293 alpha_fprm = ALPHA_FPRM_DYN;
294 else
295 alpha_fprm = ALPHA_FPRM_NORM;
296
297 alpha_tp = ALPHA_TP_PROG;
298 alpha_fptm = ALPHA_FPTM_N;
299
300 /* We cannot use su and sui qualifiers for conversion instructions on
301 Unicos/Mk. I'm not sure if this is due to assembler or hardware
302 limitations. Right now, we issue a warning if -mieee is specified
303 and then ignore it; eventually, we should either get it right or
304 disable the option altogether. */
305
306 if (TARGET_IEEE)
307 {
308 if (TARGET_ABI_UNICOSMK)
309 warning (0, "-mieee not supported on Unicos/Mk");
310 else
311 {
312 alpha_tp = ALPHA_TP_INSN;
313 alpha_fptm = ALPHA_FPTM_SU;
314 }
315 }
316
317 if (TARGET_IEEE_WITH_INEXACT)
318 {
319 if (TARGET_ABI_UNICOSMK)
320 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
321 else
322 {
323 alpha_tp = ALPHA_TP_INSN;
324 alpha_fptm = ALPHA_FPTM_SUI;
325 }
326 }
327
328 if (alpha_tp_string)
329 {
330 if (! strcmp (alpha_tp_string, "p"))
331 alpha_tp = ALPHA_TP_PROG;
332 else if (! strcmp (alpha_tp_string, "f"))
333 alpha_tp = ALPHA_TP_FUNC;
334 else if (! strcmp (alpha_tp_string, "i"))
335 alpha_tp = ALPHA_TP_INSN;
336 else
337 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
338 }
339
340 if (alpha_fprm_string)
341 {
342 if (! strcmp (alpha_fprm_string, "n"))
343 alpha_fprm = ALPHA_FPRM_NORM;
344 else if (! strcmp (alpha_fprm_string, "m"))
345 alpha_fprm = ALPHA_FPRM_MINF;
346 else if (! strcmp (alpha_fprm_string, "c"))
347 alpha_fprm = ALPHA_FPRM_CHOP;
348 else if (! strcmp (alpha_fprm_string, "d"))
349 alpha_fprm = ALPHA_FPRM_DYN;
350 else
351 error ("bad value %qs for -mfp-rounding-mode switch",
352 alpha_fprm_string);
353 }
354
355 if (alpha_fptm_string)
356 {
357 if (strcmp (alpha_fptm_string, "n") == 0)
358 alpha_fptm = ALPHA_FPTM_N;
359 else if (strcmp (alpha_fptm_string, "u") == 0)
360 alpha_fptm = ALPHA_FPTM_U;
361 else if (strcmp (alpha_fptm_string, "su") == 0)
362 alpha_fptm = ALPHA_FPTM_SU;
363 else if (strcmp (alpha_fptm_string, "sui") == 0)
364 alpha_fptm = ALPHA_FPTM_SUI;
365 else
366 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
367 }
368
369 if (alpha_cpu_string)
370 {
371 for (i = 0; cpu_table [i].name; i++)
372 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
373 {
374 alpha_tune = alpha_cpu = cpu_table [i].processor;
375 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
376 target_flags |= cpu_table [i].flags;
377 break;
378 }
379 if (! cpu_table [i].name)
380 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
381 }
382
383 if (alpha_tune_string)
384 {
385 for (i = 0; cpu_table [i].name; i++)
386 if (! strcmp (alpha_tune_string, cpu_table [i].name))
387 {
388 alpha_tune = cpu_table [i].processor;
389 break;
390 }
391 if (! cpu_table [i].name)
392 error ("bad value %qs for -mcpu switch", alpha_tune_string);
393 }
394
395 /* Do some sanity checks on the above options. */
396
397 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
398 {
399 warning (0, "trap mode not supported on Unicos/Mk");
400 alpha_fptm = ALPHA_FPTM_N;
401 }
402
403 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
404 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
405 {
406 warning (0, "fp software completion requires -mtrap-precision=i");
407 alpha_tp = ALPHA_TP_INSN;
408 }
409
410 if (alpha_cpu == PROCESSOR_EV6)
411 {
412 /* Except for EV6 pass 1 (not released), we always have precise
413 arithmetic traps, which means we can do software completion
414 without minding trap shadows. */
415 alpha_tp = ALPHA_TP_PROG;
416 }
417
418 if (TARGET_FLOAT_VAX)
419 {
420 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
421 {
422 warning (0, "rounding mode not supported for VAX floats");
423 alpha_fprm = ALPHA_FPRM_NORM;
424 }
425 if (alpha_fptm == ALPHA_FPTM_SUI)
426 {
427 warning (0, "trap mode not supported for VAX floats");
428 alpha_fptm = ALPHA_FPTM_SU;
429 }
430 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
431 warning (0, "128-bit long double not supported for VAX floats");
432 target_flags &= ~MASK_LONG_DOUBLE_128;
433 }
434
435 {
436 char *end;
437 int lat;
438
439 if (!alpha_mlat_string)
440 alpha_mlat_string = "L1";
441
442 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
443 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
444 ;
445 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
446 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
447 && alpha_mlat_string[2] == '\0')
448 {
449 static int const cache_latency[][4] =
450 {
451 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
452 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
453 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
454 };
455
456 lat = alpha_mlat_string[1] - '0';
457 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
458 {
459 warning (0, "L%d cache latency unknown for %s",
460 lat, alpha_cpu_name[alpha_tune]);
461 lat = 3;
462 }
463 else
464 lat = cache_latency[alpha_tune][lat-1];
465 }
466 else if (! strcmp (alpha_mlat_string, "main"))
467 {
468 /* Most current memories have about 370ns latency. This is
469 a reasonable guess for a fast cpu. */
470 lat = 150;
471 }
472 else
473 {
474 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
475 lat = 3;
476 }
477
478 alpha_memory_latency = lat;
479 }
480
481 /* Default the definition of "small data" to 8 bytes. */
482 if (!g_switch_set)
483 g_switch_value = 8;
484
485 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
486 if (flag_pic == 1)
487 target_flags |= MASK_SMALL_DATA;
488 else if (flag_pic == 2)
489 target_flags &= ~MASK_SMALL_DATA;
490
491 /* Align labels and loops for optimal branching. */
492 /* ??? Kludge these by not doing anything if we don't optimize and also if
493 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
494 if (optimize > 0 && write_symbols != SDB_DEBUG)
495 {
496 if (align_loops <= 0)
497 align_loops = 16;
498 if (align_jumps <= 0)
499 align_jumps = 16;
500 }
501 if (align_functions <= 0)
502 align_functions = 16;
503
504 /* Acquire a unique set number for our register saves and restores. */
505 alpha_sr_alias_set = new_alias_set ();
506
507 /* Register variables and functions with the garbage collector. */
508
509 /* Set up function hooks. */
510 init_machine_status = alpha_init_machine_status;
511
512 /* Tell the compiler when we're using VAX floating point. */
513 if (TARGET_FLOAT_VAX)
514 {
515 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
516 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
517 REAL_MODE_FORMAT (TFmode) = NULL;
518 }
519
520 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
521 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
522 target_flags |= MASK_LONG_DOUBLE_128;
523 #endif
524 }
525 \f
526 /* Returns 1 if VALUE is a mask in which every byte is either all zeros or all ones. */
527
528 int
529 zap_mask (HOST_WIDE_INT value)
530 {
531 int i;
532
533 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
534 i++, value >>= 8)
535 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
536 return 0;
537
538 return 1;
539 }
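/* For example (illustrative values, assuming a 64-bit HOST_WIDE_INT):
   zap_mask (0xffffffff00000000) and zap_mask (0x00ff00ff00ff00ff) both
   return 1, since every byte is 0x00 or 0xff, while zap_mask (0x0000f0f0)
   returns 0 because the low bytes are neither.  */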
540
541 /* Return true if OP is valid for a particular TLS relocation.
542 We are already guaranteed that OP is a CONST. */
543
544 int
545 tls_symbolic_operand_1 (rtx op, int size, int unspec)
546 {
547 op = XEXP (op, 0);
548
549 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
550 return 0;
551 op = XVECEXP (op, 0, 0);
552
553 if (GET_CODE (op) != SYMBOL_REF)
554 return 0;
555
556 switch (SYMBOL_REF_TLS_MODEL (op))
557 {
558 case TLS_MODEL_LOCAL_DYNAMIC:
559 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
560 case TLS_MODEL_INITIAL_EXEC:
561 return unspec == UNSPEC_TPREL && size == 64;
562 case TLS_MODEL_LOCAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
564 default:
565 gcc_unreachable ();
566 }
567 }
568
569 /* Used by aligned_memory_operand and unaligned_memory_operand to
570 resolve what reload is going to do with OP if it's a register. */
571
572 rtx
573 resolve_reload_operand (rtx op)
574 {
575 if (reload_in_progress)
576 {
577 rtx tmp = op;
578 if (GET_CODE (tmp) == SUBREG)
579 tmp = SUBREG_REG (tmp);
580 if (GET_CODE (tmp) == REG
581 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
582 {
583 op = reg_equiv_memory_loc[REGNO (tmp)];
584 if (op == 0)
585 return 0;
586 }
587 }
588 return op;
589 }
590
591 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
592 the range defined for C in [I-P]. */
593
594 bool
595 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
596 {
597 switch (c)
598 {
599 case 'I':
600 /* An unsigned 8 bit constant. */
601 return (unsigned HOST_WIDE_INT) value < 0x100;
602 case 'J':
603 /* The constant zero. */
604 return value == 0;
605 case 'K':
606 /* A signed 16 bit constant. */
607 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
608 case 'L':
609 /* A shifted signed 16 bit constant appropriate for LDAH. */
610 return ((value & 0xffff) == 0
611 && ((value) >> 31 == -1 || value >> 31 == 0));
612 case 'M':
613 /* A constant that can be AND'ed with using a ZAP insn. */
614 return zap_mask (value);
615 case 'N':
616 /* A complemented unsigned 8 bit constant. */
617 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
618 case 'O':
619 /* A negated unsigned 8 bit constant. */
620 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
621 case 'P':
622 /* The constant 1, 2 or 3. */
623 return value == 1 || value == 2 || value == 3;
624
625 default:
626 return false;
627 }
628 }
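/* A few illustrative matches for the letters above (examples only):
   255 satisfies 'I'; 0 satisfies 'J'; -32768 satisfies 'K'; 0x12340000
   satisfies 'L' (low 16 bits clear, usable with LDAH); -1 satisfies 'N'
   (its complement, 0, is an unsigned 8-bit value); and 3 satisfies 'P'.  */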
629
630 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
631 matches for C in [GH]. */
632
633 bool
634 alpha_const_double_ok_for_letter_p (rtx value, int c)
635 {
636 switch (c)
637 {
638 case 'G':
639 /* The floating point zero constant. */
640 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
641 && value == CONST0_RTX (GET_MODE (value)));
642
643 case 'H':
644 /* A valid operand of a ZAP insn. */
645 return (GET_MODE (value) == VOIDmode
646 && zap_mask (CONST_DOUBLE_LOW (value))
647 && zap_mask (CONST_DOUBLE_HIGH (value)));
648
649 default:
650 return false;
651 }
652 }
653
654 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
655 matches for C. */
656
657 bool
658 alpha_extra_constraint (rtx value, int c)
659 {
660 switch (c)
661 {
662 case 'Q':
663 return normal_memory_operand (value, VOIDmode);
664 case 'R':
665 return direct_call_operand (value, Pmode);
666 case 'S':
667 return (GET_CODE (value) == CONST_INT
668 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
669 case 'T':
670 return GET_CODE (value) == HIGH;
671 case 'U':
672 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
673 case 'W':
674 return (GET_CODE (value) == CONST_VECTOR
675 && value == CONST0_RTX (GET_MODE (value)));
676 default:
677 return false;
678 }
679 }
680
681 /* The set of scalar modes supported differs from the default check-what-c-supports
682 version in that sometimes TFmode is available even when long double
683 indicates only DFmode. On Unicos/Mk, we have the situation that HImode
684 doesn't map to any C type, but of course we still support that. */
685
686 static bool
687 alpha_scalar_mode_supported_p (enum machine_mode mode)
688 {
689 switch (mode)
690 {
691 case QImode:
692 case HImode:
693 case SImode:
694 case DImode:
695 case TImode: /* via optabs.c */
696 return true;
697
698 case SFmode:
699 case DFmode:
700 return true;
701
702 case TFmode:
703 return TARGET_HAS_XFLOATING_LIBS;
704
705 default:
706 return false;
707 }
708 }
709
710 /* Alpha implements a couple of integer vector mode operations when
711 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
712 which allows the vectorizer to operate on e.g. move instructions,
713 or when expand_vector_operations can do something useful. */
714
715 static bool
716 alpha_vector_mode_supported_p (enum machine_mode mode)
717 {
718 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
719 }
720
721 /* Return 1 if this function can directly return via $26. */
722
723 int
724 direct_return (void)
725 {
726 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
727 && reload_completed
728 && alpha_sa_size () == 0
729 && get_frame_size () == 0
730 && current_function_outgoing_args_size == 0
731 && current_function_pretend_args_size == 0);
732 }
733
734 /* Return the ADDR_VEC associated with a tablejump insn. */
735
736 rtx
737 alpha_tablejump_addr_vec (rtx insn)
738 {
739 rtx tmp;
740
741 tmp = JUMP_LABEL (insn);
742 if (!tmp)
743 return NULL_RTX;
744 tmp = NEXT_INSN (tmp);
745 if (!tmp)
746 return NULL_RTX;
747 if (GET_CODE (tmp) == JUMP_INSN
748 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
749 return PATTERN (tmp);
750 return NULL_RTX;
751 }
752
753 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
754
755 rtx
756 alpha_tablejump_best_label (rtx insn)
757 {
758 rtx jump_table = alpha_tablejump_addr_vec (insn);
759 rtx best_label = NULL_RTX;
760
761 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
762 there for edge frequency counts from profile data. */
763
764 if (jump_table)
765 {
766 int n_labels = XVECLEN (jump_table, 1);
767 int best_count = -1;
768 int i, j;
769
770 for (i = 0; i < n_labels; i++)
771 {
772 int count = 1;
773
774 for (j = i + 1; j < n_labels; j++)
775 if (XEXP (XVECEXP (jump_table, 1, i), 0)
776 == XEXP (XVECEXP (jump_table, 1, j), 0))
777 count++;
778
779 if (count > best_count)
780 best_count = count, best_label = XVECEXP (jump_table, 1, i);
781 }
782 }
783
784 return best_label ? best_label : const0_rtx;
785 }
786
787 /* Return the TLS model to use for SYMBOL. */
788
789 static enum tls_model
790 tls_symbolic_operand_type (rtx symbol)
791 {
792 enum tls_model model;
793
794 if (GET_CODE (symbol) != SYMBOL_REF)
795 return 0;
796 model = SYMBOL_REF_TLS_MODEL (symbol);
797
798 /* Local-exec with a 64-bit size is the same code as initial-exec. */
799 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
800 model = TLS_MODEL_INITIAL_EXEC;
801
802 return model;
803 }
804 \f
805 /* Return true if the function DECL will share the same GP as any
806 function in the current unit of translation. */
807
808 static bool
809 decl_has_samegp (tree decl)
810 {
811 /* Functions that are not local can be overridden, and thus may
812 not share the same gp. */
813 if (!(*targetm.binds_local_p) (decl))
814 return false;
815
816 /* If -msmall-data is in effect, assume that there is only one GP
817 for the module, and so any local symbol has this property. We
818 need explicit relocations to be able to enforce this for symbols
819 not defined in this unit of translation, however. */
820 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
821 return true;
822
823 /* Functions that are not external are defined in this UoT. */
824 /* ??? Irritatingly, static functions not yet emitted are still
825 marked "external". Apply this to non-static functions only. */
826 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
827 }
828
829 /* Return true if EXP should be placed in the small data section. */
830
831 static bool
832 alpha_in_small_data_p (tree exp)
833 {
834 /* We want to merge strings, so we never consider them small data. */
835 if (TREE_CODE (exp) == STRING_CST)
836 return false;
837
838 /* Functions are never in the small data area. Duh. */
839 if (TREE_CODE (exp) == FUNCTION_DECL)
840 return false;
841
842 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
843 {
844 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
845 if (strcmp (section, ".sdata") == 0
846 || strcmp (section, ".sbss") == 0)
847 return true;
848 }
849 else
850 {
851 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
852
853 /* If this is an incomplete type with size 0, then we can't put it
854 in sdata because it might be too big when completed. */
855 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
856 return true;
857 }
858
859 return false;
860 }
861
862 #if TARGET_ABI_OPEN_VMS
863 static bool
864 alpha_linkage_symbol_p (const char *symname)
865 {
866 int symlen = strlen (symname);
867
868 if (symlen > 4)
869 return strcmp (&symname [symlen - 4], "..lk") == 0;
870
871 return false;
872 }
873
874 #define LINKAGE_SYMBOL_REF_P(X) \
875 ((GET_CODE (X) == SYMBOL_REF \
876 && alpha_linkage_symbol_p (XSTR (X, 0))) \
877 || (GET_CODE (X) == CONST \
878 && GET_CODE (XEXP (X, 0)) == PLUS \
879 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
880 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
881 #endif
882
883 /* legitimate_address_p recognizes an RTL expression that is a valid
884 memory address for an instruction. The MODE argument is the
885 machine mode for the MEM expression that wants to use this address.
886
887 For Alpha, we have either a constant address or the sum of a
888 register and a constant address, or just a register. For DImode,
889 any of those forms can be surrounded with an AND that clears the
890 low-order three bits; this is an "unaligned" access. */
891
892 bool
893 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
894 {
895 /* If this is an ldq_u type address, discard the outer AND. */
896 if (mode == DImode
897 && GET_CODE (x) == AND
898 && GET_CODE (XEXP (x, 1)) == CONST_INT
899 && INTVAL (XEXP (x, 1)) == -8)
900 x = XEXP (x, 0);
901
902 /* Discard non-paradoxical subregs. */
903 if (GET_CODE (x) == SUBREG
904 && (GET_MODE_SIZE (GET_MODE (x))
905 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
906 x = SUBREG_REG (x);
907
908 /* Unadorned general registers are valid. */
909 if (REG_P (x)
910 && (strict
911 ? STRICT_REG_OK_FOR_BASE_P (x)
912 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
913 return true;
914
915 /* Constant addresses (i.e. +/- 32k) are valid. */
916 if (CONSTANT_ADDRESS_P (x))
917 return true;
918
919 #if TARGET_ABI_OPEN_VMS
920 if (LINKAGE_SYMBOL_REF_P (x))
921 return true;
922 #endif
923
924 /* Register plus a small constant offset is valid. */
925 if (GET_CODE (x) == PLUS)
926 {
927 rtx ofs = XEXP (x, 1);
928 x = XEXP (x, 0);
929
930 /* Discard non-paradoxical subregs. */
931 if (GET_CODE (x) == SUBREG
932 && (GET_MODE_SIZE (GET_MODE (x))
933 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
934 x = SUBREG_REG (x);
935
936 if (REG_P (x))
937 {
938 if (! strict
939 && NONSTRICT_REG_OK_FP_BASE_P (x)
940 && GET_CODE (ofs) == CONST_INT)
941 return true;
942 if ((strict
943 ? STRICT_REG_OK_FOR_BASE_P (x)
944 : NONSTRICT_REG_OK_FOR_BASE_P (x))
945 && CONSTANT_ADDRESS_P (ofs))
946 return true;
947 }
948 }
949
950 /* If we're managing explicit relocations, LO_SUM is valid, as
951 are small data symbols. */
952 else if (TARGET_EXPLICIT_RELOCS)
953 {
954 if (small_symbolic_operand (x, Pmode))
955 return true;
956
957 if (GET_CODE (x) == LO_SUM)
958 {
959 rtx ofs = XEXP (x, 1);
960 x = XEXP (x, 0);
961
962 /* Discard non-paradoxical subregs. */
963 if (GET_CODE (x) == SUBREG
964 && (GET_MODE_SIZE (GET_MODE (x))
965 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
966 x = SUBREG_REG (x);
967
968 /* Must have a valid base register. */
969 if (! (REG_P (x)
970 && (strict
971 ? STRICT_REG_OK_FOR_BASE_P (x)
972 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
973 return false;
974
975 /* The symbol must be local. */
976 if (local_symbolic_operand (ofs, Pmode)
977 || dtp32_symbolic_operand (ofs, Pmode)
978 || tp32_symbolic_operand (ofs, Pmode))
979 return true;
980 }
981 }
982
983 return false;
984 }
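/* Illustrative inputs (assumed, not exhaustive): (reg $16) and
   (plus (reg $16) (const_int 4096)) are accepted, since 4096 lies within
   the +/- 32k CONSTANT_ADDRESS_P range; for DImode,
   (and (plus (reg $16) (const_int 5)) (const_int -8)) is also accepted,
   the outer AND marking an ldq_u-style unaligned access.  */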
985
986 /* Build the SYMBOL_REF for __tls_get_addr. */
987
988 static GTY(()) rtx tls_get_addr_libfunc;
989
990 static rtx
991 get_tls_get_addr (void)
992 {
993 if (!tls_get_addr_libfunc)
994 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
995 return tls_get_addr_libfunc;
996 }
997
998 /* Try machine-dependent ways of modifying an illegitimate address
999 to be legitimate. If we find one, return the new, valid address. */
1000
1001 rtx
1002 alpha_legitimize_address (rtx x, rtx scratch,
1003 enum machine_mode mode ATTRIBUTE_UNUSED)
1004 {
1005 HOST_WIDE_INT addend;
1006
1007 /* If the address is (plus reg const_int) and the CONST_INT is not a
1008 valid offset, compute the high part of the constant and add it to
1009 the register. Then our address is (plus temp low-part-const). */
1010 if (GET_CODE (x) == PLUS
1011 && GET_CODE (XEXP (x, 0)) == REG
1012 && GET_CODE (XEXP (x, 1)) == CONST_INT
1013 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1014 {
1015 addend = INTVAL (XEXP (x, 1));
1016 x = XEXP (x, 0);
1017 goto split_addend;
1018 }
1019
1020 /* If the address is (const (plus FOO const_int)), find the low-order
1021 part of the CONST_INT. Then load FOO plus any high-order part of the
1022 CONST_INT into a register. Our address is (plus reg low-part-const).
1023 This is done to reduce the number of GOT entries. */
1024 if (!no_new_pseudos
1025 && GET_CODE (x) == CONST
1026 && GET_CODE (XEXP (x, 0)) == PLUS
1027 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1028 {
1029 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1030 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1031 goto split_addend;
1032 }
1033
1034 /* If we have a (plus reg const), emit the load as in (2), then add
1035 the two registers, and finally generate (plus reg low-part-const) as
1036 our address. */
1037 if (!no_new_pseudos
1038 && GET_CODE (x) == PLUS
1039 && GET_CODE (XEXP (x, 0)) == REG
1040 && GET_CODE (XEXP (x, 1)) == CONST
1041 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1042 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1043 {
1044 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1045 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1046 XEXP (XEXP (XEXP (x, 1), 0), 0),
1047 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1048 goto split_addend;
1049 }
1050
1051 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1052 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1053 {
1054 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1055
1056 switch (tls_symbolic_operand_type (x))
1057 {
1058 case TLS_MODEL_NONE:
1059 break;
1060
1061 case TLS_MODEL_GLOBAL_DYNAMIC:
1062 start_sequence ();
1063
1064 r0 = gen_rtx_REG (Pmode, 0);
1065 r16 = gen_rtx_REG (Pmode, 16);
1066 tga = get_tls_get_addr ();
1067 dest = gen_reg_rtx (Pmode);
1068 seq = GEN_INT (alpha_next_sequence_number++);
1069
1070 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1071 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1072 insn = emit_call_insn (insn);
1073 CONST_OR_PURE_CALL_P (insn) = 1;
1074 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1075
1076 insn = get_insns ();
1077 end_sequence ();
1078
1079 emit_libcall_block (insn, dest, r0, x);
1080 return dest;
1081
1082 case TLS_MODEL_LOCAL_DYNAMIC:
1083 start_sequence ();
1084
1085 r0 = gen_rtx_REG (Pmode, 0);
1086 r16 = gen_rtx_REG (Pmode, 16);
1087 tga = get_tls_get_addr ();
1088 scratch = gen_reg_rtx (Pmode);
1089 seq = GEN_INT (alpha_next_sequence_number++);
1090
1091 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1092 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1093 insn = emit_call_insn (insn);
1094 CONST_OR_PURE_CALL_P (insn) = 1;
1095 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1096
1097 insn = get_insns ();
1098 end_sequence ();
1099
1100 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1101 UNSPEC_TLSLDM_CALL);
1102 emit_libcall_block (insn, scratch, r0, eqv);
1103
1104 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1105 eqv = gen_rtx_CONST (Pmode, eqv);
1106
1107 if (alpha_tls_size == 64)
1108 {
1109 dest = gen_reg_rtx (Pmode);
1110 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1111 emit_insn (gen_adddi3 (dest, dest, scratch));
1112 return dest;
1113 }
1114 if (alpha_tls_size == 32)
1115 {
1116 insn = gen_rtx_HIGH (Pmode, eqv);
1117 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1118 scratch = gen_reg_rtx (Pmode);
1119 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1120 }
1121 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1122
1123 case TLS_MODEL_INITIAL_EXEC:
1124 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1125 eqv = gen_rtx_CONST (Pmode, eqv);
1126 tp = gen_reg_rtx (Pmode);
1127 scratch = gen_reg_rtx (Pmode);
1128 dest = gen_reg_rtx (Pmode);
1129
1130 emit_insn (gen_load_tp (tp));
1131 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1132 emit_insn (gen_adddi3 (dest, tp, scratch));
1133 return dest;
1134
1135 case TLS_MODEL_LOCAL_EXEC:
1136 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1137 eqv = gen_rtx_CONST (Pmode, eqv);
1138 tp = gen_reg_rtx (Pmode);
1139
1140 emit_insn (gen_load_tp (tp));
1141 if (alpha_tls_size == 32)
1142 {
1143 insn = gen_rtx_HIGH (Pmode, eqv);
1144 insn = gen_rtx_PLUS (Pmode, tp, insn);
1145 tp = gen_reg_rtx (Pmode);
1146 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1147 }
1148 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1149
1150 default:
1151 gcc_unreachable ();
1152 }
1153
1154 if (local_symbolic_operand (x, Pmode))
1155 {
1156 if (small_symbolic_operand (x, Pmode))
1157 return x;
1158 else
1159 {
1160 if (!no_new_pseudos)
1161 scratch = gen_reg_rtx (Pmode);
1162 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1163 gen_rtx_HIGH (Pmode, x)));
1164 return gen_rtx_LO_SUM (Pmode, scratch, x);
1165 }
1166 }
1167 }
1168
1169 return NULL;
1170
1171 split_addend:
1172 {
1173 HOST_WIDE_INT low, high;
1174
1175 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1176 addend -= low;
1177 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1178 addend -= high;
1179
1180 if (addend)
1181 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1182 (no_new_pseudos ? scratch : NULL_RTX),
1183 1, OPTAB_LIB_WIDEN);
1184 if (high)
1185 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1186 (no_new_pseudos ? scratch : NULL_RTX),
1187 1, OPTAB_LIB_WIDEN);
1188
1189 return plus_constant (x, low);
1190 }
1191 }
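/* A worked example of the split_addend arithmetic above (illustrative
   numbers): for an addend of 0x12348765, LOW becomes -0x789b (the
   sign-extended low 16 bits), leaving 0x12350000, all of which fits in
   HIGH; the address is then rebuilt as the high part added into a register
   (an ldah-style add of 0x1235 << 16) plus a 16-bit displacement of
   -0x789b on the final memory reference.  */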
1192
1193 /* Primarily this is required for TLS symbols, but given that our move
1194 patterns *ought* to be able to handle any symbol at any time, we
1195 should never be spilling symbolic operands to the constant pool, ever. */
1196
1197 static bool
1198 alpha_cannot_force_const_mem (rtx x)
1199 {
1200 enum rtx_code code = GET_CODE (x);
1201 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1202 }
1203
1204 /* We do not allow indirect calls to be optimized into sibling calls, nor
1205 can we allow a call to a function with a different GP to be optimized
1206 into a sibcall. */
1207
1208 static bool
1209 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1210 {
1211 /* Can't do indirect tail calls, since we don't know if the target
1212 uses the same GP. */
1213 if (!decl)
1214 return false;
1215
1216 /* Otherwise, we can make a tail call if the target function shares
1217 the same GP. */
1218 return decl_has_samegp (decl);
1219 }
1220
1221 int
1222 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1223 {
1224 rtx x = *px;
1225
1226 /* Don't re-split. */
1227 if (GET_CODE (x) == LO_SUM)
1228 return -1;
1229
1230 return small_symbolic_operand (x, Pmode) != 0;
1231 }
1232
1233 static int
1234 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1235 {
1236 rtx x = *px;
1237
1238 /* Don't re-split. */
1239 if (GET_CODE (x) == LO_SUM)
1240 return -1;
1241
1242 if (small_symbolic_operand (x, Pmode))
1243 {
1244 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1245 *px = x;
1246 return -1;
1247 }
1248
1249 return 0;
1250 }
1251
1252 rtx
1253 split_small_symbolic_operand (rtx x)
1254 {
1255 x = copy_insn (x);
1256 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1257 return x;
1258 }
1259
1260 /* Indicate that INSN cannot be duplicated. This is true for any insn
1261 that we've marked with gpdisp relocs, since those have to stay in
1262 1-1 correspondence with one another.
1263
1264 Technically we could copy them if we could set up a mapping from one
1265 sequence number to another, across the set of insns to be duplicated.
1266 This seems overly complicated and error-prone since interblock motion
1267 from sched-ebb could move one of the pair of insns to a different block.
1268
1269 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1270 then they'll be in a different block from their ldgp. Which could lead
1271 the bb reorder code to think that it would be ok to copy just the block
1272 containing the call and branch to the block containing the ldgp. */
1273
1274 static bool
1275 alpha_cannot_copy_insn_p (rtx insn)
1276 {
1277 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1278 return false;
1279 if (recog_memoized (insn) >= 0)
1280 return get_attr_cannot_copy (insn);
1281 else
1282 return false;
1283 }
1284
1285
1286 /* Try a machine-dependent way of reloading an illegitimate address
1287 operand. If we find one, push the reload and return the new rtx. */
1288
1289 rtx
1290 alpha_legitimize_reload_address (rtx x,
1291 enum machine_mode mode ATTRIBUTE_UNUSED,
1292 int opnum, int type,
1293 int ind_levels ATTRIBUTE_UNUSED)
1294 {
1295 /* We must recognize output that we have already generated ourselves. */
1296 if (GET_CODE (x) == PLUS
1297 && GET_CODE (XEXP (x, 0)) == PLUS
1298 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1299 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1300 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1301 {
1302 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1303 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1304 opnum, type);
1305 return x;
1306 }
1307
1308 /* We wish to handle large displacements off a base register by
1309 splitting the addend across an ldah and the mem insn. This
1310 cuts the number of extra insns needed from 3 to 1. */
1311 if (GET_CODE (x) == PLUS
1312 && GET_CODE (XEXP (x, 0)) == REG
1313 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1314 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1315 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1316 {
1317 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1318 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1319 HOST_WIDE_INT high
1320 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1321
1322 /* Check for 32-bit overflow. */
1323 if (high + low != val)
1324 return NULL_RTX;
1325
1326 /* Reload the high part into a base reg; leave the low part
1327 in the mem directly. */
1328 x = gen_rtx_PLUS (GET_MODE (x),
1329 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1330 GEN_INT (high)),
1331 GEN_INT (low));
1332
1333 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1334 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1335 opnum, type);
1336 return x;
1337 }
1338
1339 return NULL_RTX;
1340 }
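/* For instance (assumed values): reloading (plus (reg $9) (const_int 0x9000))
   splits into HIGH = 0x10000 and LOW = -0x7000, so the high part can be
   added into a base register with a single ldah while the -0x7000
   displacement stays in the memory reference itself.  */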
1341 \f
1342 /* Compute a (partial) cost for rtx X. Return true if the complete
1343 cost has been computed, and false if subexpressions should be
1344 scanned. In either case, *TOTAL contains the cost result. */
1345
1346 static bool
1347 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1348 {
1349 enum machine_mode mode = GET_MODE (x);
1350 bool float_mode_p = FLOAT_MODE_P (mode);
1351 const struct alpha_rtx_cost_data *cost_data;
1352
1353 if (optimize_size)
1354 cost_data = &alpha_rtx_cost_size;
1355 else
1356 cost_data = &alpha_rtx_cost_data[alpha_tune];
1357
1358 switch (code)
1359 {
1360 case CONST_INT:
1361 /* If this is an 8-bit constant, return zero since it can be used
1362 nearly anywhere with no cost. If it is a valid operand for an
1363 ADD or AND, likewise return 0 if we know it will be used in that
1364 context. Otherwise, return 2 since it might be used there later.
1365 All other constants take at least two insns. */
1366 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1367 {
1368 *total = 0;
1369 return true;
1370 }
1371 /* FALLTHRU */
1372
1373 case CONST_DOUBLE:
1374 if (x == CONST0_RTX (mode))
1375 *total = 0;
1376 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1377 || (outer_code == AND && and_operand (x, VOIDmode)))
1378 *total = 0;
1379 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1380 *total = 2;
1381 else
1382 *total = COSTS_N_INSNS (2);
1383 return true;
1384
1385 case CONST:
1386 case SYMBOL_REF:
1387 case LABEL_REF:
1388 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1389 *total = COSTS_N_INSNS (outer_code != MEM);
1390 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1391 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1392 else if (tls_symbolic_operand_type (x))
1393 /* Estimate of cost for call_pal rduniq. */
1394 /* ??? How many insns do we emit here? More than one... */
1395 *total = COSTS_N_INSNS (15);
1396 else
1397 /* Otherwise we do a load from the GOT. */
1398 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1399 return true;
1400
1401 case HIGH:
1402 /* This is effectively an add_operand. */
1403 *total = 2;
1404 return true;
1405
1406 case PLUS:
1407 case MINUS:
1408 if (float_mode_p)
1409 *total = cost_data->fp_add;
1410 else if (GET_CODE (XEXP (x, 0)) == MULT
1411 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1412 {
1413 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1414 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1415 return true;
1416 }
1417 return false;
1418
1419 case MULT:
1420 if (float_mode_p)
1421 *total = cost_data->fp_mult;
1422 else if (mode == DImode)
1423 *total = cost_data->int_mult_di;
1424 else
1425 *total = cost_data->int_mult_si;
1426 return false;
1427
1428 case ASHIFT:
1429 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1430 && INTVAL (XEXP (x, 1)) <= 3)
1431 {
1432 *total = COSTS_N_INSNS (1);
1433 return false;
1434 }
1435 /* FALLTHRU */
1436
1437 case ASHIFTRT:
1438 case LSHIFTRT:
1439 *total = cost_data->int_shift;
1440 return false;
1441
1442 case IF_THEN_ELSE:
1443 if (float_mode_p)
1444 *total = cost_data->fp_add;
1445 else
1446 *total = cost_data->int_cmov;
1447 return false;
1448
1449 case DIV:
1450 case UDIV:
1451 case MOD:
1452 case UMOD:
1453 if (!float_mode_p)
1454 *total = cost_data->int_div;
1455 else if (mode == SFmode)
1456 *total = cost_data->fp_div_sf;
1457 else
1458 *total = cost_data->fp_div_df;
1459 return false;
1460
1461 case MEM:
1462 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1463 return true;
1464
1465 case NEG:
1466 if (! float_mode_p)
1467 {
1468 *total = COSTS_N_INSNS (1);
1469 return false;
1470 }
1471 /* FALLTHRU */
1472
1473 case ABS:
1474 if (! float_mode_p)
1475 {
1476 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1477 return false;
1478 }
1479 /* FALLTHRU */
1480
1481 case FLOAT:
1482 case UNSIGNED_FLOAT:
1483 case FIX:
1484 case UNSIGNED_FIX:
1485 case FLOAT_TRUNCATE:
1486 *total = cost_data->fp_add;
1487 return false;
1488
1489 case FLOAT_EXTEND:
1490 if (GET_CODE (XEXP (x, 0)) == MEM)
1491 *total = 0;
1492 else
1493 *total = cost_data->fp_add;
1494 return false;
1495
1496 default:
1497 return false;
1498 }
1499 }
1500 \f
1501 /* REF is an alignable memory location. Place an aligned SImode
1502 reference into *PALIGNED_MEM and the number of bits to shift into
1503 *PBITNUM. */
1505
1506 void
1507 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1508 {
1509 rtx base;
1510 HOST_WIDE_INT disp, offset;
1511
1512 gcc_assert (GET_CODE (ref) == MEM);
1513
1514 if (reload_in_progress
1515 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1516 {
1517 base = find_replacement (&XEXP (ref, 0));
1518 gcc_assert (memory_address_p (GET_MODE (ref), base));
1519 }
1520 else
1521 base = XEXP (ref, 0);
1522
1523 if (GET_CODE (base) == PLUS)
1524 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1525 else
1526 disp = 0;
1527
1528 /* Find the byte offset within an aligned word. If the memory itself is
1529 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1530 will have examined the base register and determined it is aligned, and
1531 thus displacements from it are naturally alignable. */
1532 if (MEM_ALIGN (ref) >= 32)
1533 offset = 0;
1534 else
1535 offset = disp & 3;
1536
1537 /* Access the entire aligned word. */
1538 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1539
1540 /* Convert the byte offset within the word to a bit offset. */
1541 if (WORDS_BIG_ENDIAN)
1542 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1543 else
1544 offset *= 8;
1545 *pbitnum = GEN_INT (offset);
1546 }
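/* Illustrative case (little-endian, which is what Alpha uses): an HImode
   reference at displacement 6 from a merely word-aligned base yields
   OFFSET = 2, so *PALIGNED_MEM is the SImode word at displacement 4 and
   *PBITNUM is 16 -- the halfword occupies bits 16..31 of that word.  */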
1547
1548 /* Similar, but just get the address. Handle the two reload cases.
1549 Add EXTRA_OFFSET to the address we return. */
1550
1551 rtx
1552 get_unaligned_address (rtx ref, int extra_offset)
1553 {
1554 rtx base;
1555 HOST_WIDE_INT offset = 0;
1556
1557 gcc_assert (GET_CODE (ref) == MEM);
1558
1559 if (reload_in_progress
1560 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1561 {
1562 base = find_replacement (&XEXP (ref, 0));
1563
1564 gcc_assert (memory_address_p (GET_MODE (ref), base));
1565 }
1566 else
1567 base = XEXP (ref, 0);
1568
1569 if (GET_CODE (base) == PLUS)
1570 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1571
1572 return plus_constant (base, offset + extra_offset);
1573 }
1574
1575 /* On the Alpha, all (non-symbolic) constants except zero go into
1576 a floating-point register via memory. Note that we cannot
1577 return anything that is not a subset of CLASS, and that some
1578 symbolic constants cannot be dropped to memory. */
1579
1580 enum reg_class
1581 alpha_preferred_reload_class (rtx x, enum reg_class class)
1582 {
1583 /* Zero is present in any register class. */
1584 if (x == CONST0_RTX (GET_MODE (x)))
1585 return class;
1586
1587 /* These sorts of constants we can easily drop to memory. */
1588 if (GET_CODE (x) == CONST_INT
1589 || GET_CODE (x) == CONST_DOUBLE
1590 || GET_CODE (x) == CONST_VECTOR)
1591 {
1592 if (class == FLOAT_REGS)
1593 return NO_REGS;
1594 if (class == ALL_REGS)
1595 return GENERAL_REGS;
1596 return class;
1597 }
1598
1599 /* All other kinds of constants should not (and in the case of HIGH
1600 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1601 secondary reload. */
1602 if (CONSTANT_P (x))
1603 return (class == ALL_REGS ? GENERAL_REGS : class);
1604
1605 return class;
1606 }
1607
1608 /* Loading and storing HImode or QImode values to and from memory
1609 usually requires a scratch register. The exceptions are loading
1610 QImode and HImode from an aligned address to a general register
1611 unless byte instructions are permitted.
1612
1613 We also cannot load an unaligned address or a paradoxical SUBREG
1614 into an FP register.
1615
1616 We also cannot do integral arithmetic into FP regs, as might result
1617 from register elimination into a DImode fp register. */
1618
1619 enum reg_class
1620 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1621 rtx x, int in)
1622 {
1623 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1624 {
1625 if (GET_CODE (x) == MEM
1626 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1627 || (GET_CODE (x) == SUBREG
1628 && (GET_CODE (SUBREG_REG (x)) == MEM
1629 || (GET_CODE (SUBREG_REG (x)) == REG
1630 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1631 {
1632 if (!in || !aligned_memory_operand (x, mode))
1633 return GENERAL_REGS;
1634 }
1635 }
1636
1637 if (class == FLOAT_REGS)
1638 {
1639 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1640 return GENERAL_REGS;
1641
1642 if (GET_CODE (x) == SUBREG
1643 && (GET_MODE_SIZE (GET_MODE (x))
1644 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1645 return GENERAL_REGS;
1646
1647 if (in && INTEGRAL_MODE_P (mode)
1648 && ! (memory_operand (x, mode) || x == const0_rtx))
1649 return GENERAL_REGS;
1650 }
1651
1652 return NO_REGS;
1653 }
1654 \f
1655 /* Subfunction of the following function. Update the flags of any MEM
1656 found in part of X. */
1657
1658 static int
1659 alpha_set_memflags_1 (rtx *xp, void *data)
1660 {
1661 rtx x = *xp, orig = (rtx) data;
1662
1663 if (GET_CODE (x) != MEM)
1664 return 0;
1665
1666 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1667 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1668 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1669 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1670 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1671
1672 /* Sadly, we cannot use alias sets because the extra aliasing
1673 produced by the AND interferes. Given that two-byte quantities
1674 are the only thing we would be able to differentiate anyway,
1675 there does not seem to be any point in convoluting the early
1676 out of the alias check. */
1677
1678 return -1;
1679 }
1680
1681 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1682 generated to perform a memory operation, look for any MEMs in either
1683 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar, notrap,
1684 and readonly flags from REF into each of the MEMs found. If REF is not
1685 a MEM, don't do anything. */
1686
1687 void
1688 alpha_set_memflags (rtx insn, rtx ref)
1689 {
1690 rtx *base_ptr;
1691
1692 if (GET_CODE (ref) != MEM)
1693 return;
1694
1695 /* This is only called from alpha.md, after having had something
1696 generated from one of the insn patterns. So if everything is
1697 zero, the pattern is already up-to-date. */
1698 if (!MEM_VOLATILE_P (ref)
1699 && !MEM_IN_STRUCT_P (ref)
1700 && !MEM_SCALAR_P (ref)
1701 && !MEM_NOTRAP_P (ref)
1702 && !MEM_READONLY_P (ref))
1703 return;
1704
1705 if (INSN_P (insn))
1706 base_ptr = &PATTERN (insn);
1707 else
1708 base_ptr = &insn;
1709 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1710 }
1711 \f
1712 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1713 int, bool);
1714
1715 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1716 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1717 and return pc_rtx if successful. */
1718
1719 static rtx
1720 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1721 HOST_WIDE_INT c, int n, bool no_output)
1722 {
1723 HOST_WIDE_INT new;
1724 int i, bits;
1725 /* Use a pseudo if highly optimizing and still generating RTL. */
1726 rtx subtarget
1727 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1728 rtx temp, insn;
1729
1730 /* If this is a sign-extended 32-bit constant, we can do this in at most
1731 three insns, so do it if we have enough insns left. We always have
1732 a sign-extended 32-bit constant when compiling on a narrow machine. */
1733
1734 if (HOST_BITS_PER_WIDE_INT != 64
1735 || c >> 31 == -1 || c >> 31 == 0)
1736 {
1737 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1738 HOST_WIDE_INT tmp1 = c - low;
1739 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1740 HOST_WIDE_INT extra = 0;
1741
1742 /* If HIGH will be interpreted as negative but the constant is
1743 positive, we must adjust it to do two ldah insns. */
1744
1745 if ((high & 0x8000) != 0 && c >= 0)
1746 {
1747 extra = 0x4000;
1748 tmp1 -= 0x40000000;
1749 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1750 }
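      /* For example (numbers chosen purely for illustration): c = 0x7fff8000
	 gives LOW = -0x8000 and an initial HIGH of -0x8000; since c is
	 positive, the adjustment above yields HIGH = 0x4000 and EXTRA =
	 0x4000, so the constant is built as two ldah's of 0x4000 followed
	 by an lda of -0x8000.  */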
1751
1752 if (c == low || (low == 0 && extra == 0))
1753 {
1754 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1755 but that meant that we can't handle INT_MIN on 32-bit machines
1756 (like NT/Alpha), because we recurse indefinitely through
1757 emit_move_insn to gen_movdi. So instead, since we know exactly
1758 what we want, create it explicitly. */
1759
1760 if (no_output)
1761 return pc_rtx;
1762 if (target == NULL)
1763 target = gen_reg_rtx (mode);
1764 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1765 return target;
1766 }
1767 else if (n >= 2 + (extra != 0))
1768 {
1769 if (no_output)
1770 return pc_rtx;
1771 if (no_new_pseudos)
1772 {
1773 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1774 temp = target;
1775 }
1776 else
1777 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1778 subtarget, mode);
1779
1780 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1781 This means that if we go through expand_binop, we'll try to
1782 generate extensions, etc, which will require new pseudos, which
1783 will fail during some split phases. The SImode add patterns
1784 still exist, but are not named. So build the insns by hand. */
1785
1786 if (extra != 0)
1787 {
1788 if (! subtarget)
1789 subtarget = gen_reg_rtx (mode);
1790 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1791 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1792 emit_insn (insn);
1793 temp = subtarget;
1794 }
1795
1796 if (target == NULL)
1797 target = gen_reg_rtx (mode);
1798 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1799 insn = gen_rtx_SET (VOIDmode, target, insn);
1800 emit_insn (insn);
1801 return target;
1802 }
1803 }
1804
1805 /* If we couldn't do it that way, try some other methods. But if we have
1806 no instructions left, don't bother. Likewise, if this is SImode and
1807 we can't make pseudos, we can't do anything since the expand_binop
1808 and expand_unop calls will widen and try to make pseudos. */
1809
1810 if (n == 1 || (mode == SImode && no_new_pseudos))
1811 return 0;
1812
1813 /* Next, see if we can load a related constant and then shift and possibly
1814 negate it to get the constant we want. Try this once each increasing
1815 numbers of insns. */
1816
1817 for (i = 1; i < n; i++)
1818 {
1819 /* First, see if, minus some low bits, we have an easy load of the
1820 high bits. */
1821
1822 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1823 if (new != 0)
1824 {
1825 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1826 if (temp)
1827 {
1828 if (no_output)
1829 return temp;
1830 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1831 target, 0, OPTAB_WIDEN);
1832 }
1833 }
1834
1835 /* Next try complementing. */
1836 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1837 if (temp)
1838 {
1839 if (no_output)
1840 return temp;
1841 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1842 }
1843
1844 /* Next try to form a constant and do a left shift. We can do this
1845 if some low-order bits are zero; the exact_log2 call below tells
1846 us that information. The bits we are shifting out could be any
1847 value, but here we'll just try the 0- and sign-extended forms of
1848 the constant. To try to increase the chance of having the same
1849 constant in more than one insn, start at the highest number of
1850 bits to shift, but try all possibilities in case a ZAPNOT will
1851 be useful. */
1852
1853 bits = exact_log2 (c & -c);
1854 if (bits > 0)
1855 for (; bits > 0; bits--)
1856 {
1857 new = c >> bits;
1858 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1859 if (!temp && c < 0)
1860 {
1861 new = (unsigned HOST_WIDE_INT)c >> bits;
1862 temp = alpha_emit_set_const (subtarget, mode, new,
1863 i, no_output);
1864 }
1865 if (temp)
1866 {
1867 if (no_output)
1868 return temp;
1869 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1870 target, 0, OPTAB_WIDEN);
1871 }
1872 }
1873
1874 /* Now try high-order zero bits. Here we try the shifted-in bits as
1875 all zero and all ones. Be careful to avoid shifting outside the
1876 mode and to avoid shifting outside the host wide int size. */
1877 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1878 confuse the recursive call and set all of the high 32 bits. */
1879
1880 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1881 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1882 if (bits > 0)
1883 for (; bits > 0; bits--)
1884 {
1885 new = c << bits;
1886 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1887 if (!temp)
1888 {
1889 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1890 temp = alpha_emit_set_const (subtarget, mode, new,
1891 i, no_output);
1892 }
1893 if (temp)
1894 {
1895 if (no_output)
1896 return temp;
1897 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1898 target, 1, OPTAB_WIDEN);
1899 }
1900 }
1901
1902 /* Now try high-order 1 bits. We get that with a sign-extension.
1903 But one bit isn't enough here. Be careful to avoid shifting outside
1904 the mode and to avoid shifting outside the host wide int size. */
1905
1906 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1907 - floor_log2 (~ c) - 2);
1908 if (bits > 0)
1909 for (; bits > 0; bits--)
1910 {
1911 new = c << bits;
1912 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1913 if (!temp)
1914 {
1915 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1916 temp = alpha_emit_set_const (subtarget, mode, new,
1917 i, no_output);
1918 }
1919 if (temp)
1920 {
1921 if (no_output)
1922 return temp;
1923 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1924 target, 0, OPTAB_WIDEN);
1925 }
1926 }
1927 }
1928
1929 #if HOST_BITS_PER_WIDE_INT == 64
1930 /* Finally, see if can load a value into the target that is the same as the
1931 constant except that all bytes that are 0 are changed to be 0xff. If we
1932 can, then we can do a ZAPNOT to obtain the desired constant. */
1933
1934 new = c;
1935 for (i = 0; i < 64; i += 8)
1936 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1937 new |= (HOST_WIDE_INT) 0xff << i;
1938
1939 /* We are only called for SImode and DImode. If this is SImode, ensure that
1940 we are sign extended to a full word. */
1941
1942 if (mode == SImode)
1943 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1944
1945 if (new != c)
1946 {
1947 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1948 if (temp)
1949 {
1950 if (no_output)
1951 return temp;
1952 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1953 target, 0, OPTAB_WIDEN);
1954 }
1955 }
1956 #endif
1957
1958 return 0;
1959 }
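
/* Illustrative sketch, not part of the original file: the "load a related
   constant, then shift" case handled in the loop above, written as plain
   host C.  The value and the helper name are hypothetical.  To materialize
   0x123450000, the recursive call can build 0x12345 with an ldah/lda pair,
   and the ashl_optab branch then shifts it into place.  */

#include <stdint.h>

static int64_t
build_by_shift (void)
{
  int64_t related = 0x12345;	/* reachable with ldah 1 + lda 0x2345 */
  return related << 16;		/* sll 16 => 0x123450000 */
}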
1960
1961 /* Try to output insns to set TARGET equal to the constant C if it can be
1962 done in at most N insns. Do all computations in MODE. Returns the place
1963 where the output has been placed if it can be done and the insns have been
1964 emitted. If it would take more than N insns, zero is returned and no
1965 insns are emitted. */
1966
1967 static rtx
1968 alpha_emit_set_const (rtx target, enum machine_mode mode,
1969 HOST_WIDE_INT c, int n, bool no_output)
1970 {
1971 enum machine_mode orig_mode = mode;
1972 rtx orig_target = target;
1973 rtx result = 0;
1974 int i;
1975
1976 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1977 can't load this constant in one insn, do this in DImode. */
1978 if (no_new_pseudos && mode == SImode
1979 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1980 {
1981 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1982 if (result)
1983 return result;
1984
1985 target = no_output ? NULL : gen_lowpart (DImode, target);
1986 mode = DImode;
1987 }
1988 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1989 {
1990 target = no_output ? NULL : gen_lowpart (DImode, target);
1991 mode = DImode;
1992 }
1993
1994 /* Try 1 insn, then 2, then up to N. */
1995 for (i = 1; i <= n; i++)
1996 {
1997 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1998 if (result)
1999 {
2000 rtx insn, set;
2001
2002 if (no_output)
2003 return result;
2004
2005 insn = get_last_insn ();
2006 set = single_set (insn);
2007 if (! CONSTANT_P (SET_SRC (set)))
2008 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2009 break;
2010 }
2011 }
2012
2013 /* Allow for the case where we changed the mode of TARGET. */
2014 if (result)
2015 {
2016 if (result == target)
2017 result = orig_target;
2018 else if (mode != orig_mode)
2019 result = gen_lowpart (orig_mode, result);
2020 }
2021
2022 return result;
2023 }
2024
2025 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2026 fall back to a straightforward decomposition. We do this to avoid
2027 exponential run times encountered when looking for longer sequences
2028 with alpha_emit_set_const. */
2029
2030 static rtx
2031 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2032 {
2033 HOST_WIDE_INT d1, d2, d3, d4;
2034
2035 /* Decompose the entire word */
2036 #if HOST_BITS_PER_WIDE_INT >= 64
2037 gcc_assert (c2 == -(c1 < 0));
2038 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2039 c1 -= d1;
2040 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2041 c1 = (c1 - d2) >> 32;
2042 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2043 c1 -= d3;
2044 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2045 gcc_assert (c1 == d4);
2046 #else
2047 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2048 c1 -= d1;
2049 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2050 gcc_assert (c1 == d2);
2051 c2 += (d2 < 0);
2052 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2053 c2 -= d3;
2054 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2055 gcc_assert (c2 == d4);
2056 #endif
2057
2058 /* Construct the high word */
2059 if (d4)
2060 {
2061 emit_move_insn (target, GEN_INT (d4));
2062 if (d3)
2063 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2064 }
2065 else
2066 emit_move_insn (target, GEN_INT (d3));
2067
2068 /* Shift it into place */
2069 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2070
2071 /* Add in the low bits. */
2072 if (d2)
2073 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2074 if (d1)
2075 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2076
2077 return target;
2078 }
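
/* Illustrative sketch, not part of the original file: how the d1..d4
   decomposition above splits and rebuilds a 64-bit constant, using the
   same sign-extending 16- and 32-bit slices.  The example value is
   arbitrary; this assumes a 64-bit host integer type.  */

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  int64_t c = 0x12345678abcd1234LL, orig = c;

  int64_t d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;		/* low lda   */
  c -= d1;
  int64_t d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;	/* low ldah  */
  c = (c - d2) >> 32;
  int64_t d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;		/* high lda  */
  c -= d3;
  int64_t d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;	/* high ldah */

  /* Rebuild the way the emitted sequence does: construct the high word,
     shift it into place, then add the low parts.  */
  int64_t rebuilt = (int64_t) (((uint64_t) (d4 + d3) << 32)
			       + (uint64_t) d2 + (uint64_t) d1);
  printf ("%d\n", rebuilt == orig);	/* prints 1 */
  return 0;
}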
2079
2080 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2081 the low 64 bits. */
2082
2083 static void
2084 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2085 {
2086 HOST_WIDE_INT i0, i1;
2087
2088 if (GET_CODE (x) == CONST_VECTOR)
2089 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2090
2091
2092 if (GET_CODE (x) == CONST_INT)
2093 {
2094 i0 = INTVAL (x);
2095 i1 = -(i0 < 0);
2096 }
2097 else if (HOST_BITS_PER_WIDE_INT >= 64)
2098 {
2099 i0 = CONST_DOUBLE_LOW (x);
2100 i1 = -(i0 < 0);
2101 }
2102 else
2103 {
2104 i0 = CONST_DOUBLE_LOW (x);
2105 i1 = CONST_DOUBLE_HIGH (x);
2106 }
2107
2108 *p0 = i0;
2109 *p1 = i1;
2110 }
2111
2112 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2113 are willing to load the value into a register via a move pattern.
2114 Normally this is all symbolic constants, integral constants that
2115 take three or fewer instructions, and floating-point zero. */
2116
2117 bool
2118 alpha_legitimate_constant_p (rtx x)
2119 {
2120 enum machine_mode mode = GET_MODE (x);
2121 HOST_WIDE_INT i0, i1;
2122
2123 switch (GET_CODE (x))
2124 {
2125 case CONST:
2126 case LABEL_REF:
2127 case HIGH:
2128 return true;
2129
2130 case SYMBOL_REF:
2131 /* TLS symbols are never valid. */
2132 return SYMBOL_REF_TLS_MODEL (x) == 0;
2133
2134 case CONST_DOUBLE:
2135 if (x == CONST0_RTX (mode))
2136 return true;
2137 if (FLOAT_MODE_P (mode))
2138 return false;
2139 goto do_integer;
2140
2141 case CONST_VECTOR:
2142 if (x == CONST0_RTX (mode))
2143 return true;
2144 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2145 return false;
2146 if (GET_MODE_SIZE (mode) != 8)
2147 return false;
2148 goto do_integer;
2149
2150 case CONST_INT:
2151 do_integer:
2152 if (TARGET_BUILD_CONSTANTS)
2153 return true;
2154 alpha_extract_integer (x, &i0, &i1);
2155 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2156 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2157 return false;
2158
2159 default:
2160 return false;
2161 }
2162 }
2163
2164 /* Operand 1 is known to be a constant, and should require more than one
2165 instruction to load. Emit that multi-part load. */
2166
2167 bool
2168 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2169 {
2170 HOST_WIDE_INT i0, i1;
2171 rtx temp = NULL_RTX;
2172
2173 alpha_extract_integer (operands[1], &i0, &i1);
2174
2175 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2176 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2177
2178 if (!temp && TARGET_BUILD_CONSTANTS)
2179 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2180
2181 if (temp)
2182 {
2183 if (!rtx_equal_p (operands[0], temp))
2184 emit_move_insn (operands[0], temp);
2185 return true;
2186 }
2187
2188 return false;
2189 }
2190
2191 /* Expand a move instruction; return true if all work is done.
2192 We don't handle non-bwx subword loads here. */
2193
2194 bool
2195 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2196 {
2197 /* If the output is not a register, the input must be. */
2198 if (GET_CODE (operands[0]) == MEM
2199 && ! reg_or_0_operand (operands[1], mode))
2200 operands[1] = force_reg (mode, operands[1]);
2201
2202 /* Allow legitimize_address to perform some simplifications. */
2203 if (mode == Pmode && symbolic_operand (operands[1], mode))
2204 {
2205 rtx tmp;
2206
2207 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2208 if (tmp)
2209 {
2210 if (tmp == operands[0])
2211 return true;
2212 operands[1] = tmp;
2213 return false;
2214 }
2215 }
2216
2217 /* Early out for non-constants and valid constants. */
2218 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2219 return false;
2220
2221 /* Split large integers. */
2222 if (GET_CODE (operands[1]) == CONST_INT
2223 || GET_CODE (operands[1]) == CONST_DOUBLE
2224 || GET_CODE (operands[1]) == CONST_VECTOR)
2225 {
2226 if (alpha_split_const_mov (mode, operands))
2227 return true;
2228 }
2229
2230 /* Otherwise we've nothing left but to drop the thing to memory. */
2231 operands[1] = force_const_mem (mode, operands[1]);
2232 if (reload_in_progress)
2233 {
2234 emit_move_insn (operands[0], XEXP (operands[1], 0));
2235 operands[1] = copy_rtx (operands[1]);
2236 XEXP (operands[1], 0) = operands[0];
2237 }
2238 else
2239 operands[1] = validize_mem (operands[1]);
2240 return false;
2241 }
2242
2243 /* Expand a non-bwx QImode or HImode move instruction;
2244 return true if all work is done. */
2245
2246 bool
2247 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2248 {
2249 /* If the output is not a register, the input must be. */
2250 if (GET_CODE (operands[0]) == MEM)
2251 operands[1] = force_reg (mode, operands[1]);
2252
2253 /* Handle four memory cases, unaligned and aligned for either the input
2254 or the output. The only case where we can be called during reload is
2255 for aligned loads; all other cases require temporaries. */
2256
2257 if (GET_CODE (operands[1]) == MEM
2258 || (GET_CODE (operands[1]) == SUBREG
2259 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2260 || (reload_in_progress && GET_CODE (operands[1]) == REG
2261 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2262 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2263 && GET_CODE (SUBREG_REG (operands[1])) == REG
2264 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2265 {
2266 if (aligned_memory_operand (operands[1], mode))
2267 {
2268 if (reload_in_progress)
2269 {
2270 emit_insn ((mode == QImode
2271 ? gen_reload_inqi_help
2272 : gen_reload_inhi_help)
2273 (operands[0], operands[1],
2274 gen_rtx_REG (SImode, REGNO (operands[0]))));
2275 }
2276 else
2277 {
2278 rtx aligned_mem, bitnum;
2279 rtx scratch = gen_reg_rtx (SImode);
2280 rtx subtarget;
2281 bool copyout;
2282
2283 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2284
2285 subtarget = operands[0];
2286 if (GET_CODE (subtarget) == REG)
2287 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2288 else
2289 subtarget = gen_reg_rtx (DImode), copyout = true;
2290
2291 emit_insn ((mode == QImode
2292 ? gen_aligned_loadqi
2293 : gen_aligned_loadhi)
2294 (subtarget, aligned_mem, bitnum, scratch));
2295
2296 if (copyout)
2297 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2298 }
2299 }
2300 else
2301 {
2302 /* Don't pass these as parameters since that makes the generated
2303 code depend on parameter evaluation order which will cause
2304 bootstrap failures. */
2305
2306 rtx temp1, temp2, seq, subtarget;
2307 bool copyout;
2308
2309 temp1 = gen_reg_rtx (DImode);
2310 temp2 = gen_reg_rtx (DImode);
2311
2312 subtarget = operands[0];
2313 if (GET_CODE (subtarget) == REG)
2314 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2315 else
2316 subtarget = gen_reg_rtx (DImode), copyout = true;
2317
2318 seq = ((mode == QImode
2319 ? gen_unaligned_loadqi
2320 : gen_unaligned_loadhi)
2321 (subtarget, get_unaligned_address (operands[1], 0),
2322 temp1, temp2));
2323 alpha_set_memflags (seq, operands[1]);
2324 emit_insn (seq);
2325
2326 if (copyout)
2327 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2328 }
2329 return true;
2330 }
2331
2332 if (GET_CODE (operands[0]) == MEM
2333 || (GET_CODE (operands[0]) == SUBREG
2334 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2335 || (reload_in_progress && GET_CODE (operands[0]) == REG
2336 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2337 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2338 && GET_CODE (SUBREG_REG (operands[0])) == REG
2339 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2340 {
2341 if (aligned_memory_operand (operands[0], mode))
2342 {
2343 rtx aligned_mem, bitnum;
2344 rtx temp1 = gen_reg_rtx (SImode);
2345 rtx temp2 = gen_reg_rtx (SImode);
2346
2347 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2348
2349 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2350 temp1, temp2));
2351 }
2352 else
2353 {
2354 rtx temp1 = gen_reg_rtx (DImode);
2355 rtx temp2 = gen_reg_rtx (DImode);
2356 rtx temp3 = gen_reg_rtx (DImode);
2357 rtx seq = ((mode == QImode
2358 ? gen_unaligned_storeqi
2359 : gen_unaligned_storehi)
2360 (get_unaligned_address (operands[0], 0),
2361 operands[1], temp1, temp2, temp3));
2362
2363 alpha_set_memflags (seq, operands[0]);
2364 emit_insn (seq);
2365 }
2366 return true;
2367 }
2368
2369 return false;
2370 }
2371
2372 /* Implement the movmisalign patterns. One of the operands is a memory
2373 that is not naturally aligned. Emit instructions to load it. */
2374
2375 void
2376 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2377 {
2378 /* Honor misaligned loads, for those we promised to do so. */
2379 if (MEM_P (operands[1]))
2380 {
2381 rtx tmp;
2382
2383 if (register_operand (operands[0], mode))
2384 tmp = operands[0];
2385 else
2386 tmp = gen_reg_rtx (mode);
2387
2388 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2389 if (tmp != operands[0])
2390 emit_move_insn (operands[0], tmp);
2391 }
2392 else if (MEM_P (operands[0]))
2393 {
2394 if (!reg_or_0_operand (operands[1], mode))
2395 operands[1] = force_reg (mode, operands[1]);
2396 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2397 }
2398 else
2399 gcc_unreachable ();
2400 }
2401
2402 /* Generate an unsigned DImode to FP conversion. This is the same code
2403 optabs would emit if we didn't have TFmode patterns.
2404
2405 For SFmode, this is the only construction I've found that can pass
2406 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2407 intermediates will work, because you'll get intermediate rounding
2408 that ruins the end result. Some of this could be fixed by turning
2409 on round-to-positive-infinity, but that requires diddling the fpsr,
2410 which kills performance. I tried turning this around and converting
2411 to a negative number, so that I could turn on /m, but either I did
2412 it wrong or there's something else going on, because I wound up with the exact
2413 same single-bit error. There is a branch-less form of this same code:
2414
2415 srl $16,1,$1
2416 and $16,1,$2
2417 cmplt $16,0,$3
2418 or $1,$2,$2
2419 cmovge $16,$16,$2
2420 itoft $3,$f10
2421 itoft $2,$f11
2422 cvtqs $f11,$f11
2423 adds $f11,$f11,$f0
2424 fcmoveq $f10,$f11,$f0
2425
2426 I'm not using it because it's the same number of instructions as
2427 this branch-full form, and it has more serialized long latency
2428 instructions on the critical path.
2429
2430 For DFmode, we can avoid rounding errors by breaking up the word
2431 into two pieces, converting them separately, and adding them back:
2432
2433 LC0: .long 0,0x5f800000
2434
2435 itoft $16,$f11
2436 lda $2,LC0
2437 cmplt $16,0,$1
2438 cpyse $f11,$f31,$f10
2439 cpyse $f31,$f11,$f11
2440 s4addq $1,$2,$1
2441 lds $f12,0($1)
2442 cvtqt $f10,$f10
2443 cvtqt $f11,$f11
2444 addt $f12,$f10,$f0
2445 addt $f0,$f11,$f0
2446
2447 This doesn't seem to be a clear-cut win over the optabs form.
2448 It probably all depends on the distribution of numbers being
2449 converted -- in the optabs form, all but high-bit-set has a
2450 much lower minimum execution time. */
2451
2452 void
2453 alpha_emit_floatuns (rtx operands[2])
2454 {
2455 rtx neglab, donelab, i0, i1, f0, in, out;
2456 enum machine_mode mode;
2457
2458 out = operands[0];
2459 in = force_reg (DImode, operands[1]);
2460 mode = GET_MODE (out);
2461 neglab = gen_label_rtx ();
2462 donelab = gen_label_rtx ();
2463 i0 = gen_reg_rtx (DImode);
2464 i1 = gen_reg_rtx (DImode);
2465 f0 = gen_reg_rtx (mode);
2466
2467 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2468
2469 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2470 emit_jump_insn (gen_jump (donelab));
2471 emit_barrier ();
2472
2473 emit_label (neglab);
2474
2475 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2476 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2477 emit_insn (gen_iordi3 (i0, i0, i1));
2478 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2479 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2480
2481 emit_label (donelab);
2482 }
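
/* Illustrative sketch, not part of the original file: the same
   halve-and-double trick in host C, for an unsigned 64-bit value whose
   high bit may be set.  The helper name is hypothetical.  */

#include <stdint.h>

static float
u64_to_float (uint64_t x)
{
  if ((int64_t) x >= 0)
    return (float) (int64_t) x;		/* high bit clear: plain signed convert */

  /* Halve, folding the discarded low bit back in so the conversion's
     rounding still sees it, convert as signed, then double the result.  */
  uint64_t half = (x >> 1) | (x & 1);
  float f = (float) (int64_t) half;
  return f + f;
}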
2483
2484 /* Generate the comparison for a conditional branch. */
2485
2486 rtx
2487 alpha_emit_conditional_branch (enum rtx_code code)
2488 {
2489 enum rtx_code cmp_code, branch_code;
2490 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2491 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2492 rtx tem;
2493
2494 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2495 {
2496 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2497 op1 = const0_rtx;
2498 alpha_compare.fp_p = 0;
2499 }
2500
2501 /* The general case: fold the comparison code to the types of compares
2502 that we have, choosing the branch as necessary. */
2503 switch (code)
2504 {
2505 case EQ: case LE: case LT: case LEU: case LTU:
2506 case UNORDERED:
2507 /* We have these compares: */
2508 cmp_code = code, branch_code = NE;
2509 break;
2510
2511 case NE:
2512 case ORDERED:
2513 /* These must be reversed. */
2514 cmp_code = reverse_condition (code), branch_code = EQ;
2515 break;
2516
2517 case GE: case GT: case GEU: case GTU:
2518 /* For FP, we swap them, for INT, we reverse them. */
2519 if (alpha_compare.fp_p)
2520 {
2521 cmp_code = swap_condition (code);
2522 branch_code = NE;
2523 tem = op0, op0 = op1, op1 = tem;
2524 }
2525 else
2526 {
2527 cmp_code = reverse_condition (code);
2528 branch_code = EQ;
2529 }
2530 break;
2531
2532 default:
2533 gcc_unreachable ();
2534 }
2535
2536 if (alpha_compare.fp_p)
2537 {
2538 cmp_mode = DFmode;
2539 if (flag_unsafe_math_optimizations)
2540 {
2541 /* When we are not as concerned about non-finite values, and we
2542 are comparing against zero, we can branch directly. */
2543 if (op1 == CONST0_RTX (DFmode))
2544 cmp_code = UNKNOWN, branch_code = code;
2545 else if (op0 == CONST0_RTX (DFmode))
2546 {
2547 /* Undo the swap we probably did just above. */
2548 tem = op0, op0 = op1, op1 = tem;
2549 branch_code = swap_condition (cmp_code);
2550 cmp_code = UNKNOWN;
2551 }
2552 }
2553 else
2554 {
2555 /* ??? We mark the branch mode to be CCmode to prevent the
2556 compare and branch from being combined, since the compare
2557 insn follows IEEE rules that the branch does not. */
2558 branch_mode = CCmode;
2559 }
2560 }
2561 else
2562 {
2563 cmp_mode = DImode;
2564
2565 /* The following optimizations are only for signed compares. */
2566 if (code != LEU && code != LTU && code != GEU && code != GTU)
2567 {
2568 /* Whee. Compare and branch against 0 directly. */
2569 if (op1 == const0_rtx)
2570 cmp_code = UNKNOWN, branch_code = code;
2571
2572 /* If the constant doesn't fit into an immediate, but can
2573 be generated by lda/ldah, we adjust the argument and
2574 compare against zero, so we can use beq/bne directly. */
2575 /* ??? Don't do this when comparing against symbols, otherwise
2576 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2577 be declared false out of hand (at least for non-weak). */
2578 else if (GET_CODE (op1) == CONST_INT
2579 && (code == EQ || code == NE)
2580 && !(symbolic_operand (op0, VOIDmode)
2581 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2582 {
2583 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2584
2585 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2586 && (CONST_OK_FOR_LETTER_P (n, 'K')
2587 || CONST_OK_FOR_LETTER_P (n, 'L')))
2588 {
2589 cmp_code = PLUS, branch_code = code;
2590 op1 = GEN_INT (n);
2591 }
2592 }
2593 }
2594
2595 if (!reg_or_0_operand (op0, DImode))
2596 op0 = force_reg (DImode, op0);
2597 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2598 op1 = force_reg (DImode, op1);
2599 }
2600
2601 /* Emit an initial compare instruction, if necessary. */
2602 tem = op0;
2603 if (cmp_code != UNKNOWN)
2604 {
2605 tem = gen_reg_rtx (cmp_mode);
2606 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2607 }
2608
2609 /* Zero the operands. */
2610 memset (&alpha_compare, 0, sizeof (alpha_compare));
2611
2612 /* Return the branch comparison. */
2613 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2614 }
2615
2616 /* Certain simplifications can be done to make invalid setcc operations
2617 valid. Return the final comparison, or NULL if we can't work. */
2618
2619 rtx
2620 alpha_emit_setcc (enum rtx_code code)
2621 {
2622 enum rtx_code cmp_code;
2623 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2624 int fp_p = alpha_compare.fp_p;
2625 rtx tmp;
2626
2627 /* Zero the operands. */
2628 memset (&alpha_compare, 0, sizeof (alpha_compare));
2629
2630 if (fp_p && GET_MODE (op0) == TFmode)
2631 {
2632 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2633 op1 = const0_rtx;
2634 fp_p = 0;
2635 }
2636
2637 if (fp_p && !TARGET_FIX)
2638 return NULL_RTX;
2639
2640 /* The general case: fold the comparison code to the types of compares
2641 that we have, choosing the branch as necessary. */
2642
2643 cmp_code = UNKNOWN;
2644 switch (code)
2645 {
2646 case EQ: case LE: case LT: case LEU: case LTU:
2647 case UNORDERED:
2648 /* We have these compares. */
2649 if (fp_p)
2650 cmp_code = code, code = NE;
2651 break;
2652
2653 case NE:
2654 if (!fp_p && op1 == const0_rtx)
2655 break;
2656 /* FALLTHRU */
2657
2658 case ORDERED:
2659 cmp_code = reverse_condition (code);
2660 code = EQ;
2661 break;
2662
2663 case GE: case GT: case GEU: case GTU:
2664 /* These normally need swapping, but for integer zero we have
2665 special patterns that recognize swapped operands. */
2666 if (!fp_p && op1 == const0_rtx)
2667 break;
2668 code = swap_condition (code);
2669 if (fp_p)
2670 cmp_code = code, code = NE;
2671 tmp = op0, op0 = op1, op1 = tmp;
2672 break;
2673
2674 default:
2675 gcc_unreachable ();
2676 }
2677
2678 if (!fp_p)
2679 {
2680 if (!register_operand (op0, DImode))
2681 op0 = force_reg (DImode, op0);
2682 if (!reg_or_8bit_operand (op1, DImode))
2683 op1 = force_reg (DImode, op1);
2684 }
2685
2686 /* Emit an initial compare instruction, if necessary. */
2687 if (cmp_code != UNKNOWN)
2688 {
2689 enum machine_mode mode = fp_p ? DFmode : DImode;
2690
2691 tmp = gen_reg_rtx (mode);
2692 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2693 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2694
2695 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2696 op1 = const0_rtx;
2697 }
2698
2699 /* Return the setcc comparison. */
2700 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2701 }
2702
2703
2704 /* Rewrite a comparison against zero CMP of the form
2705 (CODE (cc0) (const_int 0)) so it can be written validly in
2706 a conditional move (if_then_else CMP ...).
2707 If both of the operands that set cc0 are nonzero we must emit
2708 an insn to perform the compare (it can't be done within
2709 the conditional move). */
2710
2711 rtx
2712 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2713 {
2714 enum rtx_code code = GET_CODE (cmp);
2715 enum rtx_code cmov_code = NE;
2716 rtx op0 = alpha_compare.op0;
2717 rtx op1 = alpha_compare.op1;
2718 int fp_p = alpha_compare.fp_p;
2719 enum machine_mode cmp_mode
2720 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2721 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2722 enum machine_mode cmov_mode = VOIDmode;
2723 int local_fast_math = flag_unsafe_math_optimizations;
2724 rtx tem;
2725
2726 /* Zero the operands. */
2727 memset (&alpha_compare, 0, sizeof (alpha_compare));
2728
2729 if (fp_p != FLOAT_MODE_P (mode))
2730 {
2731 enum rtx_code cmp_code;
2732
2733 if (! TARGET_FIX)
2734 return 0;
2735
2736 /* If we have fp<->int register move instructions, do a cmov by
2737 performing the comparison in fp registers, and move the
2738 zero/nonzero value to integer registers, where we can then
2739 use a normal cmov, or vice-versa. */
2740
2741 switch (code)
2742 {
2743 case EQ: case LE: case LT: case LEU: case LTU:
2744 /* We have these compares. */
2745 cmp_code = code, code = NE;
2746 break;
2747
2748 case NE:
2749 /* This must be reversed. */
2750 cmp_code = EQ, code = EQ;
2751 break;
2752
2753 case GE: case GT: case GEU: case GTU:
2754 /* These normally need swapping, but for integer zero we have
2755 special patterns that recognize swapped operands. */
2756 if (!fp_p && op1 == const0_rtx)
2757 cmp_code = code, code = NE;
2758 else
2759 {
2760 cmp_code = swap_condition (code);
2761 code = NE;
2762 tem = op0, op0 = op1, op1 = tem;
2763 }
2764 break;
2765
2766 default:
2767 gcc_unreachable ();
2768 }
2769
2770 tem = gen_reg_rtx (cmp_op_mode);
2771 emit_insn (gen_rtx_SET (VOIDmode, tem,
2772 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2773 op0, op1)));
2774
2775 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2776 op0 = gen_lowpart (cmp_op_mode, tem);
2777 op1 = CONST0_RTX (cmp_op_mode);
2778 fp_p = !fp_p;
2779 local_fast_math = 1;
2780 }
2781
2782 /* We may be able to use a conditional move directly.
2783 This avoids emitting spurious compares. */
2784 if (signed_comparison_operator (cmp, VOIDmode)
2785 && (!fp_p || local_fast_math)
2786 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2787 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2788
2789 /* We can't put the comparison inside the conditional move;
2790 emit a compare instruction and put that inside the
2791 conditional move. Make sure we emit only comparisons we have;
2792 swap or reverse as necessary. */
2793
2794 if (no_new_pseudos)
2795 return NULL_RTX;
2796
2797 switch (code)
2798 {
2799 case EQ: case LE: case LT: case LEU: case LTU:
2800 /* We have these compares: */
2801 break;
2802
2803 case NE:
2804 /* This must be reversed. */
2805 code = reverse_condition (code);
2806 cmov_code = EQ;
2807 break;
2808
2809 case GE: case GT: case GEU: case GTU:
2810 /* These must be swapped. */
2811 if (op1 != CONST0_RTX (cmp_mode))
2812 {
2813 code = swap_condition (code);
2814 tem = op0, op0 = op1, op1 = tem;
2815 }
2816 break;
2817
2818 default:
2819 gcc_unreachable ();
2820 }
2821
2822 if (!fp_p)
2823 {
2824 if (!reg_or_0_operand (op0, DImode))
2825 op0 = force_reg (DImode, op0);
2826 if (!reg_or_8bit_operand (op1, DImode))
2827 op1 = force_reg (DImode, op1);
2828 }
2829
2830 /* ??? We mark the branch mode to be CCmode to prevent the compare
2831 and cmov from being combined, since the compare insn follows IEEE
2832 rules that the cmov does not. */
2833 if (fp_p && !local_fast_math)
2834 cmov_mode = CCmode;
2835
2836 tem = gen_reg_rtx (cmp_op_mode);
2837 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2838 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2839 }
2840
2841 /* Simplify a conditional move of two constants into a setcc with
2842 arithmetic. This is done with a splitter since combine would
2843 just undo the work if done during code generation. It also catches
2844 cases we wouldn't have before cse. */
2845
2846 int
2847 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2848 rtx t_rtx, rtx f_rtx)
2849 {
2850 HOST_WIDE_INT t, f, diff;
2851 enum machine_mode mode;
2852 rtx target, subtarget, tmp;
2853
2854 mode = GET_MODE (dest);
2855 t = INTVAL (t_rtx);
2856 f = INTVAL (f_rtx);
2857 diff = t - f;
2858
2859 if (((code == NE || code == EQ) && diff < 0)
2860 || (code == GE || code == GT))
2861 {
2862 code = reverse_condition (code);
2863 diff = t, t = f, f = diff;
2864 diff = t - f;
2865 }
2866
2867 subtarget = target = dest;
2868 if (mode != DImode)
2869 {
2870 target = gen_lowpart (DImode, dest);
2871 if (! no_new_pseudos)
2872 subtarget = gen_reg_rtx (DImode);
2873 else
2874 subtarget = target;
2875 }
2876 /* Below, we must be careful to use copy_rtx on target and subtarget
2877 in intermediate insns, as they may be a subreg rtx, which may not
2878 be shared. */
2879
2880 if (f == 0 && exact_log2 (diff) > 0
2881 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2882 viable over a longer latency cmove. On EV5, the E0 slot is a
2883 scarce resource, and on EV4 shift has the same latency as a cmove. */
2884 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2885 {
2886 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2887 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2888
2889 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2890 GEN_INT (exact_log2 (t)));
2891 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2892 }
2893 else if (f == 0 && t == -1)
2894 {
2895 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2896 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2897
2898 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2899 }
2900 else if (diff == 1 || diff == 4 || diff == 8)
2901 {
2902 rtx add_op;
2903
2904 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2905 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2906
2907 if (diff == 1)
2908 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2909 else
2910 {
2911 add_op = GEN_INT (f);
2912 if (sext_add_operand (add_op, mode))
2913 {
2914 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2915 GEN_INT (diff));
2916 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2917 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2918 }
2919 else
2920 return 0;
2921 }
2922 }
2923 else
2924 return 0;
2925
2926 return 1;
2927 }
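
/* Illustrative sketch, not part of the original file: the three constant
   cmov splits above expressed as host C identities.  COND stands for the
   already-reduced condition value; the function names are hypothetical.  */

#include <stdint.h>

/* f == 0, t a power of two: setcc then shift (cond ? 8 : 0).  */
static int64_t cmov_shift (int64_t cond)  { return (int64_t) (cond != 0) << 3; }

/* f == 0, t == -1: setcc then negate (cond ? -1 : 0).  */
static int64_t cmov_neg (int64_t cond)    { return -(int64_t) (cond != 0); }

/* diff == 8 with a sext_add_operand f: setcc, then s8addq (cond ? 13 : 5).  */
static int64_t cmov_s8addq (int64_t cond) { return (int64_t) (cond != 0) * 8 + 5; }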
2928 \f
2929 /* Look up the function X_floating library function name for the
2930 given operation. */
2931
2932 struct xfloating_op GTY(())
2933 {
2934 const enum rtx_code code;
2935 const char *const GTY((skip)) osf_func;
2936 const char *const GTY((skip)) vms_func;
2937 rtx libcall;
2938 };
2939
2940 static GTY(()) struct xfloating_op xfloating_ops[] =
2941 {
2942 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2943 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2944 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2945 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2946 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2947 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2948 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2949 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2950 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2951 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2952 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2953 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2954 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2955 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2956 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2957 };
2958
2959 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2960 {
2961 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2962 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2963 };
2964
2965 static rtx
2966 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2967 {
2968 struct xfloating_op *ops = xfloating_ops;
2969 long n = ARRAY_SIZE (xfloating_ops);
2970 long i;
2971
2972 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2973
2974 /* How irritating. Nothing to key off for the main table. */
2975 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2976 {
2977 ops = vax_cvt_ops;
2978 n = ARRAY_SIZE (vax_cvt_ops);
2979 }
2980
2981 for (i = 0; i < n; ++i, ++ops)
2982 if (ops->code == code)
2983 {
2984 rtx func = ops->libcall;
2985 if (!func)
2986 {
2987 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2988 ? ops->vms_func : ops->osf_func);
2989 ops->libcall = func;
2990 }
2991 return func;
2992 }
2993
2994 gcc_unreachable ();
2995 }
2996
2997 /* Most X_floating operations take the rounding mode as an argument.
2998 Compute that here. */
2999
3000 static int
3001 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3002 enum alpha_fp_rounding_mode round)
3003 {
3004 int mode;
3005
3006 switch (round)
3007 {
3008 case ALPHA_FPRM_NORM:
3009 mode = 2;
3010 break;
3011 case ALPHA_FPRM_MINF:
3012 mode = 1;
3013 break;
3014 case ALPHA_FPRM_CHOP:
3015 mode = 0;
3016 break;
3017 case ALPHA_FPRM_DYN:
3018 mode = 4;
3019 break;
3020 default:
3021 gcc_unreachable ();
3022
3023 /* XXX For reference, round to +inf is mode = 3. */
3024 }
3025
3026 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3027 mode |= 0x10000;
3028
3029 return mode;
3030 }
3031
3032 /* Emit an X_floating library function call.
3033
3034 Note that these functions do not follow normal calling conventions:
3035 TFmode arguments are passed in two integer registers (as opposed to
3036 indirect); TFmode return values appear in R16+R17.
3037
3038 FUNC is the function to call.
3039 TARGET is where the output belongs.
3040 OPERANDS are the inputs.
3041 NOPERANDS is the count of inputs.
3042 EQUIV is the expression equivalent for the function.
3043 */
3044
3045 static void
3046 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3047 int noperands, rtx equiv)
3048 {
3049 rtx usage = NULL_RTX, tmp, reg;
3050 int regno = 16, i;
3051
3052 start_sequence ();
3053
3054 for (i = 0; i < noperands; ++i)
3055 {
3056 switch (GET_MODE (operands[i]))
3057 {
3058 case TFmode:
3059 reg = gen_rtx_REG (TFmode, regno);
3060 regno += 2;
3061 break;
3062
3063 case DFmode:
3064 reg = gen_rtx_REG (DFmode, regno + 32);
3065 regno += 1;
3066 break;
3067
3068 case VOIDmode:
3069 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3070 /* FALLTHRU */
3071 case DImode:
3072 reg = gen_rtx_REG (DImode, regno);
3073 regno += 1;
3074 break;
3075
3076 default:
3077 gcc_unreachable ();
3078 }
3079
3080 emit_move_insn (reg, operands[i]);
3081 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3082 }
3083
3084 switch (GET_MODE (target))
3085 {
3086 case TFmode:
3087 reg = gen_rtx_REG (TFmode, 16);
3088 break;
3089 case DFmode:
3090 reg = gen_rtx_REG (DFmode, 32);
3091 break;
3092 case DImode:
3093 reg = gen_rtx_REG (DImode, 0);
3094 break;
3095 default:
3096 gcc_unreachable ();
3097 }
3098
3099 tmp = gen_rtx_MEM (QImode, func);
3100 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3101 const0_rtx, const0_rtx));
3102 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3103 CONST_OR_PURE_CALL_P (tmp) = 1;
3104
3105 tmp = get_insns ();
3106 end_sequence ();
3107
3108 emit_libcall_block (tmp, target, reg, equiv);
3109 }
3110
3111 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3112
3113 void
3114 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3115 {
3116 rtx func;
3117 int mode;
3118 rtx out_operands[3];
3119
3120 func = alpha_lookup_xfloating_lib_func (code);
3121 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3122
3123 out_operands[0] = operands[1];
3124 out_operands[1] = operands[2];
3125 out_operands[2] = GEN_INT (mode);
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3127 gen_rtx_fmt_ee (code, TFmode, operands[1],
3128 operands[2]));
3129 }
3130
3131 /* Emit an X_floating library function call for a comparison. */
3132
3133 static rtx
3134 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3135 {
3136 enum rtx_code cmp_code, res_code;
3137 rtx func, out, operands[2];
3138
3139 /* X_floating library comparison functions return
3140 -1 unordered
3141 0 false
3142 1 true
3143 Convert the compare against the raw return value. */
3144
3145 cmp_code = *pcode;
3146 switch (cmp_code)
3147 {
3148 case UNORDERED:
3149 cmp_code = EQ;
3150 res_code = LT;
3151 break;
3152 case ORDERED:
3153 cmp_code = EQ;
3154 res_code = GE;
3155 break;
3156 case NE:
3157 res_code = NE;
3158 break;
3159 case EQ:
3160 case LT:
3161 case GT:
3162 case LE:
3163 case GE:
3164 res_code = GT;
3165 break;
3166 default:
3167 gcc_unreachable ();
3168 }
3169 *pcode = res_code;
3170
3171 func = alpha_lookup_xfloating_lib_func (cmp_code);
3172
3173 operands[0] = op0;
3174 operands[1] = op1;
3175 out = gen_reg_rtx (DImode);
3176
3177 /* ??? Strange mode for equiv because what's actually returned
3178 is -1,0,1, not a proper boolean value. */
3179 alpha_emit_xfloating_libcall (func, out, operands, 2,
3180 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3181
3182 return out;
3183 }
3184
3185 /* Emit an X_floating library function call for a conversion. */
3186
3187 void
3188 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3189 {
3190 int noperands = 1, mode;
3191 rtx out_operands[2];
3192 rtx func;
3193 enum rtx_code code = orig_code;
3194
3195 if (code == UNSIGNED_FIX)
3196 code = FIX;
3197
3198 func = alpha_lookup_xfloating_lib_func (code);
3199
3200 out_operands[0] = operands[1];
3201
3202 switch (code)
3203 {
3204 case FIX:
3205 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3206 out_operands[1] = GEN_INT (mode);
3207 noperands = 2;
3208 break;
3209 case FLOAT_TRUNCATE:
3210 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3211 out_operands[1] = GEN_INT (mode);
3212 noperands = 2;
3213 break;
3214 default:
3215 break;
3216 }
3217
3218 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3219 gen_rtx_fmt_e (orig_code,
3220 GET_MODE (operands[0]),
3221 operands[1]));
3222 }
3223
3224 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3225 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3226 guarantee that the sequence
3227 set (OP[0] OP[2])
3228 set (OP[1] OP[3])
3229 is valid. Naturally, output operand ordering is little-endian.
3230 This is used by *movtf_internal and *movti_internal. */
3231
3232 void
3233 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3234 bool fixup_overlap)
3235 {
3236 switch (GET_CODE (operands[1]))
3237 {
3238 case REG:
3239 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3240 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3241 break;
3242
3243 case MEM:
3244 operands[3] = adjust_address (operands[1], DImode, 8);
3245 operands[2] = adjust_address (operands[1], DImode, 0);
3246 break;
3247
3248 case CONST_INT:
3249 case CONST_DOUBLE:
3250 gcc_assert (operands[1] == CONST0_RTX (mode));
3251 operands[2] = operands[3] = const0_rtx;
3252 break;
3253
3254 default:
3255 gcc_unreachable ();
3256 }
3257
3258 switch (GET_CODE (operands[0]))
3259 {
3260 case REG:
3261 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3262 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3263 break;
3264
3265 case MEM:
3266 operands[1] = adjust_address (operands[0], DImode, 8);
3267 operands[0] = adjust_address (operands[0], DImode, 0);
3268 break;
3269
3270 default:
3271 gcc_unreachable ();
3272 }
3273
3274 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3275 {
3276 rtx tmp;
3277 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3278 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3279 }
3280 }
3281
3282 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3283 op2 is a register containing the sign bit, operation is the
3284 logical operation to be performed. */
3285
3286 void
3287 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3288 {
3289 rtx high_bit = operands[2];
3290 rtx scratch;
3291 int move;
3292
3293 alpha_split_tmode_pair (operands, TFmode, false);
3294
3295 /* Detect three flavors of operand overlap. */
3296 move = 1;
3297 if (rtx_equal_p (operands[0], operands[2]))
3298 move = 0;
3299 else if (rtx_equal_p (operands[1], operands[2]))
3300 {
3301 if (rtx_equal_p (operands[0], high_bit))
3302 move = 2;
3303 else
3304 move = -1;
3305 }
3306
3307 if (move < 0)
3308 emit_move_insn (operands[0], operands[2]);
3309
3310 /* ??? If the destination overlaps both source tf and high_bit, then
3311 assume source tf is dead in its entirety and use the other half
3312 for a scratch register. Otherwise "scratch" is just the proper
3313 destination register. */
3314 scratch = operands[move < 2 ? 1 : 3];
3315
3316 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3317
3318 if (move > 0)
3319 {
3320 emit_move_insn (operands[0], operands[2]);
3321 if (move > 1)
3322 emit_move_insn (operands[1], scratch);
3323 }
3324 }
3325 \f
3326 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3327 unaligned data:
3328
3329 unsigned: signed:
3330 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3331 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3332 lda r3,X(r11) lda r3,X+2(r11)
3333 extwl r1,r3,r1 extql r1,r3,r1
3334 extwh r2,r3,r2 extqh r2,r3,r2
3335 or r1,r2,r1 or r1,r2,r1
3336 sra r1,48,r1
3337
3338 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3339 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3340 lda r3,X(r11) lda r3,X(r11)
3341 extll r1,r3,r1 extll r1,r3,r1
3342 extlh r2,r3,r2 extlh r2,r3,r2
3343 or r1,r2,r1 addl r1,r2,r1
3344
3345 quad: ldq_u r1,X(r11)
3346 ldq_u r2,X+7(r11)
3347 lda r3,X(r11)
3348 extql r1,r3,r1
3349 extqh r2,r3,r2
3350 or r1,r2,r1
3351 */
3352
3353 void
3354 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3355 HOST_WIDE_INT ofs, int sign)
3356 {
3357 rtx meml, memh, addr, extl, exth, tmp, mema;
3358 enum machine_mode mode;
3359
3360 if (TARGET_BWX && size == 2)
3361 {
3362 meml = adjust_address (mem, QImode, ofs);
3363 memh = adjust_address (mem, QImode, ofs+1);
3364 if (BYTES_BIG_ENDIAN)
3365 tmp = meml, meml = memh, memh = tmp;
3366 extl = gen_reg_rtx (DImode);
3367 exth = gen_reg_rtx (DImode);
3368 emit_insn (gen_zero_extendqidi2 (extl, meml));
3369 emit_insn (gen_zero_extendqidi2 (exth, memh));
3370 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3371 NULL, 1, OPTAB_LIB_WIDEN);
3372 addr = expand_simple_binop (DImode, IOR, extl, exth,
3373 NULL, 1, OPTAB_LIB_WIDEN);
3374
3375 if (sign && GET_MODE (tgt) != HImode)
3376 {
3377 addr = gen_lowpart (HImode, addr);
3378 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3379 }
3380 else
3381 {
3382 if (GET_MODE (tgt) != DImode)
3383 addr = gen_lowpart (GET_MODE (tgt), addr);
3384 emit_move_insn (tgt, addr);
3385 }
3386 return;
3387 }
3388
3389 meml = gen_reg_rtx (DImode);
3390 memh = gen_reg_rtx (DImode);
3391 addr = gen_reg_rtx (DImode);
3392 extl = gen_reg_rtx (DImode);
3393 exth = gen_reg_rtx (DImode);
3394
3395 mema = XEXP (mem, 0);
3396 if (GET_CODE (mema) == LO_SUM)
3397 mema = force_reg (Pmode, mema);
3398
3399 /* AND addresses cannot be in any alias set, since they may implicitly
3400 alias surrounding code. Ideally we'd have some alias set that
3401 covered all types except those with alignment 8 or higher. */
3402
3403 tmp = change_address (mem, DImode,
3404 gen_rtx_AND (DImode,
3405 plus_constant (mema, ofs),
3406 GEN_INT (-8)));
3407 set_mem_alias_set (tmp, 0);
3408 emit_move_insn (meml, tmp);
3409
3410 tmp = change_address (mem, DImode,
3411 gen_rtx_AND (DImode,
3412 plus_constant (mema, ofs + size - 1),
3413 GEN_INT (-8)));
3414 set_mem_alias_set (tmp, 0);
3415 emit_move_insn (memh, tmp);
3416
3417 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3418 {
3419 emit_move_insn (addr, plus_constant (mema, -1));
3420
3421 emit_insn (gen_extqh_be (extl, meml, addr));
3422 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3423
3424 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3425 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3426 addr, 1, OPTAB_WIDEN);
3427 }
3428 else if (sign && size == 2)
3429 {
3430 emit_move_insn (addr, plus_constant (mema, ofs+2));
3431
3432 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3433 emit_insn (gen_extqh_le (exth, memh, addr));
3434
3435 /* We must use tgt here for the target. Alpha-vms port fails if we use
3436 addr for the target, because addr is marked as a pointer and combine
3437 knows that pointers are always sign-extended 32 bit values. */
3438 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3439 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3440 addr, 1, OPTAB_WIDEN);
3441 }
3442 else
3443 {
3444 if (WORDS_BIG_ENDIAN)
3445 {
3446 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3447 switch ((int) size)
3448 {
3449 case 2:
3450 emit_insn (gen_extwh_be (extl, meml, addr));
3451 mode = HImode;
3452 break;
3453
3454 case 4:
3455 emit_insn (gen_extlh_be (extl, meml, addr));
3456 mode = SImode;
3457 break;
3458
3459 case 8:
3460 emit_insn (gen_extqh_be (extl, meml, addr));
3461 mode = DImode;
3462 break;
3463
3464 default:
3465 gcc_unreachable ();
3466 }
3467 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3468 }
3469 else
3470 {
3471 emit_move_insn (addr, plus_constant (mema, ofs));
3472 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3473 switch ((int) size)
3474 {
3475 case 2:
3476 emit_insn (gen_extwh_le (exth, memh, addr));
3477 mode = HImode;
3478 break;
3479
3480 case 4:
3481 emit_insn (gen_extlh_le (exth, memh, addr));
3482 mode = SImode;
3483 break;
3484
3485 case 8:
3486 emit_insn (gen_extqh_le (exth, memh, addr));
3487 mode = DImode;
3488 break;
3489
3490 default:
3491 gcc_unreachable ();
3492 }
3493 }
3494
3495 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3496 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3497 sign, OPTAB_WIDEN);
3498 }
3499
3500 if (addr != tgt)
3501 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3502 }
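
/* Illustrative sketch, not part of the original file: the little-endian
   extql/extqh/or merge above, modelled arithmetically in host C.  LO_QUAD
   and HI_QUAD are the two aligned quadwords fetched by ldq_u; BYTE_OFS is
   the low three bits of the unaligned address.  */

#include <stdint.h>

static uint64_t
merge_unaligned_quad (uint64_t lo_quad, uint64_t hi_quad, unsigned byte_ofs)
{
  unsigned shift = byte_ofs * 8;

  if (shift == 0)
    return lo_quad;				/* already aligned */
  return (lo_quad >> shift)			/* extql */
	 | (hi_quad << (64 - shift));		/* extqh */
}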
3503
3504 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3505
3506 void
3507 alpha_expand_unaligned_store (rtx dst, rtx src,
3508 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3509 {
3510 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3511
3512 if (TARGET_BWX && size == 2)
3513 {
3514 if (src != const0_rtx)
3515 {
3516 dstl = gen_lowpart (QImode, src);
3517 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3518 NULL, 1, OPTAB_LIB_WIDEN);
3519 dsth = gen_lowpart (QImode, dsth);
3520 }
3521 else
3522 dstl = dsth = const0_rtx;
3523
3524 meml = adjust_address (dst, QImode, ofs);
3525 memh = adjust_address (dst, QImode, ofs+1);
3526 if (BYTES_BIG_ENDIAN)
3527 addr = meml, meml = memh, memh = addr;
3528
3529 emit_move_insn (meml, dstl);
3530 emit_move_insn (memh, dsth);
3531 return;
3532 }
3533
3534 dstl = gen_reg_rtx (DImode);
3535 dsth = gen_reg_rtx (DImode);
3536 insl = gen_reg_rtx (DImode);
3537 insh = gen_reg_rtx (DImode);
3538
3539 dsta = XEXP (dst, 0);
3540 if (GET_CODE (dsta) == LO_SUM)
3541 dsta = force_reg (Pmode, dsta);
3542
3543 /* AND addresses cannot be in any alias set, since they may implicitly
3544 alias surrounding code. Ideally we'd have some alias set that
3545 covered all types except those with alignment 8 or higher. */
3546
3547 meml = change_address (dst, DImode,
3548 gen_rtx_AND (DImode,
3549 plus_constant (dsta, ofs),
3550 GEN_INT (-8)));
3551 set_mem_alias_set (meml, 0);
3552
3553 memh = change_address (dst, DImode,
3554 gen_rtx_AND (DImode,
3555 plus_constant (dsta, ofs + size - 1),
3556 GEN_INT (-8)));
3557 set_mem_alias_set (memh, 0);
3558
3559 emit_move_insn (dsth, memh);
3560 emit_move_insn (dstl, meml);
3561 if (WORDS_BIG_ENDIAN)
3562 {
3563 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3564
3565 if (src != const0_rtx)
3566 {
3567 switch ((int) size)
3568 {
3569 case 2:
3570 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3571 break;
3572 case 4:
3573 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3574 break;
3575 case 8:
3576 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3577 break;
3578 }
3579 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3580 GEN_INT (size*8), addr));
3581 }
3582
3583 switch ((int) size)
3584 {
3585 case 2:
3586 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3587 break;
3588 case 4:
3589 {
3590 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3591 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3592 break;
3593 }
3594 case 8:
3595 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3596 break;
3597 }
3598
3599 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3600 }
3601 else
3602 {
3603 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3604
3605 if (src != CONST0_RTX (GET_MODE (src)))
3606 {
3607 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3608 GEN_INT (size*8), addr));
3609
3610 switch ((int) size)
3611 {
3612 case 2:
3613 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3614 break;
3615 case 4:
3616 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3617 break;
3618 case 8:
3619 emit_insn (gen_insql_le (insl, src, addr));
3620 break;
3621 }
3622 }
3623
3624 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3625
3626 switch ((int) size)
3627 {
3628 case 2:
3629 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3630 break;
3631 case 4:
3632 {
3633 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3634 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3635 break;
3636 }
3637 case 8:
3638 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3639 break;
3640 }
3641 }
3642
3643 if (src != CONST0_RTX (GET_MODE (src)))
3644 {
3645 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3646 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3647 }
3648
3649 if (WORDS_BIG_ENDIAN)
3650 {
3651 emit_move_insn (meml, dstl);
3652 emit_move_insn (memh, dsth);
3653 }
3654 else
3655 {
3656 /* Must store high before low for degenerate case of aligned. */
3657 emit_move_insn (memh, dsth);
3658 emit_move_insn (meml, dstl);
3659 }
3660 }
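
/* Illustrative sketch, not part of the original file: the inswl/mskwl idea
   above for a 2-byte store, modelled arithmetically in host C for the
   little-endian case where the halfword lies within one quadword
   (byte_ofs <= 6); the spill into the next quadword is handled the same
   way with the insh/mskh halves.  The helper name is hypothetical.  */

#include <stdint.h>

static uint64_t
merge_store_word (uint64_t quad, uint16_t val, unsigned byte_ofs)
{
  uint64_t mask = (uint64_t) 0xffff << (byte_ofs * 8);	/* mskwl clears these bytes */
  uint64_t ins  = (uint64_t) val    << (byte_ofs * 8);	/* inswl positions the data */

  return (quad & ~mask) | ins;				/* or, then store the quadword */
}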
3661
3662 /* The block move code tries to maximize speed by separating loads and
3663 stores at the expense of register pressure: we load all of the data
3664 before we store it back out. Two secondary effects are worth
3665 mentioning: this speeds copying to/from aligned and unaligned
3666 buffers, and it makes the code significantly easier to write. */
3667
3668 #define MAX_MOVE_WORDS 8
3669
3670 /* Load an integral number of consecutive unaligned quadwords. */
3671
3672 static void
3673 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3674 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3675 {
3676 rtx const im8 = GEN_INT (-8);
3677 rtx const i64 = GEN_INT (64);
3678 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3679 rtx sreg, areg, tmp, smema;
3680 HOST_WIDE_INT i;
3681
3682 smema = XEXP (smem, 0);
3683 if (GET_CODE (smema) == LO_SUM)
3684 smema = force_reg (Pmode, smema);
3685
3686 /* Generate all the tmp registers we need. */
3687 for (i = 0; i < words; ++i)
3688 {
3689 data_regs[i] = out_regs[i];
3690 ext_tmps[i] = gen_reg_rtx (DImode);
3691 }
3692 data_regs[words] = gen_reg_rtx (DImode);
3693
3694 if (ofs != 0)
3695 smem = adjust_address (smem, GET_MODE (smem), ofs);
3696
3697 /* Load up all of the source data. */
3698 for (i = 0; i < words; ++i)
3699 {
3700 tmp = change_address (smem, DImode,
3701 gen_rtx_AND (DImode,
3702 plus_constant (smema, 8*i),
3703 im8));
3704 set_mem_alias_set (tmp, 0);
3705 emit_move_insn (data_regs[i], tmp);
3706 }
3707
3708 tmp = change_address (smem, DImode,
3709 gen_rtx_AND (DImode,
3710 plus_constant (smema, 8*words - 1),
3711 im8));
3712 set_mem_alias_set (tmp, 0);
3713 emit_move_insn (data_regs[words], tmp);
3714
3715 /* Extract the half-word fragments. Unfortunately DEC decided to make
3716 extxh with offset zero a noop instead of zeroing the register, so
3717 we must take care of that edge condition ourselves with cmov. */
3718
3719 sreg = copy_addr_to_reg (smema);
3720 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3721 1, OPTAB_WIDEN);
3722 if (WORDS_BIG_ENDIAN)
3723 emit_move_insn (sreg, plus_constant (sreg, 7));
3724 for (i = 0; i < words; ++i)
3725 {
3726 if (WORDS_BIG_ENDIAN)
3727 {
3728 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3729 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3730 }
3731 else
3732 {
3733 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3734 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3735 }
3736 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3737 gen_rtx_IF_THEN_ELSE (DImode,
3738 gen_rtx_EQ (DImode, areg,
3739 const0_rtx),
3740 const0_rtx, ext_tmps[i])));
3741 }
3742
3743 /* Merge the half-words into whole words. */
3744 for (i = 0; i < words; ++i)
3745 {
3746 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3747 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3748 }
3749 }
3750
3751 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3752 may be NULL to store zeros. */
3753
3754 static void
3755 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3756 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3757 {
3758 rtx const im8 = GEN_INT (-8);
3759 rtx const i64 = GEN_INT (64);
3760 rtx ins_tmps[MAX_MOVE_WORDS];
3761 rtx st_tmp_1, st_tmp_2, dreg;
3762 rtx st_addr_1, st_addr_2, dmema;
3763 HOST_WIDE_INT i;
3764
3765 dmema = XEXP (dmem, 0);
3766 if (GET_CODE (dmema) == LO_SUM)
3767 dmema = force_reg (Pmode, dmema);
3768
3769 /* Generate all the tmp registers we need. */
3770 if (data_regs != NULL)
3771 for (i = 0; i < words; ++i)
3772 ins_tmps[i] = gen_reg_rtx(DImode);
3773 st_tmp_1 = gen_reg_rtx(DImode);
3774 st_tmp_2 = gen_reg_rtx(DImode);
3775
3776 if (ofs != 0)
3777 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3778
3779 st_addr_2 = change_address (dmem, DImode,
3780 gen_rtx_AND (DImode,
3781 plus_constant (dmema, words*8 - 1),
3782 im8));
3783 set_mem_alias_set (st_addr_2, 0);
3784
3785 st_addr_1 = change_address (dmem, DImode,
3786 gen_rtx_AND (DImode, dmema, im8));
3787 set_mem_alias_set (st_addr_1, 0);
3788
3789 /* Load up the destination end bits. */
3790 emit_move_insn (st_tmp_2, st_addr_2);
3791 emit_move_insn (st_tmp_1, st_addr_1);
3792
3793 /* Shift the input data into place. */
3794 dreg = copy_addr_to_reg (dmema);
3795 if (WORDS_BIG_ENDIAN)
3796 emit_move_insn (dreg, plus_constant (dreg, 7));
3797 if (data_regs != NULL)
3798 {
3799 for (i = words-1; i >= 0; --i)
3800 {
3801 if (WORDS_BIG_ENDIAN)
3802 {
3803 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3804 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3805 }
3806 else
3807 {
3808 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3809 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3810 }
3811 }
3812 for (i = words-1; i > 0; --i)
3813 {
3814 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3815 ins_tmps[i-1], ins_tmps[i-1], 1,
3816 OPTAB_WIDEN);
3817 }
3818 }
3819
3820 /* Split and merge the ends with the destination data. */
3821 if (WORDS_BIG_ENDIAN)
3822 {
3823 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3824 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3825 }
3826 else
3827 {
3828 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3829 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3830 }
3831
3832 if (data_regs != NULL)
3833 {
3834 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3835 st_tmp_2, 1, OPTAB_WIDEN);
3836 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3837 st_tmp_1, 1, OPTAB_WIDEN);
3838 }
3839
3840 /* Store it all. */
3841 if (WORDS_BIG_ENDIAN)
3842 emit_move_insn (st_addr_1, st_tmp_1);
3843 else
3844 emit_move_insn (st_addr_2, st_tmp_2);
3845 for (i = words-1; i > 0; --i)
3846 {
3847 rtx tmp = change_address (dmem, DImode,
3848 gen_rtx_AND (DImode,
3849 plus_constant(dmema,
3850 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3851 im8));
3852 set_mem_alias_set (tmp, 0);
3853 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3854 }
3855 if (WORDS_BIG_ENDIAN)
3856 emit_move_insn (st_addr_2, st_tmp_2);
3857 else
3858 emit_move_insn (st_addr_1, st_tmp_1);
3859 }
3860
3861
3862 /* Expand string/block move operations.
3863
3864 operands[0] is the pointer to the destination.
3865 operands[1] is the pointer to the source.
3866 operands[2] is the number of bytes to move.
3867 operands[3] is the alignment. */
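/* The strategy below is to read the source into registers using the widest
   accesses the source alignment permits, then write those registers back out
   using the widest accesses the destination alignment permits, falling back
   to the unaligned load/store helpers above where necessary.  */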
3868
3869 int
3870 alpha_expand_block_move (rtx operands[])
3871 {
3872 rtx bytes_rtx = operands[2];
3873 rtx align_rtx = operands[3];
3874 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3875 HOST_WIDE_INT bytes = orig_bytes;
3876 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3877 HOST_WIDE_INT dst_align = src_align;
3878 rtx orig_src = operands[1];
3879 rtx orig_dst = operands[0];
3880 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3881 rtx tmp;
3882 unsigned int i, words, ofs, nregs = 0;
3883
3884 if (orig_bytes <= 0)
3885 return 1;
3886 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3887 return 0;
3888
3889 /* Look for additional alignment information from recorded register info. */
3890
3891 tmp = XEXP (orig_src, 0);
3892 if (GET_CODE (tmp) == REG)
3893 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3894 else if (GET_CODE (tmp) == PLUS
3895 && GET_CODE (XEXP (tmp, 0)) == REG
3896 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3897 {
3898 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3899 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3900
3901 if (a > src_align)
3902 {
3903 if (a >= 64 && c % 8 == 0)
3904 src_align = 64;
3905 else if (a >= 32 && c % 4 == 0)
3906 src_align = 32;
3907 else if (a >= 16 && c % 2 == 0)
3908 src_align = 16;
3909 }
3910 }
3911
3912 tmp = XEXP (orig_dst, 0);
3913 if (GET_CODE (tmp) == REG)
3914 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3915 else if (GET_CODE (tmp) == PLUS
3916 && GET_CODE (XEXP (tmp, 0)) == REG
3917 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3918 {
3919 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3920 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3921
3922 if (a > dst_align)
3923 {
3924 if (a >= 64 && c % 8 == 0)
3925 dst_align = 64;
3926 else if (a >= 32 && c % 4 == 0)
3927 dst_align = 32;
3928 else if (a >= 16 && c % 2 == 0)
3929 dst_align = 16;
3930 }
3931 }
3932
3933 ofs = 0;
3934 if (src_align >= 64 && bytes >= 8)
3935 {
3936 words = bytes / 8;
3937
3938 for (i = 0; i < words; ++i)
3939 data_regs[nregs + i] = gen_reg_rtx (DImode);
3940
3941 for (i = 0; i < words; ++i)
3942 emit_move_insn (data_regs[nregs + i],
3943 adjust_address (orig_src, DImode, ofs + i * 8));
3944
3945 nregs += words;
3946 bytes -= words * 8;
3947 ofs += words * 8;
3948 }
3949
3950 if (src_align >= 32 && bytes >= 4)
3951 {
3952 words = bytes / 4;
3953
3954 for (i = 0; i < words; ++i)
3955 data_regs[nregs + i] = gen_reg_rtx (SImode);
3956
3957 for (i = 0; i < words; ++i)
3958 emit_move_insn (data_regs[nregs + i],
3959 adjust_address (orig_src, SImode, ofs + i * 4));
3960
3961 nregs += words;
3962 bytes -= words * 4;
3963 ofs += words * 4;
3964 }
3965
3966 if (bytes >= 8)
3967 {
3968 words = bytes / 8;
3969
3970 for (i = 0; i < words+1; ++i)
3971 data_regs[nregs + i] = gen_reg_rtx (DImode);
3972
3973 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3974 words, ofs);
3975
3976 nregs += words;
3977 bytes -= words * 8;
3978 ofs += words * 8;
3979 }
3980
3981 if (! TARGET_BWX && bytes >= 4)
3982 {
3983 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3984 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3985 bytes -= 4;
3986 ofs += 4;
3987 }
3988
3989 if (bytes >= 2)
3990 {
3991 if (src_align >= 16)
3992 {
3993 do {
3994 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3995 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3996 bytes -= 2;
3997 ofs += 2;
3998 } while (bytes >= 2);
3999 }
4000 else if (! TARGET_BWX)
4001 {
4002 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4003 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4004 bytes -= 2;
4005 ofs += 2;
4006 }
4007 }
4008
4009 while (bytes > 0)
4010 {
4011 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4012 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4013 bytes -= 1;
4014 ofs += 1;
4015 }
4016
4017 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4018
4019 /* Now save it back out again. */
4020
4021 i = 0, ofs = 0;
4022
4023 /* Write out the data in whatever chunks reading the source allowed. */
4024 if (dst_align >= 64)
4025 {
4026 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4027 {
4028 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4029 data_regs[i]);
4030 ofs += 8;
4031 i++;
4032 }
4033 }
4034
4035 if (dst_align >= 32)
4036 {
4037 /* If the source has remaining DImode regs, write them out in
4038 two pieces. */
4039 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4040 {
4041 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4042 NULL_RTX, 1, OPTAB_WIDEN);
4043
4044 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4045 gen_lowpart (SImode, data_regs[i]));
4046 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4047 gen_lowpart (SImode, tmp));
4048 ofs += 8;
4049 i++;
4050 }
4051
4052 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4053 {
4054 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4055 data_regs[i]);
4056 ofs += 4;
4057 i++;
4058 }
4059 }
4060
4061 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4062 {
4063 /* Write out a remaining block of words using unaligned methods. */
4064
4065 for (words = 1; i + words < nregs; words++)
4066 if (GET_MODE (data_regs[i + words]) != DImode)
4067 break;
4068
4069 if (words == 1)
4070 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4071 else
4072 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4073 words, ofs);
4074
4075 i += words;
4076 ofs += words * 8;
4077 }
4078
4079 /* Due to the above, this won't be aligned. */
4080 /* ??? If we have more than one of these, consider constructing full
4081 words in registers and using alpha_expand_unaligned_store_words. */
4082 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4083 {
4084 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4085 ofs += 4;
4086 i++;
4087 }
4088
4089 if (dst_align >= 16)
4090 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4091 {
4092 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4093 i++;
4094 ofs += 2;
4095 }
4096 else
4097 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4098 {
4099 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4100 i++;
4101 ofs += 2;
4102 }
4103
4104 /* The remainder must be byte copies. */
4105 while (i < nregs)
4106 {
4107 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4108 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4109 i++;
4110 ofs += 1;
4111 }
4112
4113 return 1;
4114 }
4115
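/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */
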
4116 int
4117 alpha_expand_block_clear (rtx operands[])
4118 {
4119 rtx bytes_rtx = operands[1];
4120 rtx align_rtx = operands[3];
4121 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4122 HOST_WIDE_INT bytes = orig_bytes;
4123 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4124 HOST_WIDE_INT alignofs = 0;
4125 rtx orig_dst = operands[0];
4126 rtx tmp;
4127 int i, words, ofs = 0;
4128
4129 if (orig_bytes <= 0)
4130 return 1;
4131 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4132 return 0;
4133
4134 /* Look for stricter alignment. */
4135 tmp = XEXP (orig_dst, 0);
4136 if (GET_CODE (tmp) == REG)
4137 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4138 else if (GET_CODE (tmp) == PLUS
4139 && GET_CODE (XEXP (tmp, 0)) == REG
4140 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4141 {
4142 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4143 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4144
4145 if (a > align)
4146 {
4147 if (a >= 64)
4148 align = a, alignofs = 8 - c % 8;
4149 else if (a >= 32)
4150 align = a, alignofs = 4 - c % 4;
4151 else if (a >= 16)
4152 align = a, alignofs = 2 - c % 2;
4153 }
4154 }
4155
4156 /* Handle an unaligned prefix first. */
4157
4158 if (alignofs > 0)
4159 {
4160 #if HOST_BITS_PER_WIDE_INT >= 64
4161 /* Given that alignofs is bounded by align, the only time BWX could
4162 generate three stores is for a 7 byte fill. Prefer two individual
4163 stores over a load/mask/store sequence. */
4164 if ((!TARGET_BWX || alignofs == 7)
4165 && align >= 32
4166 && !(alignofs == 4 && bytes >= 4))
4167 {
4168 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4169 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4170 rtx mem, tmp;
4171 HOST_WIDE_INT mask;
4172
4173 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4174 set_mem_alias_set (mem, 0);
4175
4176 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4177 if (bytes < alignofs)
4178 {
4179 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4180 ofs += bytes;
4181 bytes = 0;
4182 }
4183 else
4184 {
4185 bytes -= alignofs;
4186 ofs += alignofs;
4187 }
4188 alignofs = 0;
4189
4190 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4191 NULL_RTX, 1, OPTAB_WIDEN);
4192
4193 emit_move_insn (mem, tmp);
4194 }
4195 #endif
4196
4197 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4198 {
4199 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4200 bytes -= 1;
4201 ofs += 1;
4202 alignofs -= 1;
4203 }
4204 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4205 {
4206 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4207 bytes -= 2;
4208 ofs += 2;
4209 alignofs -= 2;
4210 }
4211 if (alignofs == 4 && bytes >= 4)
4212 {
4213 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4214 bytes -= 4;
4215 ofs += 4;
4216 alignofs = 0;
4217 }
4218
4219 /* If we've not used the extra lead alignment information by now,
4220 we won't be able to. Downgrade align to match what's left over. */
4221 if (alignofs > 0)
4222 {
4223 alignofs = alignofs & -alignofs;
4224 align = MIN (align, alignofs * BITS_PER_UNIT);
4225 }
4226 }
4227
4228 /* Handle a block of contiguous long-words. */
4229
4230 if (align >= 64 && bytes >= 8)
4231 {
4232 words = bytes / 8;
4233
4234 for (i = 0; i < words; ++i)
4235 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4236 const0_rtx);
4237
4238 bytes -= words * 8;
4239 ofs += words * 8;
4240 }
4241
4242 /* If the block is large and appropriately aligned, emit a single
4243 store followed by a sequence of stq_u insns. */
4244
4245 if (align >= 32 && bytes > 16)
4246 {
4247 rtx orig_dsta;
4248
4249 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4250 bytes -= 4;
4251 ofs += 4;
4252
4253 orig_dsta = XEXP (orig_dst, 0);
4254 if (GET_CODE (orig_dsta) == LO_SUM)
4255 orig_dsta = force_reg (Pmode, orig_dsta);
4256
4257 words = bytes / 8;
4258 for (i = 0; i < words; ++i)
4259 {
4260 rtx mem
4261 = change_address (orig_dst, DImode,
4262 gen_rtx_AND (DImode,
4263 plus_constant (orig_dsta, ofs + i*8),
4264 GEN_INT (-8)));
4265 set_mem_alias_set (mem, 0);
4266 emit_move_insn (mem, const0_rtx);
4267 }
4268
4269 /* Depending on the alignment, the first stq_u may have overlapped
4270 with the initial stl, which means that the last stq_u didn't
4271 write as much as it would appear. Leave those questionable bytes
4272 unaccounted for. */
4273 bytes -= words * 8 - 4;
4274 ofs += words * 8 - 4;
4275 }
4276
4277 /* Handle a smaller block of aligned words. */
4278
4279 if ((align >= 64 && bytes == 4)
4280 || (align == 32 && bytes >= 4))
4281 {
4282 words = bytes / 4;
4283
4284 for (i = 0; i < words; ++i)
4285 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4286 const0_rtx);
4287
4288 bytes -= words * 4;
4289 ofs += words * 4;
4290 }
4291
4292 /* An unaligned block uses stq_u stores for as many bytes as possible.  */
4293
4294 if (bytes >= 8)
4295 {
4296 words = bytes / 8;
4297
4298 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4299
4300 bytes -= words * 8;
4301 ofs += words * 8;
4302 }
4303
4304 /* Next clean up any trailing pieces. */
4305
4306 #if HOST_BITS_PER_WIDE_INT >= 64
4307 /* Count the number of bits in BYTES for which aligned stores could
4308 be emitted. */
4309 words = 0;
4310 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4311 if (bytes & i)
4312 words += 1;
4313
4314 /* If we have appropriate alignment (and it wouldn't take too many
4315 instructions otherwise), mask out the bytes we need. */
4316 if (TARGET_BWX ? words > 2 : bytes > 0)
4317 {
4318 if (align >= 64)
4319 {
4320 rtx mem, tmp;
4321 HOST_WIDE_INT mask;
4322
4323 mem = adjust_address (orig_dst, DImode, ofs);
4324 set_mem_alias_set (mem, 0);
4325
4326 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4327
4328 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4329 NULL_RTX, 1, OPTAB_WIDEN);
4330
4331 emit_move_insn (mem, tmp);
4332 return 1;
4333 }
4334 else if (align >= 32 && bytes < 4)
4335 {
4336 rtx mem, tmp;
4337 HOST_WIDE_INT mask;
4338
4339 mem = adjust_address (orig_dst, SImode, ofs);
4340 set_mem_alias_set (mem, 0);
4341
4342 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4343
4344 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4345 NULL_RTX, 1, OPTAB_WIDEN);
4346
4347 emit_move_insn (mem, tmp);
4348 return 1;
4349 }
4350 }
4351 #endif
4352
4353 if (!TARGET_BWX && bytes >= 4)
4354 {
4355 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4356 bytes -= 4;
4357 ofs += 4;
4358 }
4359
4360 if (bytes >= 2)
4361 {
4362 if (align >= 16)
4363 {
4364 do {
4365 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4366 const0_rtx);
4367 bytes -= 2;
4368 ofs += 2;
4369 } while (bytes >= 2);
4370 }
4371 else if (! TARGET_BWX)
4372 {
4373 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4374 bytes -= 2;
4375 ofs += 2;
4376 }
4377 }
4378
4379 while (bytes > 0)
4380 {
4381 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4382 bytes -= 1;
4383 ofs += 1;
4384 }
4385
4386 return 1;
4387 }
4388
4389 /* Returns a mask so that zap(x, value) == x & mask. */
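/* For example, VALUE == 0x0f selects the low four bytes for zapping,
   giving the mask 0xffffffff00000000.  */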
4390
4391 rtx
4392 alpha_expand_zap_mask (HOST_WIDE_INT value)
4393 {
4394 rtx result;
4395 int i;
4396
4397 if (HOST_BITS_PER_WIDE_INT >= 64)
4398 {
4399 HOST_WIDE_INT mask = 0;
4400
4401 for (i = 7; i >= 0; --i)
4402 {
4403 mask <<= 8;
4404 if (!((value >> i) & 1))
4405 mask |= 0xff;
4406 }
4407
4408 result = gen_int_mode (mask, DImode);
4409 }
4410 else
4411 {
4412 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4413
4414 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4415
4416 for (i = 7; i >= 4; --i)
4417 {
4418 mask_hi <<= 8;
4419 if (!((value >> i) & 1))
4420 mask_hi |= 0xff;
4421 }
4422
4423 for (i = 3; i >= 0; --i)
4424 {
4425 mask_lo <<= 8;
4426 if (!((value >> i) & 1))
4427 mask_lo |= 0xff;
4428 }
4429
4430 result = immed_double_const (mask_lo, mask_hi, DImode);
4431 }
4432
4433 return result;
4434 }
4435
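/* Expand a built-in vector binary operation: lower OP0, OP1 and OP2 to the
   vector mode MODE (mapping integer zero to the vector zero) and emit the
   insn produced by GEN.  */
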
4436 void
4437 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4438 enum machine_mode mode,
4439 rtx op0, rtx op1, rtx op2)
4440 {
4441 op0 = gen_lowpart (mode, op0);
4442
4443 if (op1 == const0_rtx)
4444 op1 = CONST0_RTX (mode);
4445 else
4446 op1 = gen_lowpart (mode, op1);
4447
4448 if (op2 == const0_rtx)
4449 op2 = CONST0_RTX (mode);
4450 else
4451 op2 = gen_lowpart (mode, op2);
4452
4453 emit_insn ((*gen) (op0, op1, op2));
4454 }
4455
4456 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4457 COND is true. Mark the jump as unlikely to be taken. */
4458
4459 static void
4460 emit_unlikely_jump (rtx cond, rtx label)
4461 {
4462 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4463 rtx x;
4464
4465 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4466 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4467 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4468 }
4469
4470 /* A subroutine of the atomic operation splitters. Emit a load-locked
4471 instruction in MODE. */
4472
4473 static void
4474 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4475 {
4476 rtx (*fn) (rtx, rtx) = NULL;
4477 if (mode == SImode)
4478 fn = gen_load_locked_si;
4479 else if (mode == DImode)
4480 fn = gen_load_locked_di;
4481 emit_insn (fn (reg, mem));
4482 }
4483
4484 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4485 instruction in MODE. */
4486
4487 static void
4488 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4489 {
4490 rtx (*fn) (rtx, rtx, rtx) = NULL;
4491 if (mode == SImode)
4492 fn = gen_store_conditional_si;
4493 else if (mode == DImode)
4494 fn = gen_store_conditional_di;
4495 emit_insn (fn (res, mem, val));
4496 }
4497
4498 /* A subroutine of the atomic operation splitters. Emit an insxl
4499 instruction in MODE. */
4500
4501 static rtx
4502 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4503 {
4504 rtx ret = gen_reg_rtx (DImode);
4505 rtx (*fn) (rtx, rtx, rtx);
4506
4507 if (WORDS_BIG_ENDIAN)
4508 {
4509 if (mode == QImode)
4510 fn = gen_insbl_be;
4511 else
4512 fn = gen_inswl_be;
4513 }
4514 else
4515 {
4516 if (mode == QImode)
4517 fn = gen_insbl_le;
4518 else
4519 fn = gen_inswl_le;
4520 }
4521 emit_insn (fn (ret, op1, op2));
4522
4523 return ret;
4524 }
4525
4526 /* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
4527 to perform. MEM is the memory on which to operate. VAL is the second
4528 operand of the binary operator. BEFORE and AFTER are optional locations to
4529    return the value of MEM either before or after the operation.  SCRATCH is
4530 a scratch register. */
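/* The emitted sequence is roughly (shown here for DImode):

	mb
    1:	ldq_l	tmp,mem
	<op>	tmp,val,scratch
	stq_c	scratch,mem
	beq	scratch,1b
	mb
*/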
4531
4532 void
4533 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4534 rtx before, rtx after, rtx scratch)
4535 {
4536 enum machine_mode mode = GET_MODE (mem);
4537 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4538
4539 emit_insn (gen_memory_barrier ());
4540
4541 label = gen_label_rtx ();
4542 emit_label (label);
4543 label = gen_rtx_LABEL_REF (DImode, label);
4544
4545 if (before == NULL)
4546 before = scratch;
4547 emit_load_locked (mode, before, mem);
4548
4549 if (code == NOT)
4550 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4551 else
4552 x = gen_rtx_fmt_ee (code, mode, before, val);
4553 if (after)
4554 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4555 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4556
4557 emit_store_conditional (mode, cond, mem, scratch);
4558
4559 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4560 emit_unlikely_jump (x, label);
4561
4562 emit_insn (gen_memory_barrier ());
4563 }
4564
4565 /* Expand a compare and swap operation. */
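/* The emitted sequence is roughly (shown for DImode and a nonzero OLDVAL):

	mb
    1:	ldq_l	retval,mem
	cmpeq	retval,oldval,scratch
	beq	scratch,2f
	mov	newval,scratch
	stq_c	scratch,mem
	beq	scratch,1b
	mb
    2:
*/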
4566
4567 void
4568 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4569 rtx scratch)
4570 {
4571 enum machine_mode mode = GET_MODE (mem);
4572 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4573
4574 emit_insn (gen_memory_barrier ());
4575
4576 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4577 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4578 emit_label (XEXP (label1, 0));
4579
4580 emit_load_locked (mode, retval, mem);
4581
4582 x = gen_lowpart (DImode, retval);
4583 if (oldval == const0_rtx)
4584 x = gen_rtx_NE (DImode, x, const0_rtx);
4585 else
4586 {
4587 x = gen_rtx_EQ (DImode, x, oldval);
4588 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4589 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4590 }
4591 emit_unlikely_jump (x, label2);
4592
4593 emit_move_insn (scratch, newval);
4594 emit_store_conditional (mode, cond, mem, scratch);
4595
4596 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4597 emit_unlikely_jump (x, label1);
4598
4599 emit_insn (gen_memory_barrier ());
4600 emit_label (XEXP (label2, 0));
4601 }
4602
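/* Expand a compare-and-swap of a byte or halfword value.  Alpha has no
   byte-wide load-locked/store-conditional insns, so the operation is
   performed on the containing aligned quadword; the new value is
   pre-inserted into position here and the rest of the work is done by
   the splitter below.  */
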
4603 void
4604 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4605 {
4606 enum machine_mode mode = GET_MODE (mem);
4607 rtx addr, align, wdst;
4608 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4609
4610 addr = force_reg (DImode, XEXP (mem, 0));
4611 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4612 NULL_RTX, 1, OPTAB_DIRECT);
4613
4614 oldval = convert_modes (DImode, mode, oldval, 1);
4615 newval = emit_insxl (mode, newval, addr);
4616
4617 wdst = gen_reg_rtx (DImode);
4618 if (mode == QImode)
4619 fn5 = gen_sync_compare_and_swapqi_1;
4620 else
4621 fn5 = gen_sync_compare_and_swaphi_1;
4622 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4623
4624 emit_move_insn (dst, gen_lowpart (mode, wdst));
4625 }
4626
4627 void
4628 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4629 rtx oldval, rtx newval, rtx align,
4630 rtx scratch, rtx cond)
4631 {
4632 rtx label1, label2, mem, width, mask, x;
4633
4634 mem = gen_rtx_MEM (DImode, align);
4635 MEM_VOLATILE_P (mem) = 1;
4636
4637 emit_insn (gen_memory_barrier ());
4638 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4639 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4640 emit_label (XEXP (label1, 0));
4641
4642 emit_load_locked (DImode, scratch, mem);
4643
4644 width = GEN_INT (GET_MODE_BITSIZE (mode));
4645 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4646 if (WORDS_BIG_ENDIAN)
4647 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4648 else
4649 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4650
4651 if (oldval == const0_rtx)
4652 x = gen_rtx_NE (DImode, dest, const0_rtx);
4653 else
4654 {
4655 x = gen_rtx_EQ (DImode, dest, oldval);
4656 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4657 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4658 }
4659 emit_unlikely_jump (x, label2);
4660
4661 if (WORDS_BIG_ENDIAN)
4662 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4663 else
4664 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4665 emit_insn (gen_iordi3 (scratch, scratch, newval));
4666
4667 emit_store_conditional (DImode, scratch, mem, scratch);
4668
4669 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4670 emit_unlikely_jump (x, label1);
4671
4672 emit_insn (gen_memory_barrier ());
4673 emit_label (XEXP (label2, 0));
4674 }
4675
4676 /* Expand an atomic exchange operation. */
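/* The emitted sequence is roughly (shown here for DImode):

	mb
    1:	ldq_l	retval,mem
	mov	val,scratch
	stq_c	scratch,mem
	beq	scratch,1b
*/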
4677
4678 void
4679 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4680 {
4681 enum machine_mode mode = GET_MODE (mem);
4682 rtx label, x, cond = gen_lowpart (DImode, scratch);
4683
4684 emit_insn (gen_memory_barrier ());
4685
4686 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4687 emit_label (XEXP (label, 0));
4688
4689 emit_load_locked (mode, retval, mem);
4690 emit_move_insn (scratch, val);
4691 emit_store_conditional (mode, cond, mem, scratch);
4692
4693 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4694 emit_unlikely_jump (x, label);
4695 }
4696
4697 void
4698 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4699 {
4700 enum machine_mode mode = GET_MODE (mem);
4701 rtx addr, align, wdst;
4702 rtx (*fn4) (rtx, rtx, rtx, rtx);
4703
4704 /* Force the address into a register. */
4705 addr = force_reg (DImode, XEXP (mem, 0));
4706
4707 /* Align it to a multiple of 8. */
4708 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4709 NULL_RTX, 1, OPTAB_DIRECT);
4710
4711 /* Insert val into the correct byte location within the word. */
4712 val = emit_insxl (mode, val, addr);
4713
4714 wdst = gen_reg_rtx (DImode);
4715 if (mode == QImode)
4716 fn4 = gen_sync_lock_test_and_setqi_1;
4717 else
4718 fn4 = gen_sync_lock_test_and_sethi_1;
4719 emit_insn (fn4 (wdst, addr, val, align));
4720
4721 emit_move_insn (dst, gen_lowpart (mode, wdst));
4722 }
4723
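/* Split an atomic exchange of a byte or halfword value: the operation is
   performed on the containing aligned quadword, extracting the old value
   and inserting the new one inside the ll/sc loop, just as for the narrow
   compare-and-swap above.  */
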
4724 void
4725 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4726 rtx val, rtx align, rtx scratch)
4727 {
4728 rtx label, mem, width, mask, x;
4729
4730 mem = gen_rtx_MEM (DImode, align);
4731 MEM_VOLATILE_P (mem) = 1;
4732
4733 emit_insn (gen_memory_barrier ());
4734 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4735 emit_label (XEXP (label, 0));
4736
4737 emit_load_locked (DImode, scratch, mem);
4738
4739 width = GEN_INT (GET_MODE_BITSIZE (mode));
4740 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4741 if (WORDS_BIG_ENDIAN)
4742 {
4743 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4744 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4745 }
4746 else
4747 {
4748 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4749 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4750 }
4751 emit_insn (gen_iordi3 (scratch, scratch, val));
4752
4753 emit_store_conditional (DImode, scratch, mem, scratch);
4754
4755 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4756 emit_unlikely_jump (x, label);
4757 }
4758 \f
4759 /* Adjust the cost of a scheduling dependency. Return the new cost of
4760 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4761
4762 static int
4763 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4764 {
4765 enum attr_type insn_type, dep_insn_type;
4766
4767 /* If the dependence is an anti-dependence, there is no cost. For an
4768 output dependence, there is sometimes a cost, but it doesn't seem
4769 worth handling those few cases. */
4770 if (REG_NOTE_KIND (link) != 0)
4771 return cost;
4772
4773 /* If we can't recognize the insns, we can't really do anything. */
4774 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4775 return cost;
4776
4777 insn_type = get_attr_type (insn);
4778 dep_insn_type = get_attr_type (dep_insn);
4779
4780 /* Bring in the user-defined memory latency. */
4781 if (dep_insn_type == TYPE_ILD
4782 || dep_insn_type == TYPE_FLD
4783 || dep_insn_type == TYPE_LDSYM)
4784 cost += alpha_memory_latency-1;
4785
4786 /* Everything else handled in DFA bypasses now. */
4787
4788 return cost;
4789 }
4790
4791 /* The number of instructions that can be issued per cycle. */
4792
4793 static int
4794 alpha_issue_rate (void)
4795 {
4796 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4797 }
4798
4799 /* How many alternative schedules to try. This should be as wide as the
4800 scheduling freedom in the DFA, but no wider. Making this value too
4801    large results in extra work for the scheduler.
4802
4803 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4804 alternative schedules. For EV5, we can choose between E0/E1 and
4805 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4806
4807 static int
4808 alpha_multipass_dfa_lookahead (void)
4809 {
4810 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4811 }
4812 \f
4813 /* Machine-specific function data. */
4814
4815 struct machine_function GTY(())
4816 {
4817 /* For unicosmk. */
4818 /* List of call information words for calls from this function. */
4819 struct rtx_def *first_ciw;
4820 struct rtx_def *last_ciw;
4821 int ciw_count;
4822
4823 /* List of deferred case vectors. */
4824 struct rtx_def *addr_list;
4825
4826 /* For OSF. */
4827 const char *some_ld_name;
4828
4829 /* For TARGET_LD_BUGGY_LDGP. */
4830 struct rtx_def *gp_save_rtx;
4831 };
4832
4833 /* How to allocate a 'struct machine_function'. */
4834
4835 static struct machine_function *
4836 alpha_init_machine_status (void)
4837 {
4838 return ((struct machine_function *)
4839 ggc_alloc_cleared (sizeof (struct machine_function)));
4840 }
4841
4842 /* Functions to save and restore alpha_return_addr_rtx. */
4843
4844 /* Start the ball rolling with RETURN_ADDR_RTX. */
4845
4846 rtx
4847 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4848 {
4849 if (count != 0)
4850 return const0_rtx;
4851
4852 return get_hard_reg_initial_val (Pmode, REG_RA);
4853 }
4854
4855 /* Return or create a memory slot containing the gp value for the current
4856 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4857
4858 rtx
4859 alpha_gp_save_rtx (void)
4860 {
4861 rtx seq, m = cfun->machine->gp_save_rtx;
4862
4863 if (m == NULL)
4864 {
4865 start_sequence ();
4866
4867 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4868 m = validize_mem (m);
4869 emit_move_insn (m, pic_offset_table_rtx);
4870
4871 seq = get_insns ();
4872 end_sequence ();
4873 emit_insn_after (seq, entry_of_function ());
4874
4875 cfun->machine->gp_save_rtx = m;
4876 }
4877
4878 return m;
4879 }
4880
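/* Return nonzero if the return address register ($26) is written anywhere
   within the body of the current function.  */
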
4881 static int
4882 alpha_ra_ever_killed (void)
4883 {
4884 rtx top;
4885
4886 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4887 return regs_ever_live[REG_RA];
4888
4889 push_topmost_sequence ();
4890 top = get_insns ();
4891 pop_topmost_sequence ();
4892
4893 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4894 }
4895
4896 \f
4897 /* Return the trap mode suffix applicable to the current
4898 instruction, or NULL. */
4899
4900 static const char *
4901 get_trap_mode_suffix (void)
4902 {
4903 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4904
4905 switch (s)
4906 {
4907 case TRAP_SUFFIX_NONE:
4908 return NULL;
4909
4910 case TRAP_SUFFIX_SU:
4911 if (alpha_fptm >= ALPHA_FPTM_SU)
4912 return "su";
4913 return NULL;
4914
4915 case TRAP_SUFFIX_SUI:
4916 if (alpha_fptm >= ALPHA_FPTM_SUI)
4917 return "sui";
4918 return NULL;
4919
4920 case TRAP_SUFFIX_V_SV:
4921 switch (alpha_fptm)
4922 {
4923 case ALPHA_FPTM_N:
4924 return NULL;
4925 case ALPHA_FPTM_U:
4926 return "v";
4927 case ALPHA_FPTM_SU:
4928 case ALPHA_FPTM_SUI:
4929 return "sv";
4930 default:
4931 gcc_unreachable ();
4932 }
4933
4934 case TRAP_SUFFIX_V_SV_SVI:
4935 switch (alpha_fptm)
4936 {
4937 case ALPHA_FPTM_N:
4938 return NULL;
4939 case ALPHA_FPTM_U:
4940 return "v";
4941 case ALPHA_FPTM_SU:
4942 return "sv";
4943 case ALPHA_FPTM_SUI:
4944 return "svi";
4945 default:
4946 gcc_unreachable ();
4947 }
4948 break;
4949
4950 case TRAP_SUFFIX_U_SU_SUI:
4951 switch (alpha_fptm)
4952 {
4953 case ALPHA_FPTM_N:
4954 return NULL;
4955 case ALPHA_FPTM_U:
4956 return "u";
4957 case ALPHA_FPTM_SU:
4958 return "su";
4959 case ALPHA_FPTM_SUI:
4960 return "sui";
4961 default:
4962 gcc_unreachable ();
4963 }
4964 break;
4965
4966 default:
4967 gcc_unreachable ();
4968 }
4969 gcc_unreachable ();
4970 }
4971
4972 /* Return the rounding mode suffix applicable to the current
4973 instruction, or NULL. */
4974
4975 static const char *
4976 get_round_mode_suffix (void)
4977 {
4978 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4979
4980 switch (s)
4981 {
4982 case ROUND_SUFFIX_NONE:
4983 return NULL;
4984 case ROUND_SUFFIX_NORMAL:
4985 switch (alpha_fprm)
4986 {
4987 case ALPHA_FPRM_NORM:
4988 return NULL;
4989 case ALPHA_FPRM_MINF:
4990 return "m";
4991 case ALPHA_FPRM_CHOP:
4992 return "c";
4993 case ALPHA_FPRM_DYN:
4994 return "d";
4995 default:
4996 gcc_unreachable ();
4997 }
4998 break;
4999
5000 case ROUND_SUFFIX_C:
5001 return "c";
5002
5003 default:
5004 gcc_unreachable ();
5005 }
5006 gcc_unreachable ();
5007 }
5008
5009 /* Locate some local-dynamic symbol still in use by this function
5010 so that we can print its name in some movdi_er_tlsldm pattern. */
5011
5012 static int
5013 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5014 {
5015 rtx x = *px;
5016
5017 if (GET_CODE (x) == SYMBOL_REF
5018 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5019 {
5020 cfun->machine->some_ld_name = XSTR (x, 0);
5021 return 1;
5022 }
5023
5024 return 0;
5025 }
5026
5027 static const char *
5028 get_some_local_dynamic_name (void)
5029 {
5030 rtx insn;
5031
5032 if (cfun->machine->some_ld_name)
5033 return cfun->machine->some_ld_name;
5034
5035 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5036 if (INSN_P (insn)
5037 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5038 return cfun->machine->some_ld_name;
5039
5040 gcc_unreachable ();
5041 }
5042
5043 /* Print an operand. Recognize special options, documented below. */
5044
5045 void
5046 print_operand (FILE *file, rtx x, int code)
5047 {
5048 int i;
5049
5050 switch (code)
5051 {
5052 case '~':
5053 /* Print the assembler name of the current function. */
5054 assemble_name (file, alpha_fnname);
5055 break;
5056
5057 case '&':
5058 assemble_name (file, get_some_local_dynamic_name ());
5059 break;
5060
5061 case '/':
5062 {
5063 const char *trap = get_trap_mode_suffix ();
5064 const char *round = get_round_mode_suffix ();
5065
5066 if (trap || round)
5067 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5068 (trap ? trap : ""), (round ? round : ""));
5069 break;
5070 }
5071
5072 case ',':
5073      /* Generates the single-precision instruction suffix.  */
5074 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5075 break;
5076
5077 case '-':
5078      /* Generates the double-precision instruction suffix.  */
5079 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5080 break;
5081
5082 case '+':
5083 /* Generates a nop after a noreturn call at the very end of the
5084 function. */
5085 if (next_real_insn (current_output_insn) == 0)
5086 fprintf (file, "\n\tnop");
5087 break;
5088
5089 case '#':
5090 if (alpha_this_literal_sequence_number == 0)
5091 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5092 fprintf (file, "%d", alpha_this_literal_sequence_number);
5093 break;
5094
5095 case '*':
5096 if (alpha_this_gpdisp_sequence_number == 0)
5097 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5098 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5099 break;
5100
5101 case 'H':
5102 if (GET_CODE (x) == HIGH)
5103 output_addr_const (file, XEXP (x, 0));
5104 else
5105 output_operand_lossage ("invalid %%H value");
5106 break;
5107
5108 case 'J':
5109 {
5110 const char *lituse;
5111
5112 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5113 {
5114 x = XVECEXP (x, 0, 0);
5115 lituse = "lituse_tlsgd";
5116 }
5117 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5118 {
5119 x = XVECEXP (x, 0, 0);
5120 lituse = "lituse_tlsldm";
5121 }
5122 else if (GET_CODE (x) == CONST_INT)
5123 lituse = "lituse_jsr";
5124 else
5125 {
5126 output_operand_lossage ("invalid %%J value");
5127 break;
5128 }
5129
5130 if (x != const0_rtx)
5131 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5132 }
5133 break;
5134
5135 case 'j':
5136 {
5137 const char *lituse;
5138
5139 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5140 lituse = "lituse_jsrdirect";
5141 #else
5142 lituse = "lituse_jsr";
5143 #endif
5144
5145 gcc_assert (INTVAL (x) != 0);
5146 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5147 }
5148 break;
5149 case 'r':
5150 /* If this operand is the constant zero, write it as "$31". */
5151 if (GET_CODE (x) == REG)
5152 fprintf (file, "%s", reg_names[REGNO (x)]);
5153 else if (x == CONST0_RTX (GET_MODE (x)))
5154 fprintf (file, "$31");
5155 else
5156 output_operand_lossage ("invalid %%r value");
5157 break;
5158
5159 case 'R':
5160 /* Similar, but for floating-point. */
5161 if (GET_CODE (x) == REG)
5162 fprintf (file, "%s", reg_names[REGNO (x)]);
5163 else if (x == CONST0_RTX (GET_MODE (x)))
5164 fprintf (file, "$f31");
5165 else
5166 output_operand_lossage ("invalid %%R value");
5167 break;
5168
5169 case 'N':
5170 /* Write the 1's complement of a constant. */
5171 if (GET_CODE (x) != CONST_INT)
5172 output_operand_lossage ("invalid %%N value");
5173
5174 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5175 break;
5176
5177 case 'P':
5178 /* Write 1 << C, for a constant C. */
5179 if (GET_CODE (x) != CONST_INT)
5180 output_operand_lossage ("invalid %%P value");
5181
5182 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5183 break;
5184
5185 case 'h':
5186 /* Write the high-order 16 bits of a constant, sign-extended. */
5187 if (GET_CODE (x) != CONST_INT)
5188 output_operand_lossage ("invalid %%h value");
5189
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5191 break;
5192
5193 case 'L':
5194 /* Write the low-order 16 bits of a constant, sign-extended. */
5195 if (GET_CODE (x) != CONST_INT)
5196 output_operand_lossage ("invalid %%L value");
5197
5198 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5199 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5200 break;
5201
5202 case 'm':
5203 /* Write mask for ZAP insn. */
5204 if (GET_CODE (x) == CONST_DOUBLE)
5205 {
5206 HOST_WIDE_INT mask = 0;
5207 HOST_WIDE_INT value;
5208
5209 value = CONST_DOUBLE_LOW (x);
5210 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5211 i++, value >>= 8)
5212 if (value & 0xff)
5213 mask |= (1 << i);
5214
5215 value = CONST_DOUBLE_HIGH (x);
5216 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5217 i++, value >>= 8)
5218 if (value & 0xff)
5219 mask |= (1 << (i + sizeof (int)));
5220
5221 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5222 }
5223
5224 else if (GET_CODE (x) == CONST_INT)
5225 {
5226 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5227
5228 for (i = 0; i < 8; i++, value >>= 8)
5229 if (value & 0xff)
5230 mask |= (1 << i);
5231
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5233 }
5234 else
5235 output_operand_lossage ("invalid %%m value");
5236 break;
5237
5238 case 'M':
5239 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5240 if (GET_CODE (x) != CONST_INT
5241 || (INTVAL (x) != 8 && INTVAL (x) != 16
5242 && INTVAL (x) != 32 && INTVAL (x) != 64))
5243 output_operand_lossage ("invalid %%M value");
5244
5245 fprintf (file, "%s",
5246 (INTVAL (x) == 8 ? "b"
5247 : INTVAL (x) == 16 ? "w"
5248 : INTVAL (x) == 32 ? "l"
5249 : "q"));
5250 break;
5251
5252 case 'U':
5253 /* Similar, except do it from the mask. */
5254 if (GET_CODE (x) == CONST_INT)
5255 {
5256 HOST_WIDE_INT value = INTVAL (x);
5257
5258 if (value == 0xff)
5259 {
5260 fputc ('b', file);
5261 break;
5262 }
5263 if (value == 0xffff)
5264 {
5265 fputc ('w', file);
5266 break;
5267 }
5268 if (value == 0xffffffff)
5269 {
5270 fputc ('l', file);
5271 break;
5272 }
5273 if (value == -1)
5274 {
5275 fputc ('q', file);
5276 break;
5277 }
5278 }
5279 else if (HOST_BITS_PER_WIDE_INT == 32
5280 && GET_CODE (x) == CONST_DOUBLE
5281 && CONST_DOUBLE_LOW (x) == 0xffffffff
5282 && CONST_DOUBLE_HIGH (x) == 0)
5283 {
5284 fputc ('l', file);
5285 break;
5286 }
5287 output_operand_lossage ("invalid %%U value");
5288 break;
5289
5290 case 's':
5291 /* Write the constant value divided by 8 for little-endian mode or
5292 (56 - value) / 8 for big-endian mode. */
5293
5294 if (GET_CODE (x) != CONST_INT
5295 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5296 ? 56
5297 : 64)
5298 || (INTVAL (x) & 7) != 0)
5299 output_operand_lossage ("invalid %%s value");
5300
5301 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5302 WORDS_BIG_ENDIAN
5303 ? (56 - INTVAL (x)) / 8
5304 : INTVAL (x) / 8);
5305 break;
5306
5307 case 'S':
5308      /* Same, except compute (64 - c) / 8.  */
5309
5310      if (GET_CODE (x) != CONST_INT
5311	  || (unsigned HOST_WIDE_INT) INTVAL (x) > 64
5312	  || (INTVAL (x) & 7) != 0)
5313	output_operand_lossage ("invalid %%S value");
5314
5315 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5316 break;
5317
5318 case 't':
5319 {
5320 /* On Unicos/Mk systems: use a DEX expression if the symbol
5321 clashes with a register name. */
5322 int dex = unicosmk_need_dex (x);
5323 if (dex)
5324 fprintf (file, "DEX(%d)", dex);
5325 else
5326 output_addr_const (file, x);
5327 }
5328 break;
5329
5330 case 'C': case 'D': case 'c': case 'd':
5331 /* Write out comparison name. */
5332 {
5333 enum rtx_code c = GET_CODE (x);
5334
5335 if (!COMPARISON_P (x))
5336 output_operand_lossage ("invalid %%C value");
5337
5338 else if (code == 'D')
5339 c = reverse_condition (c);
5340 else if (code == 'c')
5341 c = swap_condition (c);
5342 else if (code == 'd')
5343 c = swap_condition (reverse_condition (c));
5344
5345 if (c == LEU)
5346 fprintf (file, "ule");
5347 else if (c == LTU)
5348 fprintf (file, "ult");
5349 else if (c == UNORDERED)
5350 fprintf (file, "un");
5351 else
5352 fprintf (file, "%s", GET_RTX_NAME (c));
5353 }
5354 break;
5355
5356 case 'E':
5357 /* Write the divide or modulus operator. */
5358 switch (GET_CODE (x))
5359 {
5360 case DIV:
5361 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5362 break;
5363 case UDIV:
5364 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5365 break;
5366 case MOD:
5367 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5368 break;
5369 case UMOD:
5370 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5371 break;
5372 default:
5373 output_operand_lossage ("invalid %%E value");
5374 break;
5375 }
5376 break;
5377
5378 case 'A':
5379 /* Write "_u" for unaligned access. */
5380 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5381 fprintf (file, "_u");
5382 break;
5383
5384 case 0:
5385 if (GET_CODE (x) == REG)
5386 fprintf (file, "%s", reg_names[REGNO (x)]);
5387 else if (GET_CODE (x) == MEM)
5388 output_address (XEXP (x, 0));
5389 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5390 {
5391 switch (XINT (XEXP (x, 0), 1))
5392 {
5393 case UNSPEC_DTPREL:
5394 case UNSPEC_TPREL:
5395 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5396 break;
5397 default:
5398 output_operand_lossage ("unknown relocation unspec");
5399 break;
5400 }
5401 }
5402 else
5403 output_addr_const (file, x);
5404 break;
5405
5406 default:
5407 output_operand_lossage ("invalid %%xn code");
5408 }
5409 }
5410
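/* Output the memory address ADDR for an insn template: a base register plus
   constant offset, a LO_SUM with its explicit relocation, or (on VMS) a
   symbolic address.  */
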
5411 void
5412 print_operand_address (FILE *file, rtx addr)
5413 {
5414 int basereg = 31;
5415 HOST_WIDE_INT offset = 0;
5416
5417 if (GET_CODE (addr) == AND)
5418 addr = XEXP (addr, 0);
5419
5420 if (GET_CODE (addr) == PLUS
5421 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5422 {
5423 offset = INTVAL (XEXP (addr, 1));
5424 addr = XEXP (addr, 0);
5425 }
5426
5427 if (GET_CODE (addr) == LO_SUM)
5428 {
5429 const char *reloc16, *reloclo;
5430 rtx op1 = XEXP (addr, 1);
5431
5432 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5433 {
5434 op1 = XEXP (op1, 0);
5435 switch (XINT (op1, 1))
5436 {
5437 case UNSPEC_DTPREL:
5438 reloc16 = NULL;
5439 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5440 break;
5441 case UNSPEC_TPREL:
5442 reloc16 = NULL;
5443 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5444 break;
5445 default:
5446 output_operand_lossage ("unknown relocation unspec");
5447 return;
5448 }
5449
5450 output_addr_const (file, XVECEXP (op1, 0, 0));
5451 }
5452 else
5453 {
5454 reloc16 = "gprel";
5455 reloclo = "gprellow";
5456 output_addr_const (file, op1);
5457 }
5458
5459 if (offset)
5460 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5461
5462 addr = XEXP (addr, 0);
5463 switch (GET_CODE (addr))
5464 {
5465 case REG:
5466 basereg = REGNO (addr);
5467 break;
5468
5469 case SUBREG:
5470 basereg = subreg_regno (addr);
5471 break;
5472
5473 default:
5474 gcc_unreachable ();
5475 }
5476
5477 fprintf (file, "($%d)\t\t!%s", basereg,
5478 (basereg == 29 ? reloc16 : reloclo));
5479 return;
5480 }
5481
5482 switch (GET_CODE (addr))
5483 {
5484 case REG:
5485 basereg = REGNO (addr);
5486 break;
5487
5488 case SUBREG:
5489 basereg = subreg_regno (addr);
5490 break;
5491
5492 case CONST_INT:
5493 offset = INTVAL (addr);
5494 break;
5495
5496 #if TARGET_ABI_OPEN_VMS
5497 case SYMBOL_REF:
5498 fprintf (file, "%s", XSTR (addr, 0));
5499 return;
5500
5501 case CONST:
5502 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5503 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5504 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5505 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5506 INTVAL (XEXP (XEXP (addr, 0), 1)));
5507 return;
5508
5509 #endif
5510 default:
5511 gcc_unreachable ();
5512 }
5513
5514 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5515 }
5516 \f
5517 /* Emit RTL insns to initialize the variable parts of a trampoline at
5518 TRAMP. FNADDR is an RTX for the address of the function's pure
5519 code. CXT is an RTX for the static chain value for the function.
5520
5521 The three offset parameters are for the individual template's
5522 layout. A JMPOFS < 0 indicates that the trampoline does not
5523 contain instructions at all.
5524
5525 We assume here that a function will be called many more times than
5526 its address is taken (e.g., it might be passed to qsort), so we
5527 take the trouble to initialize the "hint" field in the JMP insn.
5528 Note that the hint field is PC (new) + 4 * bits 13:0. */
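/* Concretely, the (currently disabled) code below computes
     hint = ((FNADDR - (TRAMP + JMPOFS + 4)) >> 2) & 0x3fff
   and merges it into the low 14 bits of the JMP instruction word.  */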
5529
5530 void
5531 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5532 int fnofs, int cxtofs, int jmpofs)
5533 {
5534 rtx temp, temp1, addr;
5535 /* VMS really uses DImode pointers in memory at this point. */
5536 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5537
5538 #ifdef POINTERS_EXTEND_UNSIGNED
5539 fnaddr = convert_memory_address (mode, fnaddr);
5540 cxt = convert_memory_address (mode, cxt);
5541 #endif
5542
5543 /* Store function address and CXT. */
5544 addr = memory_address (mode, plus_constant (tramp, fnofs));
5545 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5546 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5547 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5548
5549 /* This has been disabled since the hint only has a 32k range, and in
5550 no existing OS is the stack within 32k of the text segment. */
5551 if (0 && jmpofs >= 0)
5552 {
5553 /* Compute hint value. */
5554 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5555 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5556 OPTAB_WIDEN);
5557 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5558 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5559 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5560 GEN_INT (0x3fff), 0);
5561
5562 /* Merge in the hint. */
5563 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5564 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5565 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5566 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5567 OPTAB_WIDEN);
5568 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5569 }
5570
5571 #ifdef ENABLE_EXECUTE_STACK
5572 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5573 0, VOIDmode, 1, tramp, Pmode);
5574 #endif
5575
5576 if (jmpofs >= 0)
5577 emit_insn (gen_imb ());
5578 }
5579 \f
5580 /* Determine where to put an argument to a function.
5581 Value is zero to push the argument on the stack,
5582 or a hard register in which to store the argument.
5583
5584 MODE is the argument's machine mode.
5585 TYPE is the data type of the argument (as a tree).
5586 This is null for libcalls where that information may
5587 not be available.
5588 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5589 the preceding args and about the function being called.
5590 NAMED is nonzero if this argument is a named parameter
5591 (otherwise it is an extra parameter matching an ellipsis).
5592
5593 On Alpha the first 6 words of args are normally in registers
5594 and the rest are pushed. */
5595
5596 rtx
5597 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5598 int named ATTRIBUTE_UNUSED)
5599 {
5600 int basereg;
5601 int num_args;
5602
5603 /* Don't get confused and pass small structures in FP registers. */
5604 if (type && AGGREGATE_TYPE_P (type))
5605 basereg = 16;
5606 else
5607 {
5608 #ifdef ENABLE_CHECKING
5609 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5610 values here. */
5611 gcc_assert (!COMPLEX_MODE_P (mode));
5612 #endif
5613
5614 /* Set up defaults for FP operands passed in FP registers, and
5615 integral operands passed in integer registers. */
5616 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5617 basereg = 32 + 16;
5618 else
5619 basereg = 16;
5620 }
5621
5622 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5623 the three platforms, so we can't avoid conditional compilation. */
5624 #if TARGET_ABI_OPEN_VMS
5625 {
5626 if (mode == VOIDmode)
5627 return alpha_arg_info_reg_val (cum);
5628
5629 num_args = cum.num_args;
5630 if (num_args >= 6
5631 || targetm.calls.must_pass_in_stack (mode, type))
5632 return NULL_RTX;
5633 }
5634 #elif TARGET_ABI_UNICOSMK
5635 {
5636 int size;
5637
5638 /* If this is the last argument, generate the call info word (CIW). */
5639 /* ??? We don't include the caller's line number in the CIW because
5640 I don't know how to determine it if debug infos are turned off. */
5641 if (mode == VOIDmode)
5642 {
5643 int i;
5644 HOST_WIDE_INT lo;
5645 HOST_WIDE_INT hi;
5646 rtx ciw;
5647
5648 lo = 0;
5649
5650 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5651 if (cum.reg_args_type[i])
5652 lo |= (1 << (7 - i));
5653
5654 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5655 lo |= 7;
5656 else
5657 lo |= cum.num_reg_words;
5658
5659 #if HOST_BITS_PER_WIDE_INT == 32
5660 hi = (cum.num_args << 20) | cum.num_arg_words;
5661 #else
5662 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5663 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5664 hi = 0;
5665 #endif
5666 ciw = immed_double_const (lo, hi, DImode);
5667
5668 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5669 UNSPEC_UMK_LOAD_CIW);
5670 }
5671
5672 size = ALPHA_ARG_SIZE (mode, type, named);
5673 num_args = cum.num_reg_words;
5674 if (cum.force_stack
5675 || cum.num_reg_words + size > 6
5676 || targetm.calls.must_pass_in_stack (mode, type))
5677 return NULL_RTX;
5678 else if (type && TYPE_MODE (type) == BLKmode)
5679 {
5680 rtx reg1, reg2;
5681
5682 reg1 = gen_rtx_REG (DImode, num_args + 16);
5683 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5684
5685 /* The argument fits in two registers. Note that we still need to
5686 reserve a register for empty structures. */
5687 if (size == 0)
5688 return NULL_RTX;
5689 else if (size == 1)
5690 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5691 else
5692 {
5693 reg2 = gen_rtx_REG (DImode, num_args + 17);
5694 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5695 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5696 }
5697 }
5698 }
5699 #elif TARGET_ABI_OSF
5700 {
5701 if (cum >= 6)
5702 return NULL_RTX;
5703 num_args = cum;
5704
5705 /* VOID is passed as a special flag for "last argument". */
5706 if (type == void_type_node)
5707 basereg = 16;
5708 else if (targetm.calls.must_pass_in_stack (mode, type))
5709 return NULL_RTX;
5710 }
5711 #else
5712 #error Unhandled ABI
5713 #endif
5714
5715 return gen_rtx_REG (mode, num_args + basereg);
5716 }
5717
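/* Return the number of bytes of an argument that are passed in registers
   when the argument straddles the boundary between the six argument
   registers and the stack; zero if it is passed entirely in one or the
   other.  */
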
5718 static int
5719 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5720 enum machine_mode mode ATTRIBUTE_UNUSED,
5721 tree type ATTRIBUTE_UNUSED,
5722 bool named ATTRIBUTE_UNUSED)
5723 {
5724 int words = 0;
5725
5726 #if TARGET_ABI_OPEN_VMS
5727 if (cum->num_args < 6
5728 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5729 words = 6 - cum->num_args;
5730 #elif TARGET_ABI_UNICOSMK
5731 /* Never any split arguments. */
5732 #elif TARGET_ABI_OSF
5733 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5734 words = 6 - *cum;
5735 #else
5736 #error Unhandled ABI
5737 #endif
5738
5739 return words * UNITS_PER_WORD;
5740 }
5741
5742
5743 /* Return true if TYPE must be returned in memory, instead of in registers. */
5744
5745 static bool
5746 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5747 {
5748 enum machine_mode mode = VOIDmode;
5749 int size;
5750
5751 if (type)
5752 {
5753 mode = TYPE_MODE (type);
5754
5755 /* All aggregates are returned in memory. */
5756 if (AGGREGATE_TYPE_P (type))
5757 return true;
5758 }
5759
5760 size = GET_MODE_SIZE (mode);
5761 switch (GET_MODE_CLASS (mode))
5762 {
5763 case MODE_VECTOR_FLOAT:
5764 /* Pass all float vectors in memory, like an aggregate. */
5765 return true;
5766
5767 case MODE_COMPLEX_FLOAT:
5768 /* We judge complex floats on the size of their element,
5769 not the size of the whole type. */
5770 size = GET_MODE_UNIT_SIZE (mode);
5771 break;
5772
5773 case MODE_INT:
5774 case MODE_FLOAT:
5775 case MODE_COMPLEX_INT:
5776 case MODE_VECTOR_INT:
5777 break;
5778
5779 default:
5780 /* ??? We get called on all sorts of random stuff from
5781 aggregate_value_p. We must return something, but it's not
5782 clear what's safe to return. Pretend it's a struct I
5783 guess. */
5784 return true;
5785 }
5786
5787 /* Otherwise types must fit in one register. */
5788 return size > UNITS_PER_WORD;
5789 }
5790
5791 /* Return true if TYPE should be passed by invisible reference. */
5792
5793 static bool
5794 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5795 enum machine_mode mode,
5796 tree type ATTRIBUTE_UNUSED,
5797 bool named ATTRIBUTE_UNUSED)
5798 {
5799 return mode == TFmode || mode == TCmode;
5800 }
5801
5802 /* Define how to find the value returned by a function. VALTYPE is the
5803 data type of the value (as a tree). If the precise function being
5804 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5805 MODE is set instead of VALTYPE for libcalls.
5806
5807 On Alpha the value is found in $0 for integer functions and
5808 $f0 for floating-point functions. */
5809
5810 rtx
5811 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5812 enum machine_mode mode)
5813 {
5814 unsigned int regnum, dummy;
5815 enum mode_class class;
5816
5817 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5818
5819 if (valtype)
5820 mode = TYPE_MODE (valtype);
5821
5822 class = GET_MODE_CLASS (mode);
5823 switch (class)
5824 {
5825 case MODE_INT:
5826 PROMOTE_MODE (mode, dummy, valtype);
5827 /* FALLTHRU */
5828
5829 case MODE_COMPLEX_INT:
5830 case MODE_VECTOR_INT:
5831 regnum = 0;
5832 break;
5833
5834 case MODE_FLOAT:
5835 regnum = 32;
5836 break;
5837
5838 case MODE_COMPLEX_FLOAT:
5839 {
5840 enum machine_mode cmode = GET_MODE_INNER (mode);
5841
5842 return gen_rtx_PARALLEL
5843 (VOIDmode,
5844 gen_rtvec (2,
5845 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5846 const0_rtx),
5847 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5848 GEN_INT (GET_MODE_SIZE (cmode)))));
5849 }
5850
5851 default:
5852 gcc_unreachable ();
5853 }
5854
5855 return gen_rtx_REG (mode, regnum);
5856 }
5857
5858 /* TCmode complex values are passed by invisible reference. We
5859 should not split these values. */
5860
5861 static bool
5862 alpha_split_complex_arg (tree type)
5863 {
5864 return TYPE_MODE (type) != TCmode;
5865 }
5866
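/* Build the va_list type.  VMS and Unicos/Mk use a plain pointer; the
   OSF/1 ABI uses a record containing a __base pointer and an integer
   __offset field used to step through the arguments.  */
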
5867 static tree
5868 alpha_build_builtin_va_list (void)
5869 {
5870 tree base, ofs, space, record, type_decl;
5871
5872 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5873 return ptr_type_node;
5874
5875 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5876 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5877 TREE_CHAIN (record) = type_decl;
5878 TYPE_NAME (record) = type_decl;
5879
5880 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5881
5882 /* Dummy field to prevent alignment warnings. */
5883 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5884 DECL_FIELD_CONTEXT (space) = record;
5885 DECL_ARTIFICIAL (space) = 1;
5886 DECL_IGNORED_P (space) = 1;
5887
5888 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5889 integer_type_node);
5890 DECL_FIELD_CONTEXT (ofs) = record;
5891 TREE_CHAIN (ofs) = space;
5892
5893 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5894 ptr_type_node);
5895 DECL_FIELD_CONTEXT (base) = record;
5896 TREE_CHAIN (base) = ofs;
5897
5898 TYPE_FIELDS (record) = base;
5899 layout_type (record);
5900
5901 va_list_gpr_counter_field = ofs;
5902 return record;
5903 }
5904
5905 #if TARGET_ABI_OSF
5906 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5907 and constant additions. */
5908
5909 static tree
5910 va_list_skip_additions (tree lhs)
5911 {
5912 tree rhs, stmt;
5913
5914 if (TREE_CODE (lhs) != SSA_NAME)
5915 return lhs;
5916
5917 for (;;)
5918 {
5919 stmt = SSA_NAME_DEF_STMT (lhs);
5920
5921 if (TREE_CODE (stmt) == PHI_NODE)
5922 return stmt;
5923
5924 if (TREE_CODE (stmt) != MODIFY_EXPR
5925 || TREE_OPERAND (stmt, 0) != lhs)
5926 return lhs;
5927
5928 rhs = TREE_OPERAND (stmt, 1);
5929 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5930 rhs = TREE_OPERAND (rhs, 0);
5931
5932 if ((TREE_CODE (rhs) != NOP_EXPR
5933 && TREE_CODE (rhs) != CONVERT_EXPR
5934 && (TREE_CODE (rhs) != PLUS_EXPR
5935 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5936 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5937 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5938 return rhs;
5939
5940 lhs = TREE_OPERAND (rhs, 0);
5941 }
5942 }
5943
5944 /* Check if LHS = RHS statement is
5945 LHS = *(ap.__base + ap.__offset + cst)
5946 or
5947 LHS = *(ap.__base
5948 + ((ap.__offset + cst <= 47)
5949 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5950 If the former, indicate that GPR registers are needed,
5951 if the latter, indicate that FPR registers are needed.
5952 On alpha, cfun->va_list_gpr_size is used as size of the needed
5953 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5954 GPR registers are needed and bit 1 set if FPR registers are needed.
5955 Return true if va_list references should not be scanned for the current
5956 statement. */
5957
5958 static bool
5959 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5960 {
5961 tree base, offset, arg1, arg2;
5962 int offset_arg = 1;
5963
5964 if (TREE_CODE (rhs) != INDIRECT_REF
5965 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5966 return false;
5967
5968 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5969 if (lhs == NULL_TREE
5970 || TREE_CODE (lhs) != PLUS_EXPR)
5971 return false;
5972
5973 base = TREE_OPERAND (lhs, 0);
5974 if (TREE_CODE (base) == SSA_NAME)
5975 base = va_list_skip_additions (base);
5976
5977 if (TREE_CODE (base) != COMPONENT_REF
5978 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5979 {
5980 base = TREE_OPERAND (lhs, 0);
5981 if (TREE_CODE (base) == SSA_NAME)
5982 base = va_list_skip_additions (base);
5983
5984 if (TREE_CODE (base) != COMPONENT_REF
5985 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5986 return false;
5987
5988 offset_arg = 0;
5989 }
5990
5991 base = get_base_address (base);
5992 if (TREE_CODE (base) != VAR_DECL
5993 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5994 return false;
5995
5996 offset = TREE_OPERAND (lhs, offset_arg);
5997 if (TREE_CODE (offset) == SSA_NAME)
5998 offset = va_list_skip_additions (offset);
5999
6000 if (TREE_CODE (offset) == PHI_NODE)
6001 {
6002 HOST_WIDE_INT sub;
6003
6004 if (PHI_NUM_ARGS (offset) != 2)
6005 goto escapes;
6006
6007 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
6008 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
6009 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6010 {
6011 tree tem = arg1;
6012 arg1 = arg2;
6013 arg2 = tem;
6014
6015 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
6016 goto escapes;
6017 }
6018 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
6019 goto escapes;
6020
6021 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
6022 if (TREE_CODE (arg2) == MINUS_EXPR)
6023 sub = -sub;
6024 if (sub < -48 || sub > -32)
6025 goto escapes;
6026
6027 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6028 if (arg1 != arg2)
6029 goto escapes;
6030
6031 if (TREE_CODE (arg1) == SSA_NAME)
6032 arg1 = va_list_skip_additions (arg1);
6033
6034 if (TREE_CODE (arg1) != COMPONENT_REF
6035 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6036 || get_base_address (arg1) != base)
6037 goto escapes;
6038
6039 /* Need floating point regs. */
6040 cfun->va_list_fpr_size |= 2;
6041 }
6042 else if (TREE_CODE (offset) != COMPONENT_REF
6043 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6044 || get_base_address (offset) != base)
6045 goto escapes;
6046 else
6047 /* Need general regs. */
6048 cfun->va_list_fpr_size |= 1;
6049 return false;
6050
6051 escapes:
6052 si->va_list_escapes = true;
6053 return false;
6054 }
6055 #endif
6056
6057 /* Perform any actions needed for a function that is receiving a
6058 variable number of arguments. */
6059
6060 static void
6061 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6062 tree type, int *pretend_size, int no_rtl)
6063 {
6064 CUMULATIVE_ARGS cum = *pcum;
6065
6066 /* Skip the current argument. */
6067 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6068
6069 #if TARGET_ABI_UNICOSMK
6070 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6071 arguments on the stack. Unfortunately, it doesn't always store the first
6072 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6073 with stdargs as we always have at least one named argument there. */
6074 if (cum.num_reg_words < 6)
6075 {
6076 if (!no_rtl)
6077 {
6078 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6079 emit_insn (gen_arg_home_umk ());
6080 }
6081 *pretend_size = 0;
6082 }
6083 #elif TARGET_ABI_OPEN_VMS
6084 /* For VMS, we allocate space for all 6 arg registers plus a count.
6085
6086 However, if NO registers need to be saved, don't allocate any space.
6087 This is not only because we won't need the space, but because AP
6088 includes the current_pretend_args_size and we don't want to mess up
6089 any ap-relative addresses already made. */
6090 if (cum.num_args < 6)
6091 {
6092 if (!no_rtl)
6093 {
6094 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6095 emit_insn (gen_arg_home ());
6096 }
6097 *pretend_size = 7 * UNITS_PER_WORD;
6098 }
6099 #else
6100 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6101 only push those that remain. However, if NO registers need to
6102 be saved, don't allocate any space. This is not only because we won't
6103 need the space, but because AP includes the current_pretend_args_size
6104 and we don't want to mess up any ap-relative addresses already made.
6105
6106 If we are not to use the floating-point registers, save the integer
6107 registers where we would put the floating-point registers. This is
6108 not the most efficient way to implement varargs with just one register
6109 class, but it isn't worth doing anything more efficient in this rare
6110 case. */
6111 if (cum >= 6)
6112 return;
6113
6114 if (!no_rtl)
6115 {
6116 int count, set = get_varargs_alias_set ();
6117 rtx tmp;
6118
6119 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6120 if (count > 6 - cum)
6121 count = 6 - cum;
6122
6123 /* Detect whether integer registers or floating-point registers
6124 are needed by the detected va_arg statements. See above for
6125 how these values are computed. Note that the "escape" value
6126 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6127 these bits set. */
6128 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6129
6130 if (cfun->va_list_fpr_size & 1)
6131 {
6132 tmp = gen_rtx_MEM (BLKmode,
6133 plus_constant (virtual_incoming_args_rtx,
6134 (cum + 6) * UNITS_PER_WORD));
6135 MEM_NOTRAP_P (tmp) = 1;
6136 set_mem_alias_set (tmp, set);
6137 move_block_from_reg (16 + cum, tmp, count);
6138 }
6139
6140 if (cfun->va_list_fpr_size & 2)
6141 {
6142 tmp = gen_rtx_MEM (BLKmode,
6143 plus_constant (virtual_incoming_args_rtx,
6144 cum * UNITS_PER_WORD));
6145 MEM_NOTRAP_P (tmp) = 1;
6146 set_mem_alias_set (tmp, set);
6147 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6148 }
6149 }
6150 *pretend_size = 12 * UNITS_PER_WORD;
6151 #endif
6152 }
6153
6154 void
6155 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6156 {
6157 HOST_WIDE_INT offset;
6158 tree t, offset_field, base_field;
6159
6160 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6161 return;
6162
6163 if (TARGET_ABI_UNICOSMK)
6164 std_expand_builtin_va_start (valist, nextarg);
6165
6166 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6167 up by 48, storing fp arg registers in the first 48 bytes, and the
6168 integer arg registers in the next 48 bytes. This is only done,
6169 however, if any integer registers need to be stored.
6170
6171 If no integer registers need be stored, then we must subtract 48
6172 in order to account for the integer arg registers which are counted
6173 in argsize above, but which are not actually stored on the stack.
6174 Must further be careful here about structures straddling the last
6175 integer argument register; that futzes with pretend_args_size,
6176 which changes the meaning of AP. */
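
  /* Illustrative layout implied by the comment above (a sketch, assuming
     UNITS_PER_WORD == 8 and the six argument registers $16-$21/$f16-$f21):

	base - 48 ... base - 1	  FP argument registers
	base	  ... base + 47	  integer argument registers
	base + 48 ...		  arguments passed on the stack

     __offset is measured in bytes from __base, so offsets below 48 select
     register arguments and larger offsets select stack arguments.  */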
6177
6178 if (NUM_ARGS < 6)
6179 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6180 else
6181 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6182
6183 if (TARGET_ABI_OPEN_VMS)
6184 {
6185 nextarg = plus_constant (nextarg, offset);
6186 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6187 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6188 make_tree (ptr_type_node, nextarg));
6189 TREE_SIDE_EFFECTS (t) = 1;
6190
6191 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6192 }
6193 else
6194 {
6195 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6196 offset_field = TREE_CHAIN (base_field);
6197
6198 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6199 valist, base_field, NULL_TREE);
6200 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6201 valist, offset_field, NULL_TREE);
6202
6203 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6204 t = build2 (PLUS_EXPR, ptr_type_node, t,
6205 build_int_cst (NULL_TREE, offset));
6206 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6207 TREE_SIDE_EFFECTS (t) = 1;
6208 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6209
6210 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6211 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6212 TREE_SIDE_EFFECTS (t) = 1;
6213 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6214 }
6215 }
6216
6217 static tree
6218 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6219 {
6220 tree type_size, ptr_type, addend, t, addr, internal_post;
6221
6222 /* If the type could not be passed in registers, skip the block
6223 reserved for the registers. */
6224 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6225 {
6226 t = build_int_cst (TREE_TYPE (offset), 6*8);
6227 t = build2 (MODIFY_EXPR, TREE_TYPE (offset), offset,
6228 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6229 gimplify_and_add (t, pre_p);
6230 }
6231
6232 addend = offset;
6233 ptr_type = build_pointer_type (type);
6234
6235 if (TREE_CODE (type) == COMPLEX_TYPE)
6236 {
6237 tree real_part, imag_part, real_temp;
6238
6239 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6240 offset, pre_p);
6241
6242 /* Copy the value into a new temporary, lest the formal temporary
6243 be reused out from under us. */
6244 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6245
6246 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6247 offset, pre_p);
6248
6249 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6250 }
6251 else if (TREE_CODE (type) == REAL_TYPE)
6252 {
6253 tree fpaddend, cond, fourtyeight;
6254
6255 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6256 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6257 addend, fourtyeight);
6258 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6259 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6260 fpaddend, addend);
6261 }
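
  /* For example (illustrative): with an incoming offset of 16, a double
     argument is loaded from base + 16 - 48 = base - 32, i.e. from the FP
     register save area, while an offset of 56 loads a stacked argument
     from base + 56 as usual.  */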
6262
6263 /* Build the final address and force that value into a temporary. */
6264 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6265 fold_convert (ptr_type, addend));
6266 internal_post = NULL;
6267 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6268 append_to_statement_list (internal_post, pre_p);
6269
6270 /* Update the offset field. */
6271 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6272 if (type_size == NULL || TREE_OVERFLOW (type_size))
6273 t = size_zero_node;
6274 else
6275 {
6276 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6277 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6278 t = size_binop (MULT_EXPR, t, size_int (8));
6279 }
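  /* For instance (illustrative), a 12-byte type yields (12 + 7) / 8 * 8 = 16
     here, so __offset advances by 16 bytes.  */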
6280 t = fold_convert (TREE_TYPE (offset), t);
6281 t = build2 (MODIFY_EXPR, void_type_node, offset,
6282 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6283 gimplify_and_add (t, pre_p);
6284
6285 return build_va_arg_indirect_ref (addr);
6286 }
6287
6288 static tree
6289 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6290 {
6291 tree offset_field, base_field, offset, base, t, r;
6292 bool indirect;
6293
6294 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6295 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6296
6297 base_field = TYPE_FIELDS (va_list_type_node);
6298 offset_field = TREE_CHAIN (base_field);
6299 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6300 valist, base_field, NULL_TREE);
6301 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6302 valist, offset_field, NULL_TREE);
6303
6304 /* Pull the fields of the structure out into temporaries. Since we never
6305 modify the base field, we can use a formal temporary. Sign-extend the
6306 offset field so that it's the proper width for pointer arithmetic. */
6307 base = get_formal_tmp_var (base_field, pre_p);
6308
6309 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6310 offset = get_initialized_tmp_var (t, pre_p, NULL);
6311
6312 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6313 if (indirect)
6314 type = build_pointer_type (type);
6315
6316 /* Find the value. Note that this will be a stable indirection, or
6317 a composite of stable indirections in the case of complex. */
6318 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6319
6320 /* Stuff the offset temporary back into its field. */
6321 t = build2 (MODIFY_EXPR, void_type_node, offset_field,
6322 fold_convert (TREE_TYPE (offset_field), offset));
6323 gimplify_and_add (t, pre_p);
6324
6325 if (indirect)
6326 r = build_va_arg_indirect_ref (r);
6327
6328 return r;
6329 }
6330 \f
6331 /* Builtins. */
6332
6333 enum alpha_builtin
6334 {
6335 ALPHA_BUILTIN_CMPBGE,
6336 ALPHA_BUILTIN_EXTBL,
6337 ALPHA_BUILTIN_EXTWL,
6338 ALPHA_BUILTIN_EXTLL,
6339 ALPHA_BUILTIN_EXTQL,
6340 ALPHA_BUILTIN_EXTWH,
6341 ALPHA_BUILTIN_EXTLH,
6342 ALPHA_BUILTIN_EXTQH,
6343 ALPHA_BUILTIN_INSBL,
6344 ALPHA_BUILTIN_INSWL,
6345 ALPHA_BUILTIN_INSLL,
6346 ALPHA_BUILTIN_INSQL,
6347 ALPHA_BUILTIN_INSWH,
6348 ALPHA_BUILTIN_INSLH,
6349 ALPHA_BUILTIN_INSQH,
6350 ALPHA_BUILTIN_MSKBL,
6351 ALPHA_BUILTIN_MSKWL,
6352 ALPHA_BUILTIN_MSKLL,
6353 ALPHA_BUILTIN_MSKQL,
6354 ALPHA_BUILTIN_MSKWH,
6355 ALPHA_BUILTIN_MSKLH,
6356 ALPHA_BUILTIN_MSKQH,
6357 ALPHA_BUILTIN_UMULH,
6358 ALPHA_BUILTIN_ZAP,
6359 ALPHA_BUILTIN_ZAPNOT,
6360 ALPHA_BUILTIN_AMASK,
6361 ALPHA_BUILTIN_IMPLVER,
6362 ALPHA_BUILTIN_RPCC,
6363 ALPHA_BUILTIN_THREAD_POINTER,
6364 ALPHA_BUILTIN_SET_THREAD_POINTER,
6365
6366 /* TARGET_MAX */
6367 ALPHA_BUILTIN_MINUB8,
6368 ALPHA_BUILTIN_MINSB8,
6369 ALPHA_BUILTIN_MINUW4,
6370 ALPHA_BUILTIN_MINSW4,
6371 ALPHA_BUILTIN_MAXUB8,
6372 ALPHA_BUILTIN_MAXSB8,
6373 ALPHA_BUILTIN_MAXUW4,
6374 ALPHA_BUILTIN_MAXSW4,
6375 ALPHA_BUILTIN_PERR,
6376 ALPHA_BUILTIN_PKLB,
6377 ALPHA_BUILTIN_PKWB,
6378 ALPHA_BUILTIN_UNPKBL,
6379 ALPHA_BUILTIN_UNPKBW,
6380
6381 /* TARGET_CIX */
6382 ALPHA_BUILTIN_CTTZ,
6383 ALPHA_BUILTIN_CTLZ,
6384 ALPHA_BUILTIN_CTPOP,
6385
6386 ALPHA_BUILTIN_max
6387 };
6388
6389 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6390 CODE_FOR_builtin_cmpbge,
6391 CODE_FOR_builtin_extbl,
6392 CODE_FOR_builtin_extwl,
6393 CODE_FOR_builtin_extll,
6394 CODE_FOR_builtin_extql,
6395 CODE_FOR_builtin_extwh,
6396 CODE_FOR_builtin_extlh,
6397 CODE_FOR_builtin_extqh,
6398 CODE_FOR_builtin_insbl,
6399 CODE_FOR_builtin_inswl,
6400 CODE_FOR_builtin_insll,
6401 CODE_FOR_builtin_insql,
6402 CODE_FOR_builtin_inswh,
6403 CODE_FOR_builtin_inslh,
6404 CODE_FOR_builtin_insqh,
6405 CODE_FOR_builtin_mskbl,
6406 CODE_FOR_builtin_mskwl,
6407 CODE_FOR_builtin_mskll,
6408 CODE_FOR_builtin_mskql,
6409 CODE_FOR_builtin_mskwh,
6410 CODE_FOR_builtin_msklh,
6411 CODE_FOR_builtin_mskqh,
6412 CODE_FOR_umuldi3_highpart,
6413 CODE_FOR_builtin_zap,
6414 CODE_FOR_builtin_zapnot,
6415 CODE_FOR_builtin_amask,
6416 CODE_FOR_builtin_implver,
6417 CODE_FOR_builtin_rpcc,
6418 CODE_FOR_load_tp,
6419 CODE_FOR_set_tp,
6420
6421 /* TARGET_MAX */
6422 CODE_FOR_builtin_minub8,
6423 CODE_FOR_builtin_minsb8,
6424 CODE_FOR_builtin_minuw4,
6425 CODE_FOR_builtin_minsw4,
6426 CODE_FOR_builtin_maxub8,
6427 CODE_FOR_builtin_maxsb8,
6428 CODE_FOR_builtin_maxuw4,
6429 CODE_FOR_builtin_maxsw4,
6430 CODE_FOR_builtin_perr,
6431 CODE_FOR_builtin_pklb,
6432 CODE_FOR_builtin_pkwb,
6433 CODE_FOR_builtin_unpkbl,
6434 CODE_FOR_builtin_unpkbw,
6435
6436 /* TARGET_CIX */
6437 CODE_FOR_ctzdi2,
6438 CODE_FOR_clzdi2,
6439 CODE_FOR_popcountdi2
6440 };
6441
6442 struct alpha_builtin_def
6443 {
6444 const char *name;
6445 enum alpha_builtin code;
6446 unsigned int target_mask;
6447 bool is_const;
6448 };
6449
6450 static struct alpha_builtin_def const zero_arg_builtins[] = {
6451 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6452 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6453 };
6454
6455 static struct alpha_builtin_def const one_arg_builtins[] = {
6456 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6457 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6458 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6459 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6460 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6461 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6462 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6463 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6464 };
6465
6466 static struct alpha_builtin_def const two_arg_builtins[] = {
6467 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6468 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6469 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6470 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6471 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6472 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6473 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6474 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6475 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6476 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6477 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6478 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6479 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6480 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6481 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6482 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6483 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6484 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6485 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6486 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6487 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6488 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6489 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6490 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6491 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6492 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6493 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6494 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6495 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6496 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6497 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6498 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6499 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6500 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6501 };
6502
6503 static GTY(()) tree alpha_v8qi_u;
6504 static GTY(()) tree alpha_v8qi_s;
6505 static GTY(()) tree alpha_v4hi_u;
6506 static GTY(()) tree alpha_v4hi_s;
6507
6508 static void
6509 alpha_init_builtins (void)
6510 {
6511 const struct alpha_builtin_def *p;
6512 tree dimode_integer_type_node;
6513 tree ftype, attrs[2];
6514 size_t i;
6515
6516 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6517
6518 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6519 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6520
6521 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6522
6523 p = zero_arg_builtins;
6524 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6525 if ((target_flags & p->target_mask) == p->target_mask)
6526 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6527 NULL, attrs[p->is_const]);
6528
6529 ftype = build_function_type_list (dimode_integer_type_node,
6530 dimode_integer_type_node, NULL_TREE);
6531
6532 p = one_arg_builtins;
6533 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6534 if ((target_flags & p->target_mask) == p->target_mask)
6535 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6536 NULL, attrs[p->is_const]);
6537
6538 ftype = build_function_type_list (dimode_integer_type_node,
6539 dimode_integer_type_node,
6540 dimode_integer_type_node, NULL_TREE);
6541
6542 p = two_arg_builtins;
6543 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6544 if ((target_flags & p->target_mask) == p->target_mask)
6545 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6546 NULL, attrs[p->is_const]);
6547
6548 ftype = build_function_type (ptr_type_node, void_list_node);
6549 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6550 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6551 NULL, attrs[0]);
6552
6553 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6554 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6555 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6556 NULL, attrs[0]);
6557
6558 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6559 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6560 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6561 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6562 }
6563
6564 /* Expand an expression EXP that calls a built-in function,
6565 with result going to TARGET if that's convenient
6566 (and in mode MODE if that's convenient).
6567 SUBTARGET may be used as the target for computing one of EXP's operands.
6568 IGNORE is nonzero if the value is to be ignored. */
6569
6570 static rtx
6571 alpha_expand_builtin (tree exp, rtx target,
6572 rtx subtarget ATTRIBUTE_UNUSED,
6573 enum machine_mode mode ATTRIBUTE_UNUSED,
6574 int ignore ATTRIBUTE_UNUSED)
6575 {
6576 #define MAX_ARGS 2
6577
6578 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6579 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6580 tree arglist = TREE_OPERAND (exp, 1);
6581 enum insn_code icode;
6582 rtx op[MAX_ARGS], pat;
6583 int arity;
6584 bool nonvoid;
6585
6586 if (fcode >= ALPHA_BUILTIN_max)
6587 internal_error ("bad builtin fcode");
6588 icode = code_for_builtin[fcode];
6589 if (icode == 0)
6590 internal_error ("bad builtin fcode");
6591
6592 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6593
6594 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6595 arglist;
6596 arglist = TREE_CHAIN (arglist), arity++)
6597 {
6598 const struct insn_operand_data *insn_op;
6599
6600 tree arg = TREE_VALUE (arglist);
6601 if (arg == error_mark_node)
6602 return NULL_RTX;
6603 if (arity >= MAX_ARGS)
6604 return NULL_RTX;
6605
6606 insn_op = &insn_data[icode].operand[arity + nonvoid];
6607
6608 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6609
6610 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6611 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6612 }
6613
6614 if (nonvoid)
6615 {
6616 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6617 if (!target
6618 || GET_MODE (target) != tmode
6619 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6620 target = gen_reg_rtx (tmode);
6621 }
6622
6623 switch (arity)
6624 {
6625 case 0:
6626 pat = GEN_FCN (icode) (target);
6627 break;
6628 case 1:
6629 if (nonvoid)
6630 pat = GEN_FCN (icode) (target, op[0]);
6631 else
6632 pat = GEN_FCN (icode) (op[0]);
6633 break;
6634 case 2:
6635 pat = GEN_FCN (icode) (target, op[0], op[1]);
6636 break;
6637 default:
6638 gcc_unreachable ();
6639 }
6640 if (!pat)
6641 return NULL_RTX;
6642 emit_insn (pat);
6643
6644 if (nonvoid)
6645 return target;
6646 else
6647 return const0_rtx;
6648 }
6649
6650
6651 /* Several bits below assume HWI >= 64 bits. This should be enforced
6652 by config.gcc. */
6653 #if HOST_BITS_PER_WIDE_INT < 64
6654 # error "HOST_WIDE_INT too small"
6655 #endif
6656
6657 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6658 with an 8 bit output vector. OPINT contains the integer operands; bit N
6659 of OP_CONST is set if OPINT[N] is valid. */
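
/* A worked example of the fold below (values chosen for illustration):
   with opint[0] == 0 and opint[1] == 0x0100, byte 1 of the first operand
   (0x00) is below byte 1 of the second (0x01) while every other byte pair
   compares 0 >= 0, so the folded result is 0xfd.  */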
6660
6661 static tree
6662 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6663 {
6664 if (op_const == 3)
6665 {
6666 int i, val;
6667 for (i = 0, val = 0; i < 8; ++i)
6668 {
6669 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6670 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6671 if (c0 >= c1)
6672 val |= 1 << i;
6673 }
6674 return build_int_cst (long_integer_type_node, val);
6675 }
6676 else if (op_const == 2 && opint[1] == 0)
6677 return build_int_cst (long_integer_type_node, 0xff);
6678 return NULL;
6679 }
6680
6681 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6682 specialized form of an AND operation. Other byte manipulation instructions
6683 are defined in terms of this instruction, so this is also used as a
6684 subroutine for other builtins.
6685
6686 OP contains the tree operands; OPINT contains the extracted integer values.
6687 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6688 OPINT may be considered. */
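
/* A worked example (illustrative): a selector of 0x0f builds the mask
   0x00000000ffffffff below, so zapnot of the constant 0x1122334455667788
   folds to 0x55667788.  The ZAP builtin simply complements the selector
   before reusing this routine.  */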
6689
6690 static tree
6691 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6692 long op_const)
6693 {
6694 if (op_const & 2)
6695 {
6696 unsigned HOST_WIDE_INT mask = 0;
6697 int i;
6698
6699 for (i = 0; i < 8; ++i)
6700 if ((opint[1] >> i) & 1)
6701 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6702
6703 if (op_const & 1)
6704 return build_int_cst (long_integer_type_node, opint[0] & mask);
6705
6706 if (op)
6707 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6708 build_int_cst (long_integer_type_node, mask)));
6709 }
6710 else if ((op_const & 1) && opint[0] == 0)
6711 return build_int_cst (long_integer_type_node, 0);
6712 return NULL;
6713 }
6714
6715 /* Fold the builtins for the EXT family of instructions. */
6716
6717 static tree
6718 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6719 long op_const, unsigned HOST_WIDE_INT bytemask,
6720 bool is_high)
6721 {
6722 long zap_const = 2;
6723 tree *zap_op = NULL;
6724
6725 if (op_const & 2)
6726 {
6727 unsigned HOST_WIDE_INT loc;
6728
6729 loc = opint[1] & 7;
6730 if (BYTES_BIG_ENDIAN)
6731 loc ^= 7;
6732 loc *= 8;
6733
6734 if (loc != 0)
6735 {
6736 if (op_const & 1)
6737 {
6738 unsigned HOST_WIDE_INT temp = opint[0];
6739 if (is_high)
6740 temp <<= loc;
6741 else
6742 temp >>= loc;
6743 opint[0] = temp;
6744 zap_const = 3;
6745 }
6746 }
6747 else
6748 zap_op = op;
6749 }
6750
6751 opint[1] = bytemask;
6752 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6753 }
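
/* For example (illustrative, little-endian): folding
   __builtin_alpha_extwl (x, 2) with both arguments constant shifts the
   first operand right by 2*8 bits and then applies zapnot with bytemask
   0x03, i.e. it yields (x >> 16) & 0xffff.  */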
6754
6755 /* Fold the builtins for the INS family of instructions. */
6756
6757 static tree
6758 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6759 long op_const, unsigned HOST_WIDE_INT bytemask,
6760 bool is_high)
6761 {
6762 if ((op_const & 1) && opint[0] == 0)
6763 return build_int_cst (long_integer_type_node, 0);
6764
6765 if (op_const & 2)
6766 {
6767 unsigned HOST_WIDE_INT temp, loc, byteloc;
6768 tree *zap_op = NULL;
6769
6770 loc = opint[1] & 7;
6771 if (BYTES_BIG_ENDIAN)
6772 loc ^= 7;
6773 bytemask <<= loc;
6774
6775 temp = opint[0];
6776 if (is_high)
6777 {
6778 byteloc = (64 - (loc * 8)) & 0x3f;
6779 if (byteloc == 0)
6780 zap_op = op;
6781 else
6782 temp >>= byteloc;
6783 bytemask >>= 8;
6784 }
6785 else
6786 {
6787 byteloc = loc * 8;
6788 if (byteloc == 0)
6789 zap_op = op;
6790 else
6791 temp <<= byteloc;
6792 }
6793
6794 opint[0] = temp;
6795 opint[1] = bytemask;
6796 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6797 }
6798
6799 return NULL;
6800 }
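
/* For example (illustrative, little-endian): folding
   __builtin_alpha_insbl (x, 3) with constant arguments shifts the first
   operand left by 3*8 bits and keeps only byte 3 of the result, i.e. it
   yields (x & 0xff) << 24.  */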
6801
6802 static tree
6803 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6804 long op_const, unsigned HOST_WIDE_INT bytemask,
6805 bool is_high)
6806 {
6807 if (op_const & 2)
6808 {
6809 unsigned HOST_WIDE_INT loc;
6810
6811 loc = opint[1] & 7;
6812 if (BYTES_BIG_ENDIAN)
6813 loc ^= 7;
6814 bytemask <<= loc;
6815
6816 if (is_high)
6817 bytemask >>= 8;
6818
6819 opint[1] = bytemask ^ 0xff;
6820 }
6821
6822 return alpha_fold_builtin_zapnot (op, opint, op_const);
6823 }
6824
6825 static tree
6826 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6827 {
6828 switch (op_const)
6829 {
6830 case 3:
6831 {
6832 unsigned HOST_WIDE_INT l;
6833 HOST_WIDE_INT h;
6834
6835 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6836
6837 #if HOST_BITS_PER_WIDE_INT > 64
6838 # error fixme
6839 #endif
6840
6841 return build_int_cst (long_integer_type_node, h);
6842 }
6843
6844 case 1:
6845 opint[1] = opint[0];
6846 /* FALLTHRU */
6847 case 2:
6848 /* Note that (X*1) >> 64 == 0. */
6849 if (opint[1] == 0 || opint[1] == 1)
6850 return build_int_cst (long_integer_type_node, 0);
6851 break;
6852 }
6853 return NULL;
6854 }
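
/* For example (illustrative): __builtin_alpha_umulh (1L << 32, 1L << 32)
   folds to 1, the high 64 bits of the 128-bit product 1 << 64; and as the
   comment above notes, multiplying by 0 or 1 folds to 0.  */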
6855
6856 static tree
6857 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6858 {
6859 tree op0 = fold_convert (vtype, op[0]);
6860 tree op1 = fold_convert (vtype, op[1]);
6861 tree val = fold (build2 (code, vtype, op0, op1));
6862 return fold_convert (long_integer_type_node, val);
6863 }
6864
6865 static tree
6866 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6867 {
6868 unsigned HOST_WIDE_INT temp = 0;
6869 int i;
6870
6871 if (op_const != 3)
6872 return NULL;
6873
6874 for (i = 0; i < 8; ++i)
6875 {
6876 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6877 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6878 if (a >= b)
6879 temp += a - b;
6880 else
6881 temp += b - a;
6882 }
6883
6884 return build_int_cst (long_integer_type_node, temp);
6885 }
6886
6887 static tree
6888 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6889 {
6890 unsigned HOST_WIDE_INT temp;
6891
6892 if (op_const == 0)
6893 return NULL;
6894
6895 temp = opint[0] & 0xff;
6896 temp |= (opint[0] >> 24) & 0xff00;
6897
6898 return build_int_cst (long_integer_type_node, temp);
6899 }
6900
6901 static tree
6902 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6903 {
6904 unsigned HOST_WIDE_INT temp;
6905
6906 if (op_const == 0)
6907 return NULL;
6908
6909 temp = opint[0] & 0xff;
6910 temp |= (opint[0] >> 8) & 0xff00;
6911 temp |= (opint[0] >> 16) & 0xff0000;
6912 temp |= (opint[0] >> 24) & 0xff000000;
6913
6914 return build_int_cst (long_integer_type_node, temp);
6915 }
6916
6917 static tree
6918 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6919 {
6920 unsigned HOST_WIDE_INT temp;
6921
6922 if (op_const == 0)
6923 return NULL;
6924
6925 temp = opint[0] & 0xff;
6926 temp |= (opint[0] & 0xff00) << 24;
6927
6928 return build_int_cst (long_integer_type_node, temp);
6929 }
6930
6931 static tree
6932 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6933 {
6934 unsigned HOST_WIDE_INT temp;
6935
6936 if (op_const == 0)
6937 return NULL;
6938
6939 temp = opint[0] & 0xff;
6940 temp |= (opint[0] & 0x0000ff00) << 8;
6941 temp |= (opint[0] & 0x00ff0000) << 16;
6942 temp |= (opint[0] & 0xff000000) << 24;
6943
6944 return build_int_cst (long_integer_type_node, temp);
6945 }
6946
6947 static tree
6948 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6949 {
6950 unsigned HOST_WIDE_INT temp;
6951
6952 if (op_const == 0)
6953 return NULL;
6954
6955 if (opint[0] == 0)
6956 temp = 64;
6957 else
6958 temp = exact_log2 (opint[0] & -opint[0]);
6959
6960 return build_int_cst (long_integer_type_node, temp);
6961 }
6962
6963 static tree
6964 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6965 {
6966 unsigned HOST_WIDE_INT temp;
6967
6968 if (op_const == 0)
6969 return NULL;
6970
6971 if (opint[0] == 0)
6972 temp = 64;
6973 else
6974 temp = 64 - floor_log2 (opint[0]) - 1;
6975
6976 return build_int_cst (long_integer_type_node, temp);
6977 }
6978
6979 static tree
6980 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6981 {
6982 unsigned HOST_WIDE_INT temp, op;
6983
6984 if (op_const == 0)
6985 return NULL;
6986
6987 op = opint[0];
6988 temp = 0;
6989 while (op)
6990 temp++, op &= op - 1;
6991
6992 return build_int_cst (long_integer_type_node, temp);
6993 }
6994
6995 /* Fold one of our builtin functions. */
6996
6997 static tree
6998 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6999 {
7000 tree op[MAX_ARGS], t;
7001 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7002 long op_const = 0, arity = 0;
7003
7004 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
7005 {
7006 tree arg = TREE_VALUE (t);
7007 if (arg == error_mark_node)
7008 return NULL;
7009 if (arity >= MAX_ARGS)
7010 return NULL;
7011
7012 op[arity] = arg;
7013 opint[arity] = 0;
7014 if (TREE_CODE (arg) == INTEGER_CST)
7015 {
7016 op_const |= 1L << arity;
7017 opint[arity] = int_cst_value (arg);
7018 }
7019 }
7020
7021 switch (DECL_FUNCTION_CODE (fndecl))
7022 {
7023 case ALPHA_BUILTIN_CMPBGE:
7024 return alpha_fold_builtin_cmpbge (opint, op_const);
7025
7026 case ALPHA_BUILTIN_EXTBL:
7027 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7028 case ALPHA_BUILTIN_EXTWL:
7029 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7030 case ALPHA_BUILTIN_EXTLL:
7031 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7032 case ALPHA_BUILTIN_EXTQL:
7033 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7034 case ALPHA_BUILTIN_EXTWH:
7035 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7036 case ALPHA_BUILTIN_EXTLH:
7037 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7038 case ALPHA_BUILTIN_EXTQH:
7039 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7040
7041 case ALPHA_BUILTIN_INSBL:
7042 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7043 case ALPHA_BUILTIN_INSWL:
7044 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7045 case ALPHA_BUILTIN_INSLL:
7046 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7047 case ALPHA_BUILTIN_INSQL:
7048 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7049 case ALPHA_BUILTIN_INSWH:
7050 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7051 case ALPHA_BUILTIN_INSLH:
7052 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7053 case ALPHA_BUILTIN_INSQH:
7054 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7055
7056 case ALPHA_BUILTIN_MSKBL:
7057 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7058 case ALPHA_BUILTIN_MSKWL:
7059 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7060 case ALPHA_BUILTIN_MSKLL:
7061 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7062 case ALPHA_BUILTIN_MSKQL:
7063 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7064 case ALPHA_BUILTIN_MSKWH:
7065 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7066 case ALPHA_BUILTIN_MSKLH:
7067 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7068 case ALPHA_BUILTIN_MSKQH:
7069 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7070
7071 case ALPHA_BUILTIN_UMULH:
7072 return alpha_fold_builtin_umulh (opint, op_const);
7073
7074 case ALPHA_BUILTIN_ZAP:
7075 opint[1] ^= 0xff;
7076 /* FALLTHRU */
7077 case ALPHA_BUILTIN_ZAPNOT:
7078 return alpha_fold_builtin_zapnot (op, opint, op_const);
7079
7080 case ALPHA_BUILTIN_MINUB8:
7081 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7082 case ALPHA_BUILTIN_MINSB8:
7083 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7084 case ALPHA_BUILTIN_MINUW4:
7085 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7086 case ALPHA_BUILTIN_MINSW4:
7087 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7088 case ALPHA_BUILTIN_MAXUB8:
7089 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7090 case ALPHA_BUILTIN_MAXSB8:
7091 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7092 case ALPHA_BUILTIN_MAXUW4:
7093 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7094 case ALPHA_BUILTIN_MAXSW4:
7095 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7096
7097 case ALPHA_BUILTIN_PERR:
7098 return alpha_fold_builtin_perr (opint, op_const);
7099 case ALPHA_BUILTIN_PKLB:
7100 return alpha_fold_builtin_pklb (opint, op_const);
7101 case ALPHA_BUILTIN_PKWB:
7102 return alpha_fold_builtin_pkwb (opint, op_const);
7103 case ALPHA_BUILTIN_UNPKBL:
7104 return alpha_fold_builtin_unpkbl (opint, op_const);
7105 case ALPHA_BUILTIN_UNPKBW:
7106 return alpha_fold_builtin_unpkbw (opint, op_const);
7107
7108 case ALPHA_BUILTIN_CTTZ:
7109 return alpha_fold_builtin_cttz (opint, op_const);
7110 case ALPHA_BUILTIN_CTLZ:
7111 return alpha_fold_builtin_ctlz (opint, op_const);
7112 case ALPHA_BUILTIN_CTPOP:
7113 return alpha_fold_builtin_ctpop (opint, op_const);
7114
7115 case ALPHA_BUILTIN_AMASK:
7116 case ALPHA_BUILTIN_IMPLVER:
7117 case ALPHA_BUILTIN_RPCC:
7118 case ALPHA_BUILTIN_THREAD_POINTER:
7119 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7120 /* None of these are foldable at compile-time. */
7121 default:
7122 return NULL;
7123 }
7124 }
7125 \f
7126 /* This page contains routines that are used to determine what the function
7127 prologue and epilogue code will do and write them out. */
7128
7129 /* Compute the size of the save area in the stack. */
7130
7131 /* These variables are used for communication between the following functions.
7132 They indicate various things about the current function being compiled
7133 that are used to tell what kind of prologue, epilogue and procedure
7134 descriptor to generate. */
7135
7136 /* Type of procedure (null, register frame, or stack frame) required by the current function. */
7137 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7138 static enum alpha_procedure_types alpha_procedure_type;
7139
7140 /* Register number (either FP or SP) that is used to unwind the frame. */
7141 static int vms_unwind_regno;
7142
7143 /* Register number used to save FP. We need not have one for RA since
7144 we don't modify it for register procedures. This is only defined
7145 for register frame procedures. */
7146 static int vms_save_fp_regno;
7147
7148 /* Register number used to reference objects off our PV. */
7149 static int vms_base_regno;
7150
7151 /* Compute register masks for saved registers. */
7152
7153 static void
7154 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7155 {
7156 unsigned long imask = 0;
7157 unsigned long fmask = 0;
7158 unsigned int i;
7159
7160 /* When outputting a thunk, we don't have valid register life info,
7161 but assemble_start_function wants to output .frame and .mask
7162 directives. */
7163 if (current_function_is_thunk)
7164 {
7165 *imaskP = 0;
7166 *fmaskP = 0;
7167 return;
7168 }
7169
7170 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7171 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7172
7173 /* One for every register we have to save. */
7174 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7175 if (! fixed_regs[i] && ! call_used_regs[i]
7176 && regs_ever_live[i] && i != REG_RA
7177 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7178 {
7179 if (i < 32)
7180 imask |= (1UL << i);
7181 else
7182 fmask |= (1UL << (i - 32));
7183 }
7184
7185 /* We need to restore these for the handler. */
7186 if (current_function_calls_eh_return)
7187 {
7188 for (i = 0; ; ++i)
7189 {
7190 unsigned regno = EH_RETURN_DATA_REGNO (i);
7191 if (regno == INVALID_REGNUM)
7192 break;
7193 imask |= 1UL << regno;
7194 }
7195 }
7196
7197 /* If any register spilled, then spill the return address also. */
7198 /* ??? This is required by the Digital stack unwind specification
7199 and isn't needed if we're doing Dwarf2 unwinding. */
7200 if (imask || fmask || alpha_ra_ever_killed ())
7201 imask |= (1UL << REG_RA);
7202
7203 *imaskP = imask;
7204 *fmaskP = fmask;
7205 }
7206
7207 int
7208 alpha_sa_size (void)
7209 {
7210 unsigned long mask[2];
7211 int sa_size = 0;
7212 int i, j;
7213
7214 alpha_sa_mask (&mask[0], &mask[1]);
7215
7216 if (TARGET_ABI_UNICOSMK)
7217 {
7218 if (mask[0] || mask[1])
7219 sa_size = 14;
7220 }
7221 else
7222 {
7223 for (j = 0; j < 2; ++j)
7224 for (i = 0; i < 32; ++i)
7225 if ((mask[j] >> i) & 1)
7226 sa_size++;
7227 }
7228
7229 if (TARGET_ABI_UNICOSMK)
7230 {
7231 /* We might not need to generate a frame if we don't make any calls
7232 (including calls to __T3E_MISMATCH if this is a vararg function),
7233 don't have any local variables which require stack slots, don't
7234 use alloca and have not determined that we need a frame for other
7235 reasons. */
7236
7237 alpha_procedure_type
7238 = (sa_size || get_frame_size() != 0
7239 || current_function_outgoing_args_size
7240 || current_function_stdarg || current_function_calls_alloca
7241 || frame_pointer_needed)
7242 ? PT_STACK : PT_REGISTER;
7243
7244 /* Always reserve space for saving callee-saved registers if we
7245 need a frame as required by the calling convention. */
7246 if (alpha_procedure_type == PT_STACK)
7247 sa_size = 14;
7248 }
7249 else if (TARGET_ABI_OPEN_VMS)
7250 {
7251 /* Start by assuming we can use a register procedure if we don't
7252 make any calls (REG_RA not used) or need to save any
7253 registers and a stack procedure if we do. */
7254 if ((mask[0] >> REG_RA) & 1)
7255 alpha_procedure_type = PT_STACK;
7256 else if (get_frame_size() != 0)
7257 alpha_procedure_type = PT_REGISTER;
7258 else
7259 alpha_procedure_type = PT_NULL;
7260
7261 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7262 made the final decision on stack procedure vs register procedure. */
7263 if (alpha_procedure_type == PT_STACK)
7264 sa_size -= 2;
7265
7266 /* Decide whether to refer to objects off our PV via FP or PV.
7267 If we need FP for something else or if we receive a nonlocal
7268 goto (which expects PV to contain the value), we must use PV.
7269 Otherwise, start by assuming we can use FP. */
7270
7271 vms_base_regno
7272 = (frame_pointer_needed
7273 || current_function_has_nonlocal_label
7274 || alpha_procedure_type == PT_STACK
7275 || current_function_outgoing_args_size)
7276 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7277
7278 /* If we want to copy PV into FP, we need to find some register
7279 in which to save FP. */
7280
7281 vms_save_fp_regno = -1;
7282 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7283 for (i = 0; i < 32; i++)
7284 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7285 vms_save_fp_regno = i;
7286
7287 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7288 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7289 else if (alpha_procedure_type == PT_NULL)
7290 vms_base_regno = REG_PV;
7291
7292 /* Stack unwinding should be done via FP unless we use it for PV. */
7293 vms_unwind_regno = (vms_base_regno == REG_PV
7294 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7295
7296 /* If this is a stack procedure, allow space for saving FP and RA. */
7297 if (alpha_procedure_type == PT_STACK)
7298 sa_size += 2;
7299 }
7300 else
7301 {
7302 /* Our size must be even (multiple of 16 bytes). */
7303 if (sa_size & 1)
7304 sa_size++;
7305 }
7306
7307 return sa_size * 8;
7308 }
7309
7310 /* Define the offset between two registers, one to be eliminated,
7311 and the other its replacement, at the start of a routine. */
7312
7313 HOST_WIDE_INT
7314 alpha_initial_elimination_offset (unsigned int from,
7315 unsigned int to ATTRIBUTE_UNUSED)
7316 {
7317 HOST_WIDE_INT ret;
7318
7319 ret = alpha_sa_size ();
7320 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7321
7322 switch (from)
7323 {
7324 case FRAME_POINTER_REGNUM:
7325 break;
7326
7327 case ARG_POINTER_REGNUM:
7328 ret += (ALPHA_ROUND (get_frame_size ()
7329 + current_function_pretend_args_size)
7330 - current_function_pretend_args_size);
7331 break;
7332
7333 default:
7334 gcc_unreachable ();
7335 }
7336
7337 return ret;
7338 }
7339
7340 int
7341 alpha_pv_save_size (void)
7342 {
7343 alpha_sa_size ();
7344 return alpha_procedure_type == PT_STACK ? 8 : 0;
7345 }
7346
7347 int
7348 alpha_using_fp (void)
7349 {
7350 alpha_sa_size ();
7351 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7352 }
7353
7354 #if TARGET_ABI_OPEN_VMS
7355
7356 const struct attribute_spec vms_attribute_table[] =
7357 {
7358 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7359 { "overlaid", 0, 0, true, false, false, NULL },
7360 { "global", 0, 0, true, false, false, NULL },
7361 { "initialize", 0, 0, true, false, false, NULL },
7362 { NULL, 0, 0, false, false, false, NULL }
7363 };
7364
7365 #endif
7366
7367 static int
7368 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7369 {
7370 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7371 }
7372
7373 int
7374 alpha_find_lo_sum_using_gp (rtx insn)
7375 {
7376 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7377 }
7378
7379 static int
7380 alpha_does_function_need_gp (void)
7381 {
7382 rtx insn;
7383
7384 /* The GP being variable is an OSF abi thing. */
7385 if (! TARGET_ABI_OSF)
7386 return 0;
7387
7388 /* We need the gp to load the address of __mcount. */
7389 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7390 return 1;
7391
7392 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7393 if (current_function_is_thunk)
7394 return 1;
7395
7396 /* The nonlocal receiver pattern assumes that the gp is valid for
7397 the nested function. Reasonable because it's almost always set
7398 correctly already. For the cases where that's wrong, make sure
7399 the nested function loads its gp on entry. */
7400 if (current_function_has_nonlocal_goto)
7401 return 1;
7402
7403 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7404 Even if we are a static function, we still need to do this in case
7405 our address is taken and passed to something like qsort. */
7406
7407 push_topmost_sequence ();
7408 insn = get_insns ();
7409 pop_topmost_sequence ();
7410
7411 for (; insn; insn = NEXT_INSN (insn))
7412 if (INSN_P (insn)
7413 && ! JUMP_TABLE_DATA_P (insn)
7414 && GET_CODE (PATTERN (insn)) != USE
7415 && GET_CODE (PATTERN (insn)) != CLOBBER
7416 && get_attr_usegp (insn))
7417 return 1;
7418
7419 return 0;
7420 }
7421
7422 \f
7423 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7424 sequences. */
7425
7426 static rtx
7427 set_frame_related_p (void)
7428 {
7429 rtx seq = get_insns ();
7430 rtx insn;
7431
7432 end_sequence ();
7433
7434 if (!seq)
7435 return NULL_RTX;
7436
7437 if (INSN_P (seq))
7438 {
7439 insn = seq;
7440 while (insn != NULL_RTX)
7441 {
7442 RTX_FRAME_RELATED_P (insn) = 1;
7443 insn = NEXT_INSN (insn);
7444 }
7445 seq = emit_insn (seq);
7446 }
7447 else
7448 {
7449 seq = emit_insn (seq);
7450 RTX_FRAME_RELATED_P (seq) = 1;
7451 }
7452 return seq;
7453 }
7454
7455 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7456
7457 /* Generates a store with the proper unwind info attached. VALUE is
7458 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7459 contains SP+FRAME_BIAS, and that is the unwind info that should be
7460 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7461 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7462
7463 static void
7464 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7465 HOST_WIDE_INT base_ofs, rtx frame_reg)
7466 {
7467 rtx addr, mem, insn;
7468
7469 addr = plus_constant (base_reg, base_ofs);
7470 mem = gen_rtx_MEM (DImode, addr);
7471 set_mem_alias_set (mem, alpha_sr_alias_set);
7472
7473 insn = emit_move_insn (mem, value);
7474 RTX_FRAME_RELATED_P (insn) = 1;
7475
7476 if (frame_bias || value != frame_reg)
7477 {
7478 if (frame_bias)
7479 {
7480 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7481 mem = gen_rtx_MEM (DImode, addr);
7482 }
7483
7484 REG_NOTES (insn)
7485 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7486 gen_rtx_SET (VOIDmode, mem, frame_reg),
7487 REG_NOTES (insn));
7488 }
7489 }
7490
7491 static void
7492 emit_frame_store (unsigned int regno, rtx base_reg,
7493 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7494 {
7495 rtx reg = gen_rtx_REG (DImode, regno);
7496 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7497 }
7498
7499 /* Write function prologue. */
7500
7501 /* On VMS we have two kinds of functions:
7502
7503 - stack frame (PROC_STACK)
7504 these are 'normal' functions with local vars and which are
7505 calling other functions
7506 - register frame (PROC_REGISTER)
7507 keeps all data in registers, needs no stack
7508
7509 We must pass this to the assembler so it can generate the
7510 proper pdsc (procedure descriptor).
7511 This is done with the '.pdesc' command.
7512
7513 On non-VMS targets, we don't really differentiate between the two, as we can
7514 simply allocate stack without saving registers. */
7515
7516 void
7517 alpha_expand_prologue (void)
7518 {
7519 /* Registers to save. */
7520 unsigned long imask = 0;
7521 unsigned long fmask = 0;
7522 /* Stack space needed for pushing registers clobbered by us. */
7523 HOST_WIDE_INT sa_size;
7524 /* Complete stack size needed. */
7525 HOST_WIDE_INT frame_size;
7526 /* Offset from base reg to register save area. */
7527 HOST_WIDE_INT reg_offset;
7528 rtx sa_reg;
7529 int i;
7530
7531 sa_size = alpha_sa_size ();
7532
7533 frame_size = get_frame_size ();
7534 if (TARGET_ABI_OPEN_VMS)
7535 frame_size = ALPHA_ROUND (sa_size
7536 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7537 + frame_size
7538 + current_function_pretend_args_size);
7539 else if (TARGET_ABI_UNICOSMK)
7540 /* We have to allocate space for the DSIB if we generate a frame. */
7541 frame_size = ALPHA_ROUND (sa_size
7542 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7543 + ALPHA_ROUND (frame_size
7544 + current_function_outgoing_args_size);
7545 else
7546 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7547 + sa_size
7548 + ALPHA_ROUND (frame_size
7549 + current_function_pretend_args_size));
7550
7551 if (TARGET_ABI_OPEN_VMS)
7552 reg_offset = 8;
7553 else
7554 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7555
7556 alpha_sa_mask (&imask, &fmask);
7557
7558 /* Emit an insn to reload GP, if needed. */
7559 if (TARGET_ABI_OSF)
7560 {
7561 alpha_function_needs_gp = alpha_does_function_need_gp ();
7562 if (alpha_function_needs_gp)
7563 emit_insn (gen_prologue_ldgp ());
7564 }
7565
7566 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7567 the call to mcount ourselves, rather than having the linker do it
7568 magically in response to -pg. Since _mcount has special linkage,
7569 don't represent the call as a call. */
7570 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7571 emit_insn (gen_prologue_mcount ());
7572
7573 if (TARGET_ABI_UNICOSMK)
7574 unicosmk_gen_dsib (&imask);
7575
7576 /* Adjust the stack by the frame size. If the frame size is > 4096
7577 bytes, we need to be sure we probe somewhere in the first and last
7578 4096 bytes (we can probably get away without the latter test) and
7579 every 8192 bytes in between. If the frame size is > 32768, we
7580 do this in a loop. Otherwise, we generate the explicit probe
7581 instructions.
7582
7583 Note that we are only allowed to adjust sp once in the prologue. */
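
  /* For example (illustrative, ignoring the Unicos/Mk bias): a 20000-byte
     frame with no register save area gets probes at sp-4096 and sp-12288
     from the loop below, one extra probe at sp-20000 because more than
     4096 bytes remain below the last loop probe, and then the single sp
     adjustment by -20000.  */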
7584
7585 if (frame_size <= 32768)
7586 {
7587 if (frame_size > 4096)
7588 {
7589 int probed;
7590
7591 for (probed = 4096; probed < frame_size; probed += 8192)
7592 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7593 ? -probed + 64
7594 : -probed)));
7595
7596 /* We only have to do this probe if we aren't saving registers. */
7597 if (sa_size == 0 && frame_size > probed - 4096)
7598 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7599 }
7600
7601 if (frame_size != 0)
7602 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7603 GEN_INT (TARGET_ABI_UNICOSMK
7604 ? -frame_size + 64
7605 : -frame_size))));
7606 }
7607 else
7608 {
7609 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7610 number of 8192 byte blocks to probe. We then probe each block
7611 in the loop and then set SP to the proper location. If the
7612 amount remaining is > 4096, we have to do one more probe if we
7613 are not saving any registers. */
7614
7615 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7616 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7617 rtx ptr = gen_rtx_REG (DImode, 22);
7618 rtx count = gen_rtx_REG (DImode, 23);
7619 rtx seq;
7620
7621 emit_move_insn (count, GEN_INT (blocks));
7622 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7623 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7624
7625 /* Because of the difficulty in emitting a new basic block this
7626 late in the compilation, generate the loop as a single insn. */
7627 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7628
7629 if (leftover > 4096 && sa_size == 0)
7630 {
7631 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7632 MEM_VOLATILE_P (last) = 1;
7633 emit_move_insn (last, const0_rtx);
7634 }
7635
7636 if (TARGET_ABI_WINDOWS_NT)
7637 {
7638 /* For NT stack unwind (done by 'reverse execution'), it's
7639 not OK to take the result of a loop, even though the value
7640 is already in ptr, so we reload it via a single operation
7641 and subtract it from sp.
7642
7643 Yes, that's correct -- we have to reload the whole constant
7644 into a temporary via ldah+lda then subtract from sp. */
7645
7646 HOST_WIDE_INT lo, hi;
7647 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7648 hi = frame_size - lo;
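	  /* For example (illustrative): frame_size == 0x19000 splits into
	     lo == -0x7000 and hi == 0x20000 here; ldah+lda can materialize
	     that pair, and the two parts sum back to 0x19000.  */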
7649
7650 emit_move_insn (ptr, GEN_INT (hi));
7651 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7652 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7653 ptr));
7654 }
7655 else
7656 {
7657 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7658 GEN_INT (-leftover)));
7659 }
7660
7661 /* This alternative is special, because the DWARF code cannot
7662 possibly intuit through the loop above. So we invent this
7663 note for it to look at instead. */
7664 RTX_FRAME_RELATED_P (seq) = 1;
7665 REG_NOTES (seq)
7666 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7667 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7668 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7669 GEN_INT (TARGET_ABI_UNICOSMK
7670 ? -frame_size + 64
7671 : -frame_size))),
7672 REG_NOTES (seq));
7673 }
7674
7675 if (!TARGET_ABI_UNICOSMK)
7676 {
7677 HOST_WIDE_INT sa_bias = 0;
7678
7679 /* Cope with very large offsets to the register save area. */
7680 sa_reg = stack_pointer_rtx;
7681 if (reg_offset + sa_size > 0x8000)
7682 {
7683 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7684 rtx sa_bias_rtx;
7685
7686 if (low + sa_size <= 0x8000)
7687 sa_bias = reg_offset - low, reg_offset = low;
7688 else
7689 sa_bias = reg_offset, reg_offset = 0;
7690
7691 sa_reg = gen_rtx_REG (DImode, 24);
7692 sa_bias_rtx = GEN_INT (sa_bias);
7693
7694 if (add_operand (sa_bias_rtx, DImode))
7695 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7696 else
7697 {
7698 emit_move_insn (sa_reg, sa_bias_rtx);
7699 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7700 }
7701 }
7702
7703 /* Save regs in stack order. Beginning with VMS PV. */
7704 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7705 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7706
7707 /* Save register RA next. */
7708 if (imask & (1UL << REG_RA))
7709 {
7710 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7711 imask &= ~(1UL << REG_RA);
7712 reg_offset += 8;
7713 }
7714
7715 /* Now save any other registers required to be saved. */
7716 for (i = 0; i < 31; i++)
7717 if (imask & (1UL << i))
7718 {
7719 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7720 reg_offset += 8;
7721 }
7722
7723 for (i = 0; i < 31; i++)
7724 if (fmask & (1UL << i))
7725 {
7726 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7727 reg_offset += 8;
7728 }
7729 }
7730 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7731 {
7732 /* The standard frame on the T3E includes space for saving registers.
7733 We just have to use it. We don't have to save the return address and
7734 the old frame pointer here - they are saved in the DSIB. */
7735
7736 reg_offset = -56;
7737 for (i = 9; i < 15; i++)
7738 if (imask & (1UL << i))
7739 {
7740 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7741 reg_offset -= 8;
7742 }
7743 for (i = 2; i < 10; i++)
7744 if (fmask & (1UL << i))
7745 {
7746 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7747 reg_offset -= 8;
7748 }
7749 }
7750
7751 if (TARGET_ABI_OPEN_VMS)
7752 {
7753 if (alpha_procedure_type == PT_REGISTER)
7754 /* Register frame procedures save the fp.
7755 ?? Ought to have a dwarf2 save for this. */
7756 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7757 hard_frame_pointer_rtx);
7758
7759 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7760 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7761 gen_rtx_REG (DImode, REG_PV)));
7762
7763 if (alpha_procedure_type != PT_NULL
7764 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7765 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7766
7767 /* If we have to allocate space for outgoing args, do it now. */
7768 if (current_function_outgoing_args_size != 0)
7769 {
7770 rtx seq
7771 = emit_move_insn (stack_pointer_rtx,
7772 plus_constant
7773 (hard_frame_pointer_rtx,
7774 - (ALPHA_ROUND
7775 (current_function_outgoing_args_size))));
7776
7777 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7778 if ! frame_pointer_needed. Setting the bit will change the CFA
7779 computation rule to use sp again, which would be wrong if we had
7780 frame_pointer_needed, as this means sp might move unpredictably
7781 later on.
7782
7783 Also, note that
7784 frame_pointer_needed
7785 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7786 and
7787 current_function_outgoing_args_size != 0
7788 => alpha_procedure_type != PT_NULL,
7789
7790 so when we are not setting the bit here, we are guaranteed to
7791 have emitted an FRP frame pointer update just before. */
7792 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7793 }
7794 }
7795 else if (!TARGET_ABI_UNICOSMK)
7796 {
7797 /* If we need a frame pointer, set it from the stack pointer. */
7798 if (frame_pointer_needed)
7799 {
7800 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7801 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7802 else
7803 /* This must always be the last instruction in the
7804 prologue, thus we emit a special move + clobber. */
7805 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7806 stack_pointer_rtx, sa_reg)));
7807 }
7808 }
7809
7810 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7811 the prologue, for exception handling reasons, we cannot do this for
7812 any insn that might fault. We could prevent this for mems with a
7813 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7814 have to prevent all such scheduling with a blockage.
7815
7816 Linux, on the other hand, never bothered to implement OSF/1's
7817 exception handling, and so doesn't care about such things. Anyone
7818 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7819
7820 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7821 emit_insn (gen_blockage ());
7822 }
7823
7824 /* Count the number of .file directives, so that .loc is up to date. */
7825 int num_source_filenames = 0;
7826
7827 /* Output the textual info surrounding the prologue. */
7828
7829 void
7830 alpha_start_function (FILE *file, const char *fnname,
7831 tree decl ATTRIBUTE_UNUSED)
7832 {
7833 unsigned long imask = 0;
7834 unsigned long fmask = 0;
7835 /* Stack space needed for pushing registers clobbered by us. */
7836 HOST_WIDE_INT sa_size;
7837 /* Complete stack size needed. */
7838 unsigned HOST_WIDE_INT frame_size;
7839 /* Offset from base reg to register save area. */
7840 HOST_WIDE_INT reg_offset;
7841 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7842 int i;
7843
7844 /* Don't emit an extern directive for functions defined in the same file. */
7845 if (TARGET_ABI_UNICOSMK)
7846 {
7847 tree name_tree;
7848 name_tree = get_identifier (fnname);
7849 TREE_ASM_WRITTEN (name_tree) = 1;
7850 }
7851
7852 alpha_fnname = fnname;
7853 sa_size = alpha_sa_size ();
7854
7855 frame_size = get_frame_size ();
7856 if (TARGET_ABI_OPEN_VMS)
7857 frame_size = ALPHA_ROUND (sa_size
7858 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7859 + frame_size
7860 + current_function_pretend_args_size);
7861 else if (TARGET_ABI_UNICOSMK)
7862 frame_size = ALPHA_ROUND (sa_size
7863 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7864 + ALPHA_ROUND (frame_size
7865 + current_function_outgoing_args_size);
7866 else
7867 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7868 + sa_size
7869 + ALPHA_ROUND (frame_size
7870 + current_function_pretend_args_size));
7871
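/* A worked example with invented numbers, for the OSF case: with
   outgoing_args_size = 32, sa_size = 16, a local frame of 40 bytes and
   no pretend args, and assuming ALPHA_ROUND rounds up to a multiple of
   16, this gives frame_size = 32 + 16 + 48 = 96, and reg_offset below
   is simply ALPHA_ROUND (32) = 32. */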
7872 if (TARGET_ABI_OPEN_VMS)
7873 reg_offset = 8;
7874 else
7875 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7876
7877 alpha_sa_mask (&imask, &fmask);
7878
7879 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7880 We have to do that before the .ent directive as we cannot switch
7881 files within procedures with native ecoff because line numbers are
7882 linked to procedure descriptors.
7883 Outputting the lineno helps debugging of one line functions as they
7884 would otherwise get no line number at all. Please note that we would
7885 like to put out last_linenum from final.c, but it is not accessible. */
7886
7887 if (write_symbols == SDB_DEBUG)
7888 {
7889 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7890 ASM_OUTPUT_SOURCE_FILENAME (file,
7891 DECL_SOURCE_FILE (current_function_decl));
7892 #endif
7893 #ifdef SDB_OUTPUT_SOURCE_LINE
7894 if (debug_info_level != DINFO_LEVEL_TERSE)
7895 SDB_OUTPUT_SOURCE_LINE (file,
7896 DECL_SOURCE_LINE (current_function_decl));
7897 #endif
7898 }
7899
7900 /* Issue function start and label. */
7901 if (TARGET_ABI_OPEN_VMS
7902 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7903 {
7904 fputs ("\t.ent ", file);
7905 assemble_name (file, fnname);
7906 putc ('\n', file);
7907
7908 /* If the function needs GP, we'll write the "..ng" label there.
7909 Otherwise, do it here. */
7910 if (TARGET_ABI_OSF
7911 && ! alpha_function_needs_gp
7912 && ! current_function_is_thunk)
7913 {
7914 putc ('$', file);
7915 assemble_name (file, fnname);
7916 fputs ("..ng:\n", file);
7917 }
7918 }
7919
7920 strcpy (entry_label, fnname);
7921 if (TARGET_ABI_OPEN_VMS)
7922 strcat (entry_label, "..en");
7923
7924 /* For public functions, the label must be globalized by appending an
7925 additional colon. */
7926 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7927 strcat (entry_label, ":");
7928
7929 ASM_OUTPUT_LABEL (file, entry_label);
7930 inside_function = TRUE;
7931
7932 if (TARGET_ABI_OPEN_VMS)
7933 fprintf (file, "\t.base $%d\n", vms_base_regno);
7934
7935 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7936 && !flag_inhibit_size_directive)
7937 {
7938 /* Set flags in procedure descriptor to request IEEE-conformant
7939 math-library routines. The value we set it to is PDSC_EXC_IEEE
7940 (/usr/include/pdsc.h). */
7941 fputs ("\t.eflag 48\n", file);
7942 }
7943
7944 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7945 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7946 alpha_arg_offset = -frame_size + 48;
7947
7948 /* Describe our frame. If the frame size is larger than an integer,
7949 print it as zero to avoid an assembler error. We won't be
7950 properly describing such a frame, but that's the best we can do. */
7951 if (TARGET_ABI_UNICOSMK)
7952 ;
7953 else if (TARGET_ABI_OPEN_VMS)
7954 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7955 HOST_WIDE_INT_PRINT_DEC "\n",
7956 vms_unwind_regno,
7957 frame_size >= (1UL << 31) ? 0 : frame_size,
7958 reg_offset);
7959 else if (!flag_inhibit_size_directive)
7960 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7961 (frame_pointer_needed
7962 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7963 frame_size >= (1UL << 31) ? 0 : frame_size,
7964 current_function_pretend_args_size);
7965
7966 /* Describe which registers were spilled. */
7967 if (TARGET_ABI_UNICOSMK)
7968 ;
7969 else if (TARGET_ABI_OPEN_VMS)
7970 {
7971 if (imask)
7972 /* ??? Does VMS care if mask contains ra? The old code didn't
7973 set it, so I don't set it here. */
7974 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7975 if (fmask)
7976 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7977 if (alpha_procedure_type == PT_REGISTER)
7978 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7979 }
7980 else if (!flag_inhibit_size_directive)
7981 {
7982 if (imask)
7983 {
7984 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7985 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7986
7987 for (i = 0; i < 32; ++i)
7988 if (imask & (1UL << i))
7989 reg_offset += 8;
7990 }
7991
7992 if (fmask)
7993 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7994 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7995 }
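/* For a hypothetical OSF function with frame_size = 96, reg_offset = 32,
   no frame pointer, and only $26 and $9 saved, the directives above come
   out roughly as
   .frame $30,96,$26,0
   .mask 0x4000200,-64
   i.e. bits 9 and 26 are set in the mask and the offset printed is
   reg_offset - frame_size. */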
7996
7997 #if TARGET_ABI_OPEN_VMS
7998 /* Ifdef'ed because link sections are only available then. */
7999 switch_to_section (readonly_data_section);
8000 fprintf (file, "\t.align 3\n");
8001 assemble_name (file, fnname); fputs ("..na:\n", file);
8002 fputs ("\t.ascii \"", file);
8003 assemble_name (file, fnname);
8004 fputs ("\\0\"\n", file);
8005 alpha_need_linkage (fnname, 1);
8006 switch_to_section (text_section);
8007 #endif
8008 }
8009
8010 /* Emit the .prologue note at the scheduled end of the prologue. */
8011
8012 static void
8013 alpha_output_function_end_prologue (FILE *file)
8014 {
8015 if (TARGET_ABI_UNICOSMK)
8016 ;
8017 else if (TARGET_ABI_OPEN_VMS)
8018 fputs ("\t.prologue\n", file);
8019 else if (TARGET_ABI_WINDOWS_NT)
8020 fputs ("\t.prologue 0\n", file);
8021 else if (!flag_inhibit_size_directive)
8022 fprintf (file, "\t.prologue %d\n",
8023 alpha_function_needs_gp || current_function_is_thunk);
8024 }
8025
8026 /* Write function epilogue. */
8027
8028 /* ??? At some point we will want to support full unwind, and so will
8029 need to mark the epilogue as well. At the moment, we just confuse
8030 dwarf2out. */
8031 #undef FRP
8032 #define FRP(exp) exp
8033
8034 void
8035 alpha_expand_epilogue (void)
8036 {
8037 /* Registers to save. */
8038 unsigned long imask = 0;
8039 unsigned long fmask = 0;
8040 /* Stack space needed for pushing registers clobbered by us. */
8041 HOST_WIDE_INT sa_size;
8042 /* Complete stack size needed. */
8043 HOST_WIDE_INT frame_size;
8044 /* Offset from base reg to register save area. */
8045 HOST_WIDE_INT reg_offset;
8046 int fp_is_frame_pointer, fp_offset;
8047 rtx sa_reg, sa_reg_exp = NULL;
8048 rtx sp_adj1, sp_adj2, mem;
8049 rtx eh_ofs;
8050 int i;
8051
8052 sa_size = alpha_sa_size ();
8053
8054 frame_size = get_frame_size ();
8055 if (TARGET_ABI_OPEN_VMS)
8056 frame_size = ALPHA_ROUND (sa_size
8057 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8058 + frame_size
8059 + current_function_pretend_args_size);
8060 else if (TARGET_ABI_UNICOSMK)
8061 frame_size = ALPHA_ROUND (sa_size
8062 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8063 + ALPHA_ROUND (frame_size
8064 + current_function_outgoing_args_size);
8065 else
8066 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8067 + sa_size
8068 + ALPHA_ROUND (frame_size
8069 + current_function_pretend_args_size));
8070
8071 if (TARGET_ABI_OPEN_VMS)
8072 {
8073 if (alpha_procedure_type == PT_STACK)
8074 reg_offset = 8;
8075 else
8076 reg_offset = 0;
8077 }
8078 else
8079 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8080
8081 alpha_sa_mask (&imask, &fmask);
8082
8083 fp_is_frame_pointer
8084 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8085 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8086 fp_offset = 0;
8087 sa_reg = stack_pointer_rtx;
8088
8089 if (current_function_calls_eh_return)
8090 eh_ofs = EH_RETURN_STACKADJ_RTX;
8091 else
8092 eh_ofs = NULL_RTX;
8093
8094 if (!TARGET_ABI_UNICOSMK && sa_size)
8095 {
8096 /* If we have a frame pointer, restore SP from it. */
8097 if ((TARGET_ABI_OPEN_VMS
8098 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8099 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8100 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8101
8102 /* Cope with very large offsets to the register save area. */
8103 if (reg_offset + sa_size > 0x8000)
8104 {
8105 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8106 HOST_WIDE_INT bias;
8107
8108 if (low + sa_size <= 0x8000)
8109 bias = reg_offset - low, reg_offset = low;
8110 else
8111 bias = reg_offset, reg_offset = 0;
8112
8113 sa_reg = gen_rtx_REG (DImode, 22);
8114 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8115
8116 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8117 }
8118
8119 /* Restore registers in order, excepting a true frame pointer. */
8120
8121 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8122 if (! eh_ofs)
8123 set_mem_alias_set (mem, alpha_sr_alias_set);
8124 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8125
8126 reg_offset += 8;
8127 imask &= ~(1UL << REG_RA);
8128
8129 for (i = 0; i < 31; ++i)
8130 if (imask & (1UL << i))
8131 {
8132 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8133 fp_offset = reg_offset;
8134 else
8135 {
8136 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8137 set_mem_alias_set (mem, alpha_sr_alias_set);
8138 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8139 }
8140 reg_offset += 8;
8141 }
8142
8143 for (i = 0; i < 31; ++i)
8144 if (fmask & (1UL << i))
8145 {
8146 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8147 set_mem_alias_set (mem, alpha_sr_alias_set);
8148 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8149 reg_offset += 8;
8150 }
8151 }
8152 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8153 {
8154 /* Restore callee-saved general-purpose registers. */
8155
8156 reg_offset = -56;
8157
8158 for (i = 9; i < 15; i++)
8159 if (imask & (1UL << i))
8160 {
8161 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8162 reg_offset));
8163 set_mem_alias_set (mem, alpha_sr_alias_set);
8164 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8165 reg_offset -= 8;
8166 }
8167
8168 for (i = 2; i < 10; i++)
8169 if (fmask & (1UL << i))
8170 {
8171 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8172 reg_offset));
8173 set_mem_alias_set (mem, alpha_sr_alias_set);
8174 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8175 reg_offset -= 8;
8176 }
8177
8178 /* Restore the return address from the DSIB. */
8179
8180 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8181 set_mem_alias_set (mem, alpha_sr_alias_set);
8182 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8183 }
8184
8185 if (frame_size || eh_ofs)
8186 {
8187 sp_adj1 = stack_pointer_rtx;
8188
8189 if (eh_ofs)
8190 {
8191 sp_adj1 = gen_rtx_REG (DImode, 23);
8192 emit_move_insn (sp_adj1,
8193 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8194 }
8195
8196 /* If the stack size is large, begin computation into a temporary
8197 register so as not to interfere with a potential fp restore,
8198 which must be consecutive with an SP restore. */
8199 if (frame_size < 32768
8200 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8201 sp_adj2 = GEN_INT (frame_size);
8202 else if (TARGET_ABI_UNICOSMK)
8203 {
8204 sp_adj1 = gen_rtx_REG (DImode, 23);
8205 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8206 sp_adj2 = const0_rtx;
8207 }
8208 else if (frame_size < 0x40007fffL)
8209 {
8210 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8211
8212 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8213 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8214 sp_adj1 = sa_reg;
8215 else
8216 {
8217 sp_adj1 = gen_rtx_REG (DImode, 23);
8218 FRP (emit_move_insn (sp_adj1, sp_adj2));
8219 }
8220 sp_adj2 = GEN_INT (low);
8221 }
8222 else
8223 {
8224 rtx tmp = gen_rtx_REG (DImode, 23);
8225 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8226 3, false));
8227 if (!sp_adj2)
8228 {
8229 /* We can't drop new things to memory this late, afaik,
8230 so build it up by pieces. */
8231 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8232 -(frame_size < 0)));
8233 gcc_assert (sp_adj2);
8234 }
8235 }
8236
8237 /* From now on, things must be in order. So emit blockages. */
8238
8239 /* Restore the frame pointer. */
8240 if (TARGET_ABI_UNICOSMK)
8241 {
8242 emit_insn (gen_blockage ());
8243 mem = gen_rtx_MEM (DImode,
8244 plus_constant (hard_frame_pointer_rtx, -16));
8245 set_mem_alias_set (mem, alpha_sr_alias_set);
8246 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8247 }
8248 else if (fp_is_frame_pointer)
8249 {
8250 emit_insn (gen_blockage ());
8251 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8252 set_mem_alias_set (mem, alpha_sr_alias_set);
8253 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8254 }
8255 else if (TARGET_ABI_OPEN_VMS)
8256 {
8257 emit_insn (gen_blockage ());
8258 FRP (emit_move_insn (hard_frame_pointer_rtx,
8259 gen_rtx_REG (DImode, vms_save_fp_regno)));
8260 }
8261
8262 /* Restore the stack pointer. */
8263 emit_insn (gen_blockage ());
8264 if (sp_adj2 == const0_rtx)
8265 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8266 else
8267 FRP (emit_move_insn (stack_pointer_rtx,
8268 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8269 }
8270 else
8271 {
8272 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8273 {
8274 emit_insn (gen_blockage ());
8275 FRP (emit_move_insn (hard_frame_pointer_rtx,
8276 gen_rtx_REG (DImode, vms_save_fp_regno)));
8277 }
8278 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8279 {
8280 /* Decrement the frame pointer if the function does not have a
8281 frame. */
8282
8283 emit_insn (gen_blockage ());
8284 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8285 hard_frame_pointer_rtx, constm1_rtx)));
8286 }
8287 }
8288 }
8289 \f
8290 /* Output the rest of the textual info surrounding the epilogue. */
8291
8292 void
8293 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8294 {
8295 #if TARGET_ABI_OPEN_VMS
8296 alpha_write_linkage (file, fnname, decl);
8297 #endif
8298
8299 /* End the function. */
8300 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8301 {
8302 fputs ("\t.end ", file);
8303 assemble_name (file, fnname);
8304 putc ('\n', file);
8305 }
8306 inside_function = FALSE;
8307
8308 /* Output jump tables and the static subroutine information block. */
8309 if (TARGET_ABI_UNICOSMK)
8310 {
8311 unicosmk_output_ssib (file, fnname);
8312 unicosmk_output_deferred_case_vectors (file);
8313 }
8314 }
8315
8316 #if TARGET_ABI_OSF
8317 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8318
8319 In order to avoid the hordes of differences between generated code
8320 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8321 lots of code loading up large constants, generate rtl and emit it
8322 instead of going straight to text.
8323
8324 Not sure why this idea hasn't been explored before... */
8325
8326 static void
8327 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8328 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8329 tree function)
8330 {
8331 HOST_WIDE_INT hi, lo;
8332 rtx this, insn, funexp;
8333
8334 reset_block_changes ();
8335
8336 /* We always require a valid GP. */
8337 emit_insn (gen_prologue_ldgp ());
8338 emit_note (NOTE_INSN_PROLOGUE_END);
8339
8340 /* Find the "this" pointer. If the function returns a structure,
8341 the structure return pointer is in $16. */
8342 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8343 this = gen_rtx_REG (Pmode, 17);
8344 else
8345 this = gen_rtx_REG (Pmode, 16);
8346
8347 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8348 entire constant for the add. */
8349 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8350 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8351 if (hi + lo == delta)
8352 {
8353 if (hi)
8354 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8355 if (lo)
8356 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8357 }
8358 else
8359 {
8360 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8361 delta, -(delta < 0));
8362 emit_insn (gen_adddi3 (this, this, tmp));
8363 }
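/* To illustrate with an invented delta of 0x18008: lo works out to
   -0x7ff8 and hi to 0x20000, hi + lo == delta, and the two adds above
   become approximately
   ldah $16,2($16)        # this += 0x20000
   lda $16,-32760($16)    # this += -0x7ff8
   assuming THIS lives in $16 (no aggregate return value). */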
8364
8365 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8366 if (vcall_offset)
8367 {
8368 rtx tmp, tmp2;
8369
8370 tmp = gen_rtx_REG (Pmode, 0);
8371 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8372
8373 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8374 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8375 if (hi + lo == vcall_offset)
8376 {
8377 if (hi)
8378 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8379 }
8380 else
8381 {
8382 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8383 vcall_offset, -(vcall_offset < 0));
8384 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8385 lo = 0;
8386 }
8387 if (lo)
8388 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8389 else
8390 tmp2 = tmp;
8391 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8392
8393 emit_insn (gen_adddi3 (this, this, tmp));
8394 }
8395
8396 /* Generate a tail call to the target function. */
8397 if (! TREE_USED (function))
8398 {
8399 assemble_external (function);
8400 TREE_USED (function) = 1;
8401 }
8402 funexp = XEXP (DECL_RTL (function), 0);
8403 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8404 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8405 SIBLING_CALL_P (insn) = 1;
8406
8407 /* Run just enough of rest_of_compilation to get the insns emitted.
8408 There's not really enough bulk here to make other passes such as
8409 instruction scheduling worthwhile. Note that use_thunk calls
8410 assemble_start_function and assemble_end_function. */
8411 insn = get_insns ();
8412 insn_locators_initialize ();
8413 shorten_branches (insn);
8414 final_start_function (insn, file, 1);
8415 final (insn, file, 1);
8416 final_end_function ();
8417 }
8418 #endif /* TARGET_ABI_OSF */
8419 \f
8420 /* Debugging support. */
8421
8422 #include "gstab.h"
8423
8424 /* Count the number of sdb-related labels that are generated (to find block
8425 start and end boundaries). */
8426
8427 int sdb_label_count = 0;
8428
8429 /* Name of the file containing the current function. */
8430
8431 static const char *current_function_file = "";
8432
8433 /* Offsets to alpha virtual arg/local debugging pointers. */
8434
8435 long alpha_arg_offset;
8436 long alpha_auto_offset;
8437 \f
8438 /* Emit a new filename to a stream. */
8439
8440 void
8441 alpha_output_filename (FILE *stream, const char *name)
8442 {
8443 static int first_time = TRUE;
8444
8445 if (first_time)
8446 {
8447 first_time = FALSE;
8448 ++num_source_filenames;
8449 current_function_file = name;
8450 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8451 output_quoted_string (stream, name);
8452 fprintf (stream, "\n");
8453 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8454 fprintf (stream, "\t#@stabs\n");
8455 }
8456
8457 else if (write_symbols == DBX_DEBUG)
8458 /* dbxout.c will emit an appropriate .stabs directive. */
8459 return;
8460
8461 else if (name != current_function_file
8462 && strcmp (name, current_function_file) != 0)
8463 {
8464 if (inside_function && ! TARGET_GAS)
8465 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8466 else
8467 {
8468 ++num_source_filenames;
8469 current_function_file = name;
8470 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8471 }
8472
8473 output_quoted_string (stream, name);
8474 fprintf (stream, "\n");
8475 }
8476 }
8477 \f
8478 /* Structure to show the current status of registers and memory. */
8479
8480 struct shadow_summary
8481 {
8482 struct {
8483 unsigned int i : 31; /* Mask of int regs */
8484 unsigned int fp : 31; /* Mask of fp regs */
8485 unsigned int mem : 1; /* mem == imem | fpmem */
8486 } used, defd;
8487 };
8488
8489 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8490 to the summary structure. SET is nonzero if the insn is setting the
8491 object, otherwise zero. */
8492
8493 static void
8494 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8495 {
8496 const char *format_ptr;
8497 int i, j;
8498
8499 if (x == 0)
8500 return;
8501
8502 switch (GET_CODE (x))
8503 {
8504 /* ??? Note that this case would be incorrect if the Alpha had a
8505 ZERO_EXTRACT in SET_DEST. */
8506 case SET:
8507 summarize_insn (SET_SRC (x), sum, 0);
8508 summarize_insn (SET_DEST (x), sum, 1);
8509 break;
8510
8511 case CLOBBER:
8512 summarize_insn (XEXP (x, 0), sum, 1);
8513 break;
8514
8515 case USE:
8516 summarize_insn (XEXP (x, 0), sum, 0);
8517 break;
8518
8519 case ASM_OPERANDS:
8520 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8521 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8522 break;
8523
8524 case PARALLEL:
8525 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8526 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8527 break;
8528
8529 case SUBREG:
8530 summarize_insn (SUBREG_REG (x), sum, 0);
8531 break;
8532
8533 case REG:
8534 {
8535 int regno = REGNO (x);
8536 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8537
8538 if (regno == 31 || regno == 63)
8539 break;
8540
8541 if (set)
8542 {
8543 if (regno < 32)
8544 sum->defd.i |= mask;
8545 else
8546 sum->defd.fp |= mask;
8547 }
8548 else
8549 {
8550 if (regno < 32)
8551 sum->used.i |= mask;
8552 else
8553 sum->used.fp |= mask;
8554 }
8555 }
8556 break;
8557
8558 case MEM:
8559 if (set)
8560 sum->defd.mem = 1;
8561 else
8562 sum->used.mem = 1;
8563
8564 /* Find the regs used in memory address computation: */
8565 summarize_insn (XEXP (x, 0), sum, 0);
8566 break;
8567
8568 case CONST_INT: case CONST_DOUBLE:
8569 case SYMBOL_REF: case LABEL_REF: case CONST:
8570 case SCRATCH: case ASM_INPUT:
8571 break;
8572
8573 /* Handle common unary and binary ops for efficiency. */
8574 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8575 case MOD: case UDIV: case UMOD: case AND: case IOR:
8576 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8577 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8578 case NE: case EQ: case GE: case GT: case LE:
8579 case LT: case GEU: case GTU: case LEU: case LTU:
8580 summarize_insn (XEXP (x, 0), sum, 0);
8581 summarize_insn (XEXP (x, 1), sum, 0);
8582 break;
8583
8584 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8585 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8586 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8587 case SQRT: case FFS:
8588 summarize_insn (XEXP (x, 0), sum, 0);
8589 break;
8590
8591 default:
8592 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8593 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8594 switch (format_ptr[i])
8595 {
8596 case 'e':
8597 summarize_insn (XEXP (x, i), sum, 0);
8598 break;
8599
8600 case 'E':
8601 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8602 summarize_insn (XVECEXP (x, i, j), sum, 0);
8603 break;
8604
8605 case 'i':
8606 break;
8607
8608 default:
8609 gcc_unreachable ();
8610 }
8611 }
8612 }
8613
8614 /* Ensure a sufficient number of `trapb' insns are in the code when
8615 the user requests code with a trap precision of functions or
8616 instructions.
8617
8618 In naive mode, when the user requests a trap-precision of
8619 "instruction", a trapb is needed after every instruction that may
8620 generate a trap. This ensures that the code is resumption safe but
8621 it is also slow.
8622
8623 When optimizations are turned on, we delay issuing a trapb as long
8624 as possible. In this context, a trap shadow is the sequence of
8625 instructions that starts with a (potentially) trap generating
8626 instruction and extends to the next trapb or call_pal instruction
8627 (but GCC never generates call_pal by itself). We can delay (and
8628 therefore sometimes omit) a trapb subject to the following
8629 conditions:
8630
8631 (a) On entry to the trap shadow, if any Alpha register or memory
8632 location contains a value that is used as an operand value by some
8633 instruction in the trap shadow (live on entry), then no instruction
8634 in the trap shadow may modify the register or memory location.
8635
8636 (b) Within the trap shadow, the computation of the base register
8637 for a memory load or store instruction may not involve using the
8638 result of an instruction that might generate an UNPREDICTABLE
8639 result.
8640
8641 (c) Within the trap shadow, no register may be used more than once
8642 as a destination register. (This is to make life easier for the
8643 trap-handler.)
8644
8645 (d) The trap shadow may not include any branch instructions. */
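/* A small made-up shadow, for illustration, with -mtrap-precision=i:
   addt $f1,$f2,$f3   # may trap; a trap shadow opens here
   addq $1,$2,$4      # fine: fresh destination register
   trapb              # forced here, because the next insn would
   addq $3,$4,$4      # reuse $4 as a destination -- rule (c)
   The pass below tracks exactly this use/def information through
   struct shadow_summary. */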
8646
8647 static void
8648 alpha_handle_trap_shadows (void)
8649 {
8650 struct shadow_summary shadow;
8651 int trap_pending, exception_nesting;
8652 rtx i, n;
8653
8654 trap_pending = 0;
8655 exception_nesting = 0;
8656 shadow.used.i = 0;
8657 shadow.used.fp = 0;
8658 shadow.used.mem = 0;
8659 shadow.defd = shadow.used;
8660
8661 for (i = get_insns (); i ; i = NEXT_INSN (i))
8662 {
8663 if (GET_CODE (i) == NOTE)
8664 {
8665 switch (NOTE_LINE_NUMBER (i))
8666 {
8667 case NOTE_INSN_EH_REGION_BEG:
8668 exception_nesting++;
8669 if (trap_pending)
8670 goto close_shadow;
8671 break;
8672
8673 case NOTE_INSN_EH_REGION_END:
8674 exception_nesting--;
8675 if (trap_pending)
8676 goto close_shadow;
8677 break;
8678
8679 case NOTE_INSN_EPILOGUE_BEG:
8680 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8681 goto close_shadow;
8682 break;
8683 }
8684 }
8685 else if (trap_pending)
8686 {
8687 if (alpha_tp == ALPHA_TP_FUNC)
8688 {
8689 if (GET_CODE (i) == JUMP_INSN
8690 && GET_CODE (PATTERN (i)) == RETURN)
8691 goto close_shadow;
8692 }
8693 else if (alpha_tp == ALPHA_TP_INSN)
8694 {
8695 if (optimize > 0)
8696 {
8697 struct shadow_summary sum;
8698
8699 sum.used.i = 0;
8700 sum.used.fp = 0;
8701 sum.used.mem = 0;
8702 sum.defd = sum.used;
8703
8704 switch (GET_CODE (i))
8705 {
8706 case INSN:
8707 /* Annoyingly, get_attr_trap will die on these. */
8708 if (GET_CODE (PATTERN (i)) == USE
8709 || GET_CODE (PATTERN (i)) == CLOBBER)
8710 break;
8711
8712 summarize_insn (PATTERN (i), &sum, 0);
8713
8714 if ((sum.defd.i & shadow.defd.i)
8715 || (sum.defd.fp & shadow.defd.fp))
8716 {
8717 /* (c) would be violated */
8718 goto close_shadow;
8719 }
8720
8721 /* Combine shadow with summary of current insn: */
8722 shadow.used.i |= sum.used.i;
8723 shadow.used.fp |= sum.used.fp;
8724 shadow.used.mem |= sum.used.mem;
8725 shadow.defd.i |= sum.defd.i;
8726 shadow.defd.fp |= sum.defd.fp;
8727 shadow.defd.mem |= sum.defd.mem;
8728
8729 if ((sum.defd.i & shadow.used.i)
8730 || (sum.defd.fp & shadow.used.fp)
8731 || (sum.defd.mem & shadow.used.mem))
8732 {
8733 /* (a) would be violated (also takes care of (b)) */
8734 gcc_assert (get_attr_trap (i) != TRAP_YES
8735 || (!(sum.defd.i & sum.used.i)
8736 && !(sum.defd.fp & sum.used.fp)));
8737
8738 goto close_shadow;
8739 }
8740 break;
8741
8742 case JUMP_INSN:
8743 case CALL_INSN:
8744 case CODE_LABEL:
8745 goto close_shadow;
8746
8747 default:
8748 gcc_unreachable ();
8749 }
8750 }
8751 else
8752 {
8753 close_shadow:
8754 n = emit_insn_before (gen_trapb (), i);
8755 PUT_MODE (n, TImode);
8756 PUT_MODE (i, TImode);
8757 trap_pending = 0;
8758 shadow.used.i = 0;
8759 shadow.used.fp = 0;
8760 shadow.used.mem = 0;
8761 shadow.defd = shadow.used;
8762 }
8763 }
8764 }
8765
8766 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8767 && GET_CODE (i) == INSN
8768 && GET_CODE (PATTERN (i)) != USE
8769 && GET_CODE (PATTERN (i)) != CLOBBER
8770 && get_attr_trap (i) == TRAP_YES)
8771 {
8772 if (optimize && !trap_pending)
8773 summarize_insn (PATTERN (i), &shadow, 0);
8774 trap_pending = 1;
8775 }
8776 }
8777 }
8778 \f
8779 /* Alpha can only issue instruction groups simultaneously if they are
8780 suitably aligned. This is very processor-specific. */
8781 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8782 that are marked "fake". These instructions do not exist on that target,
8783 but it is possible to see these insns with deranged combinations of
8784 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8785 choose a result at random. */
8786
8787 enum alphaev4_pipe {
8788 EV4_STOP = 0,
8789 EV4_IB0 = 1,
8790 EV4_IB1 = 2,
8791 EV4_IBX = 4
8792 };
8793
8794 enum alphaev5_pipe {
8795 EV5_STOP = 0,
8796 EV5_NONE = 1,
8797 EV5_E01 = 2,
8798 EV5_E0 = 4,
8799 EV5_E1 = 8,
8800 EV5_FAM = 16,
8801 EV5_FA = 32,
8802 EV5_FM = 64
8803 };
8804
8805 static enum alphaev4_pipe
8806 alphaev4_insn_pipe (rtx insn)
8807 {
8808 if (recog_memoized (insn) < 0)
8809 return EV4_STOP;
8810 if (get_attr_length (insn) != 4)
8811 return EV4_STOP;
8812
8813 switch (get_attr_type (insn))
8814 {
8815 case TYPE_ILD:
8816 case TYPE_LDSYM:
8817 case TYPE_FLD:
8818 case TYPE_LD_L:
8819 return EV4_IBX;
8820
8821 case TYPE_IADD:
8822 case TYPE_ILOG:
8823 case TYPE_ICMOV:
8824 case TYPE_ICMP:
8825 case TYPE_FST:
8826 case TYPE_SHIFT:
8827 case TYPE_IMUL:
8828 case TYPE_FBR:
8829 case TYPE_MVI: /* fake */
8830 return EV4_IB0;
8831
8832 case TYPE_IST:
8833 case TYPE_MISC:
8834 case TYPE_IBR:
8835 case TYPE_JSR:
8836 case TYPE_CALLPAL:
8837 case TYPE_FCPYS:
8838 case TYPE_FCMOV:
8839 case TYPE_FADD:
8840 case TYPE_FDIV:
8841 case TYPE_FMUL:
8842 case TYPE_ST_C:
8843 case TYPE_MB:
8844 case TYPE_FSQRT: /* fake */
8845 case TYPE_FTOI: /* fake */
8846 case TYPE_ITOF: /* fake */
8847 return EV4_IB1;
8848
8849 default:
8850 gcc_unreachable ();
8851 }
8852 }
8853
8854 static enum alphaev5_pipe
8855 alphaev5_insn_pipe (rtx insn)
8856 {
8857 if (recog_memoized (insn) < 0)
8858 return EV5_STOP;
8859 if (get_attr_length (insn) != 4)
8860 return EV5_STOP;
8861
8862 switch (get_attr_type (insn))
8863 {
8864 case TYPE_ILD:
8865 case TYPE_FLD:
8866 case TYPE_LDSYM:
8867 case TYPE_IADD:
8868 case TYPE_ILOG:
8869 case TYPE_ICMOV:
8870 case TYPE_ICMP:
8871 return EV5_E01;
8872
8873 case TYPE_IST:
8874 case TYPE_FST:
8875 case TYPE_SHIFT:
8876 case TYPE_IMUL:
8877 case TYPE_MISC:
8878 case TYPE_MVI:
8879 case TYPE_LD_L:
8880 case TYPE_ST_C:
8881 case TYPE_MB:
8882 case TYPE_FTOI: /* fake */
8883 case TYPE_ITOF: /* fake */
8884 return EV5_E0;
8885
8886 case TYPE_IBR:
8887 case TYPE_JSR:
8888 case TYPE_CALLPAL:
8889 return EV5_E1;
8890
8891 case TYPE_FCPYS:
8892 return EV5_FAM;
8893
8894 case TYPE_FBR:
8895 case TYPE_FCMOV:
8896 case TYPE_FADD:
8897 case TYPE_FDIV:
8898 case TYPE_FSQRT: /* fake */
8899 return EV5_FA;
8900
8901 case TYPE_FMUL:
8902 return EV5_FM;
8903
8904 default:
8905 gcc_unreachable ();
8906 }
8907 }
8908
8909 /* IN_USE is a mask of the slots currently filled within the insn group.
8910 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8911 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8912
8913 LEN is, of course, the length of the group in bytes. */
8914
8915 static rtx
8916 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8917 {
8918 int len, in_use;
8919
8920 len = in_use = 0;
8921
8922 if (! INSN_P (insn)
8923 || GET_CODE (PATTERN (insn)) == CLOBBER
8924 || GET_CODE (PATTERN (insn)) == USE)
8925 goto next_and_done;
8926
8927 while (1)
8928 {
8929 enum alphaev4_pipe pipe;
8930
8931 pipe = alphaev4_insn_pipe (insn);
8932 switch (pipe)
8933 {
8934 case EV4_STOP:
8935 /* Force complex instructions to start new groups. */
8936 if (in_use)
8937 goto done;
8938
8939 /* If this is a completely unrecognized insn, it's an asm.
8940 We don't know how long it is, so record length as -1 to
8941 signal a needed realignment. */
8942 if (recog_memoized (insn) < 0)
8943 len = -1;
8944 else
8945 len = get_attr_length (insn);
8946 goto next_and_done;
8947
8948 case EV4_IBX:
8949 if (in_use & EV4_IB0)
8950 {
8951 if (in_use & EV4_IB1)
8952 goto done;
8953 in_use |= EV4_IB1;
8954 }
8955 else
8956 in_use |= EV4_IB0 | EV4_IBX;
8957 break;
8958
8959 case EV4_IB0:
8960 if (in_use & EV4_IB0)
8961 {
8962 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8963 goto done;
8964 in_use |= EV4_IB1;
8965 }
8966 in_use |= EV4_IB0;
8967 break;
8968
8969 case EV4_IB1:
8970 if (in_use & EV4_IB1)
8971 goto done;
8972 in_use |= EV4_IB1;
8973 break;
8974
8975 default:
8976 gcc_unreachable ();
8977 }
8978 len += 4;
8979
8980 /* Haifa doesn't do well scheduling branches. */
8981 if (GET_CODE (insn) == JUMP_INSN)
8982 goto next_and_done;
8983
8984 next:
8985 insn = next_nonnote_insn (insn);
8986
8987 if (!insn || ! INSN_P (insn))
8988 goto done;
8989
8990 /* Let Haifa tell us where it thinks insn group boundaries are. */
8991 if (GET_MODE (insn) == TImode)
8992 goto done;
8993
8994 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8995 goto next;
8996 }
8997
8998 next_and_done:
8999 insn = next_nonnote_insn (insn);
9000
9001 done:
9002 *plen = len;
9003 *pin_use = in_use;
9004 return insn;
9005 }
9006
9007 /* IN_USE is a mask of the slots currently filled within the insn group.
9008 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9009 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9010
9011 LEN is, of course, the length of the group in bytes. */
9012
9013 static rtx
9014 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9015 {
9016 int len, in_use;
9017
9018 len = in_use = 0;
9019
9020 if (! INSN_P (insn)
9021 || GET_CODE (PATTERN (insn)) == CLOBBER
9022 || GET_CODE (PATTERN (insn)) == USE)
9023 goto next_and_done;
9024
9025 while (1)
9026 {
9027 enum alphaev5_pipe pipe;
9028
9029 pipe = alphaev5_insn_pipe (insn);
9030 switch (pipe)
9031 {
9032 case EV5_STOP:
9033 /* Force complex instructions to start new groups. */
9034 if (in_use)
9035 goto done;
9036
9037 /* If this is a completely unrecognized insn, it's an asm.
9038 We don't know how long it is, so record length as -1 to
9039 signal a needed realignment. */
9040 if (recog_memoized (insn) < 0)
9041 len = -1;
9042 else
9043 len = get_attr_length (insn);
9044 goto next_and_done;
9045
9046 /* ??? In most of the cases below, we would like to assert that this
9047 never happens, as it would indicate an error either in Haifa or
9048 in the scheduling description. Unfortunately, Haifa never
9049 schedules the last instruction of the BB, so we don't have
9050 an accurate TI bit to go off of. */
9051 case EV5_E01:
9052 if (in_use & EV5_E0)
9053 {
9054 if (in_use & EV5_E1)
9055 goto done;
9056 in_use |= EV5_E1;
9057 }
9058 else
9059 in_use |= EV5_E0 | EV5_E01;
9060 break;
9061
9062 case EV5_E0:
9063 if (in_use & EV5_E0)
9064 {
9065 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9066 goto done;
9067 in_use |= EV5_E1;
9068 }
9069 in_use |= EV5_E0;
9070 break;
9071
9072 case EV5_E1:
9073 if (in_use & EV5_E1)
9074 goto done;
9075 in_use |= EV5_E1;
9076 break;
9077
9078 case EV5_FAM:
9079 if (in_use & EV5_FA)
9080 {
9081 if (in_use & EV5_FM)
9082 goto done;
9083 in_use |= EV5_FM;
9084 }
9085 else
9086 in_use |= EV5_FA | EV5_FAM;
9087 break;
9088
9089 case EV5_FA:
9090 if (in_use & EV5_FA)
9091 goto done;
9092 in_use |= EV5_FA;
9093 break;
9094
9095 case EV5_FM:
9096 if (in_use & EV5_FM)
9097 goto done;
9098 in_use |= EV5_FM;
9099 break;
9100
9101 case EV5_NONE:
9102 break;
9103
9104 default:
9105 gcc_unreachable ();
9106 }
9107 len += 4;
9108
9109 /* Haifa doesn't do well scheduling branches. */
9110 /* ??? If this is predicted not-taken, slotting continues, except
9111 that no more IBR, FBR, or JSR insns may be slotted. */
9112 if (GET_CODE (insn) == JUMP_INSN)
9113 goto next_and_done;
9114
9115 next:
9116 insn = next_nonnote_insn (insn);
9117
9118 if (!insn || ! INSN_P (insn))
9119 goto done;
9120
9121 /* Let Haifa tell us where it thinks insn group boundaries are. */
9122 if (GET_MODE (insn) == TImode)
9123 goto done;
9124
9125 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9126 goto next;
9127 }
9128
9129 next_and_done:
9130 insn = next_nonnote_insn (insn);
9131
9132 done:
9133 *plen = len;
9134 *pin_use = in_use;
9135 return insn;
9136 }
9137
9138 static rtx
9139 alphaev4_next_nop (int *pin_use)
9140 {
9141 int in_use = *pin_use;
9142 rtx nop;
9143
9144 if (!(in_use & EV4_IB0))
9145 {
9146 in_use |= EV4_IB0;
9147 nop = gen_nop ();
9148 }
9149 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9150 {
9151 in_use |= EV4_IB1;
9152 nop = gen_nop ();
9153 }
9154 else if (TARGET_FP && !(in_use & EV4_IB1))
9155 {
9156 in_use |= EV4_IB1;
9157 nop = gen_fnop ();
9158 }
9159 else
9160 nop = gen_unop ();
9161
9162 *pin_use = in_use;
9163 return nop;
9164 }
9165
9166 static rtx
9167 alphaev5_next_nop (int *pin_use)
9168 {
9169 int in_use = *pin_use;
9170 rtx nop;
9171
9172 if (!(in_use & EV5_E1))
9173 {
9174 in_use |= EV5_E1;
9175 nop = gen_nop ();
9176 }
9177 else if (TARGET_FP && !(in_use & EV5_FA))
9178 {
9179 in_use |= EV5_FA;
9180 nop = gen_fnop ();
9181 }
9182 else if (TARGET_FP && !(in_use & EV5_FM))
9183 {
9184 in_use |= EV5_FM;
9185 nop = gen_fnop ();
9186 }
9187 else
9188 nop = gen_unop ();
9189
9190 *pin_use = in_use;
9191 return nop;
9192 }
9193
9194 /* The instruction group alignment main loop. */
9195
9196 static void
9197 alpha_align_insns (unsigned int max_align,
9198 rtx (*next_group) (rtx, int *, int *),
9199 rtx (*next_nop) (int *))
9200 {
9201 /* ALIGN is the known alignment for the insn group. */
9202 unsigned int align;
9203 /* OFS is the offset of the current insn in the insn group. */
9204 int ofs;
9205 int prev_in_use, in_use, len, ldgp;
9206 rtx i, next;
9207
9208 /* Let shorten_branches take care of assigning alignments to code labels. */
9209 shorten_branches (get_insns ());
9210
9211 if (align_functions < 4)
9212 align = 4;
9213 else if ((unsigned int) align_functions < max_align)
9214 align = align_functions;
9215 else
9216 align = max_align;
9217
9218 ofs = prev_in_use = 0;
9219 i = get_insns ();
9220 if (GET_CODE (i) == NOTE)
9221 i = next_nonnote_insn (i);
9222
9223 ldgp = alpha_function_needs_gp ? 8 : 0;
9224
9225 while (i)
9226 {
9227 next = (*next_group) (i, &in_use, &len);
9228
9229 /* When we see a label, resync alignment etc. */
9230 if (GET_CODE (i) == CODE_LABEL)
9231 {
9232 unsigned int new_align = 1 << label_to_alignment (i);
9233
9234 if (new_align >= align)
9235 {
9236 align = new_align < max_align ? new_align : max_align;
9237 ofs = 0;
9238 }
9239
9240 else if (ofs & (new_align-1))
9241 ofs = (ofs | (new_align-1)) + 1;
9242 gcc_assert (!len);
9243 }
9244
9245 /* Handle complex instructions specially. */
9246 else if (in_use == 0)
9247 {
9248 /* Asms will have length < 0. This is a signal that we have
9249 lost alignment knowledge. Assume, however, that the asm
9250 will not mis-align instructions. */
9251 if (len < 0)
9252 {
9253 ofs = 0;
9254 align = 4;
9255 len = 0;
9256 }
9257 }
9258
9259 /* If the known alignment is smaller than the recognized insn group,
9260 realign the output. */
9261 else if ((int) align < len)
9262 {
9263 unsigned int new_log_align = len > 8 ? 4 : 3;
9264 rtx prev, where;
9265
9266 where = prev = prev_nonnote_insn (i);
9267 if (!where || GET_CODE (where) != CODE_LABEL)
9268 where = i;
9269
9270 /* Can't realign between a call and its gp reload. */
9271 if (! (TARGET_EXPLICIT_RELOCS
9272 && prev && GET_CODE (prev) == CALL_INSN))
9273 {
9274 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9275 align = 1 << new_log_align;
9276 ofs = 0;
9277 }
9278 }
9279
9280 /* We may not insert padding inside the initial ldgp sequence. */
9281 else if (ldgp > 0)
9282 ldgp -= len;
9283
9284 /* If the group won't fit in the same INT16 as the previous,
9285 we need to add padding to keep the group together. Rather
9286 than simply leaving the insn filling to the assembler, we
9287 can make use of the knowledge of what sorts of instructions
9288 were issued in the previous group to make sure that all of
9289 the added nops are really free. */
9290 else if (ofs + len > (int) align)
9291 {
9292 int nop_count = (align - ofs) / 4;
9293 rtx where;
9294
9295 /* Insert nops before labels, branches, and calls to truly merge
9296 the execution of the nops with the previous instruction group. */
9297 where = prev_nonnote_insn (i);
9298 if (where)
9299 {
9300 if (GET_CODE (where) == CODE_LABEL)
9301 {
9302 rtx where2 = prev_nonnote_insn (where);
9303 if (where2 && GET_CODE (where2) == JUMP_INSN)
9304 where = where2;
9305 }
9306 else if (GET_CODE (where) == INSN)
9307 where = i;
9308 }
9309 else
9310 where = i;
9311
9312 do
9313 emit_insn_before ((*next_nop)(&prev_in_use), where);
9314 while (--nop_count);
9315 ofs = 0;
9316 }
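/* For instance (numbers invented): with align = 16, ofs = 8 and a new
   group of len = 12, ofs + len = 20 > 16, so nop_count = (16 - 8) / 4
   = 2 in the block above, and two "free" nops are slotted in before
   WHERE to push the group to the next 16-byte boundary. */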
9317
9318 ofs = (ofs + len) & (align - 1);
9319 prev_in_use = in_use;
9320 i = next;
9321 }
9322 }
9323 \f
9324 /* Machine dependent reorg pass. */
9325
9326 static void
9327 alpha_reorg (void)
9328 {
9329 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9330 alpha_handle_trap_shadows ();
9331
9332 /* Due to the number of extra trapb insns, don't bother fixing up
9333 alignment when trap precision is instruction. Moreover, we can
9334 only do our job when sched2 is run. */
9335 if (optimize && !optimize_size
9336 && alpha_tp != ALPHA_TP_INSN
9337 && flag_schedule_insns_after_reload)
9338 {
9339 if (alpha_tune == PROCESSOR_EV4)
9340 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9341 else if (alpha_tune == PROCESSOR_EV5)
9342 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9343 }
9344 }
9345 \f
9346 #if !TARGET_ABI_UNICOSMK
9347
9348 #ifdef HAVE_STAMP_H
9349 #include <stamp.h>
9350 #endif
9351
9352 static void
9353 alpha_file_start (void)
9354 {
9355 #ifdef OBJECT_FORMAT_ELF
9356 /* If emitting dwarf2 debug information, we cannot generate a .file
9357 directive to start the file, as it will conflict with dwarf2out
9358 file numbers. So it's only useful when emitting mdebug output. */
9359 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9360 #endif
9361
9362 default_file_start ();
9363 #ifdef MS_STAMP
9364 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9365 #endif
9366
9367 fputs ("\t.set noreorder\n", asm_out_file);
9368 fputs ("\t.set volatile\n", asm_out_file);
9369 if (!TARGET_ABI_OPEN_VMS)
9370 fputs ("\t.set noat\n", asm_out_file);
9371 if (TARGET_EXPLICIT_RELOCS)
9372 fputs ("\t.set nomacro\n", asm_out_file);
9373 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9374 {
9375 const char *arch;
9376
9377 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9378 arch = "ev6";
9379 else if (TARGET_MAX)
9380 arch = "pca56";
9381 else if (TARGET_BWX)
9382 arch = "ev56";
9383 else if (alpha_cpu == PROCESSOR_EV5)
9384 arch = "ev5";
9385 else
9386 arch = "ev4";
9387
9388 fprintf (asm_out_file, "\t.arch %s\n", arch);
9389 }
9390 }
9391 #endif
9392
9393 #ifdef OBJECT_FORMAT_ELF
9394
9395 /* Return a section for X. The only special thing we do here is to
9396 honor small data. */
9397
9398 static section *
9399 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9400 unsigned HOST_WIDE_INT align)
9401 {
9402 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9403 /* ??? Consider using mergeable sdata sections. */
9404 return sdata_section;
9405 else
9406 return default_elf_select_rtx_section (mode, x, align);
9407 }
9408
9409 #endif /* OBJECT_FORMAT_ELF */
9410 \f
9411 /* Structure to collect function names for final output in link section. */
9412 /* Note that items marked with GTY can't be ifdef'ed out. */
9413
9414 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9415 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9416
9417 struct alpha_links GTY(())
9418 {
9419 int num;
9420 rtx linkage;
9421 enum links_kind lkind;
9422 enum reloc_kind rkind;
9423 };
9424
9425 struct alpha_funcs GTY(())
9426 {
9427 int num;
9428 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9429 links;
9430 };
9431
9432 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9433 splay_tree alpha_links_tree;
9434 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9435 splay_tree alpha_funcs_tree;
9436
9437 static GTY(()) int alpha_funcs_num;
9438
9439 #if TARGET_ABI_OPEN_VMS
9440
9441 /* Return the VMS argument type corresponding to MODE. */
9442
9443 enum avms_arg_type
9444 alpha_arg_type (enum machine_mode mode)
9445 {
9446 switch (mode)
9447 {
9448 case SFmode:
9449 return TARGET_FLOAT_VAX ? FF : FS;
9450 case DFmode:
9451 return TARGET_FLOAT_VAX ? FD : FT;
9452 default:
9453 return I64;
9454 }
9455 }
9456
9457 /* Return an rtx for an integer representing the VMS Argument Information
9458 register value. */
9459
9460 rtx
9461 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9462 {
9463 unsigned HOST_WIDE_INT regval = cum.num_args;
9464 int i;
9465
9466 for (i = 0; i < 6; i++)
9467 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9468
9469 return GEN_INT (regval);
9470 }
9471 \f
9472 /* Make (or fake) .linkage entry for function call.
9473
9474 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9475
9476 Return a SYMBOL_REF rtx for the linkage. */
9477
9478 rtx
9479 alpha_need_linkage (const char *name, int is_local)
9480 {
9481 splay_tree_node node;
9482 struct alpha_links *al;
9483
9484 if (name[0] == '*')
9485 name++;
9486
9487 if (is_local)
9488 {
9489 struct alpha_funcs *cfaf;
9490
9491 if (!alpha_funcs_tree)
9492 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9493 splay_tree_compare_pointers);
9494
9495 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9496
9497 cfaf->links = 0;
9498 cfaf->num = ++alpha_funcs_num;
9499
9500 splay_tree_insert (alpha_funcs_tree,
9501 (splay_tree_key) current_function_decl,
9502 (splay_tree_value) cfaf);
9503 }
9504
9505 if (alpha_links_tree)
9506 {
9507 /* Is this name already defined? */
9508
9509 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9510 if (node)
9511 {
9512 al = (struct alpha_links *) node->value;
9513 if (is_local)
9514 {
9515 /* Defined here but external assumed. */
9516 if (al->lkind == KIND_EXTERN)
9517 al->lkind = KIND_LOCAL;
9518 }
9519 else
9520 {
9521 /* Used here but unused assumed. */
9522 if (al->lkind == KIND_UNUSED)
9523 al->lkind = KIND_LOCAL;
9524 }
9525 return al->linkage;
9526 }
9527 }
9528 else
9529 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9530
9531 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9532 name = ggc_strdup (name);
9533
9534 /* Assume external if no definition. */
9535 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9536
9537 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9538 get_identifier (name);
9539
9540 /* Construct a SYMBOL_REF for us to call. */
9541 {
9542 size_t name_len = strlen (name);
9543 char *linksym = alloca (name_len + 6);
9544 linksym[0] = '$';
9545 memcpy (linksym + 1, name, name_len);
9546 memcpy (linksym + 1 + name_len, "..lk", 5);
9547 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9548 ggc_alloc_string (linksym, name_len + 5));
9549 }
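/* So for a hypothetical routine named foo, the buffer above holds
   "$foo..lk" (name_len + 6 bytes covers '$', the name, "..lk" and the
   terminating NUL), and that is the SYMBOL_REF calls will use until
   alpha_write_linkage emits the actual linkage pair. */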
9550
9551 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9552 (splay_tree_value) al);
9553
9554 return al->linkage;
9555 }
9556
9557 rtx
9558 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9559 {
9560 splay_tree_node cfunnode;
9561 struct alpha_funcs *cfaf;
9562 struct alpha_links *al;
9563 const char *name = XSTR (linkage, 0);
9564
9565 cfaf = (struct alpha_funcs *) 0;
9566 al = (struct alpha_links *) 0;
9567
9568 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9569 cfaf = (struct alpha_funcs *) cfunnode->value;
9570
9571 if (cfaf->links)
9572 {
9573 splay_tree_node lnode;
9574
9575 /* Is this name already defined? */
9576
9577 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9578 if (lnode)
9579 al = (struct alpha_links *) lnode->value;
9580 }
9581 else
9582 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9583
9584 if (!al)
9585 {
9586 size_t name_len;
9587 size_t buflen;
9588 char buf [512];
9589 char *linksym;
9590 splay_tree_node node = 0;
9591 struct alpha_links *anl;
9592
9593 if (name[0] == '*')
9594 name++;
9595
9596 name_len = strlen (name);
9597
9598 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9599 al->num = cfaf->num;
9600
9601 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9602 if (node)
9603 {
9604 anl = (struct alpha_links *) node->value;
9605 al->lkind = anl->lkind;
9606 }
9607
9608 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9609 buflen = strlen (buf);
9610 linksym = alloca (buflen + 1);
9611 memcpy (linksym, buf, buflen + 1);
9612
9613 al->linkage = gen_rtx_SYMBOL_REF
9614 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9615
9616 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9617 (splay_tree_value) al);
9618 }
9619
9620 if (rflag)
9621 al->rkind = KIND_CODEADDR;
9622 else
9623 al->rkind = KIND_LINKAGE;
9624
9625 if (lflag)
9626 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9627 else
9628 return al->linkage;
9629 }
9630
9631 static int
9632 alpha_write_one_linkage (splay_tree_node node, void *data)
9633 {
9634 const char *const name = (const char *) node->key;
9635 struct alpha_links *link = (struct alpha_links *) node->value;
9636 FILE *stream = (FILE *) data;
9637
9638 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9639 if (link->rkind == KIND_CODEADDR)
9640 {
9641 if (link->lkind == KIND_LOCAL)
9642 {
9643 /* Local and used */
9644 fprintf (stream, "\t.quad %s..en\n", name);
9645 }
9646 else
9647 {
9648 /* External and used, request code address. */
9649 fprintf (stream, "\t.code_address %s\n", name);
9650 }
9651 }
9652 else
9653 {
9654 if (link->lkind == KIND_LOCAL)
9655 {
9656 /* Local and used, build linkage pair. */
9657 fprintf (stream, "\t.quad %s..en\n", name);
9658 fprintf (stream, "\t.quad %s\n", name);
9659 }
9660 else
9661 {
9662 /* External and used, request linkage pair. */
9663 fprintf (stream, "\t.linkage %s\n", name);
9664 }
9665 }
9666
9667 return 0;
9668 }
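/* As a concrete (invented) example, a local, used linkage entry for a
   function BAR with num == 3 is written as
   $3..BAR..lk:
   .quad BAR..en
   .quad BAR
   whereas an external one instead requests ".linkage BAR" from the
   linker. */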
9669
9670 static void
9671 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9672 {
9673 splay_tree_node node;
9674 struct alpha_funcs *func;
9675
9676 fprintf (stream, "\t.link\n");
9677 fprintf (stream, "\t.align 3\n");
9678 in_section = NULL;
9679
9680 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9681 func = (struct alpha_funcs *) node->value;
9682
9683 fputs ("\t.name ", stream);
9684 assemble_name (stream, funname);
9685 fputs ("..na\n", stream);
9686 ASM_OUTPUT_LABEL (stream, funname);
9687 fprintf (stream, "\t.pdesc ");
9688 assemble_name (stream, funname);
9689 fprintf (stream, "..en,%s\n",
9690 alpha_procedure_type == PT_STACK ? "stack"
9691 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9692
9693 if (func->links)
9694 {
9695 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9696 /* splay_tree_delete (func->links); */
9697 }
9698 }
9699
9700 /* Given a decl, a section name, and whether the decl initializer
9701 has relocs, choose attributes for the section. */
9702
9703 #define SECTION_VMS_OVERLAY SECTION_FORGET
9704 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9705 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9706
9707 static unsigned int
9708 vms_section_type_flags (tree decl, const char *name, int reloc)
9709 {
9710 unsigned int flags = default_section_type_flags (decl, name, reloc);
9711
9712 if (decl && DECL_ATTRIBUTES (decl)
9713 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9714 flags |= SECTION_VMS_OVERLAY;
9715 if (decl && DECL_ATTRIBUTES (decl)
9716 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9717 flags |= SECTION_VMS_GLOBAL;
9718 if (decl && DECL_ATTRIBUTES (decl)
9719 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9720 flags |= SECTION_VMS_INITIALIZE;
9721
9722 return flags;
9723 }
9724
9725 /* Switch to an arbitrary section NAME with attributes as specified
9726 by FLAGS. ALIGN specifies any known alignment requirements for
9727 the section; 0 if the default should be used. */
9728
9729 static void
9730 vms_asm_named_section (const char *name, unsigned int flags,
9731 tree decl ATTRIBUTE_UNUSED)
9732 {
9733 fputc ('\n', asm_out_file);
9734 fprintf (asm_out_file, ".section\t%s", name);
9735
9736 if (flags & SECTION_VMS_OVERLAY)
9737 fprintf (asm_out_file, ",OVR");
9738 if (flags & SECTION_VMS_GLOBAL)
9739 fprintf (asm_out_file, ",GBL");
9740 if (flags & SECTION_VMS_INITIALIZE)
9741 fprintf (asm_out_file, ",NOMOD");
9742 if (flags & SECTION_DEBUG)
9743 fprintf (asm_out_file, ",NOWRT");
9744
9745 fputc ('\n', asm_out_file);
9746 }
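/* For a hypothetical variable placed with
   __attribute__ ((section ("MY_DATA"), global))
   the output is approximately
   .section MY_DATA,GBL
   with the other keywords appended as the remaining flags apply. */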
9747
9748 /* Record an element in the table of global constructors. SYMBOL is
9749 a SYMBOL_REF of the function to be called; PRIORITY is a number
9750 between 0 and MAX_INIT_PRIORITY.
9751
9752 Differs from default_ctors_section_asm_out_constructor in that the
9753 width of the .ctors entry is always 64 bits, rather than the 32 bits
9754 used by a normal pointer. */
9755
9756 static void
9757 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9758 {
9759 switch_to_section (ctors_section);
9760 assemble_align (BITS_PER_WORD);
9761 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9762 }
9763
9764 static void
9765 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9766 {
9767 switch_to_section (dtors_section);
9768 assemble_align (BITS_PER_WORD);
9769 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9770 }
9771 #else
9772
9773 rtx
9774 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9775 int is_local ATTRIBUTE_UNUSED)
9776 {
9777 return NULL_RTX;
9778 }
9779
9780 rtx
9781 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9782 tree cfundecl ATTRIBUTE_UNUSED,
9783 int lflag ATTRIBUTE_UNUSED,
9784 int rflag ATTRIBUTE_UNUSED)
9785 {
9786 return NULL_RTX;
9787 }
9788
9789 #endif /* TARGET_ABI_OPEN_VMS */
9790 \f
9791 #if TARGET_ABI_UNICOSMK
9792
9793 /* This evaluates to true if we do not know how to pass TYPE solely in
9794 registers. This is the case for all arguments that do not fit in two
9795 registers. */
9796
9797 static bool
9798 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9799 {
9800 if (type == NULL)
9801 return false;
9802
9803 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9804 return true;
9805 if (TREE_ADDRESSABLE (type))
9806 return true;
9807
9808 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9809 }
9810
9811 /* Define the offset between two registers, one to be eliminated, and the
9812 other its replacement, at the start of a routine. */
9813
9814 int
9815 unicosmk_initial_elimination_offset (int from, int to)
9816 {
9817 int fixed_size;
9818
9819 fixed_size = alpha_sa_size();
9820 if (fixed_size != 0)
9821 fixed_size += 48;
9822
9823 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9824 return -fixed_size;
9825 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9826 return 0;
9827 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9828 return (ALPHA_ROUND (current_function_outgoing_args_size)
9829 + ALPHA_ROUND (get_frame_size()));
9830 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9831 return (ALPHA_ROUND (fixed_size)
9832 + ALPHA_ROUND (get_frame_size()
9833 + current_function_outgoing_args_size));
9834 else
9835 gcc_unreachable ();
9836 }
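
/* A worked example of the computation above, with made-up numbers: if
   alpha_sa_size () returns 16, fixed_size becomes 16 + 48 = 64, so
   eliminating the frame pointer to the hard frame pointer yields -64,
   while eliminating the arg pointer to the stack pointer yields
   ALPHA_ROUND (64) + ALPHA_ROUND (frame size + outgoing args size).  */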
9837
9838 /* Output the module name for .ident and .end directives. We have to strip
9839 directories and make sure that the module name starts with a letter
9840 or '$'. */
9841
9842 static void
9843 unicosmk_output_module_name (FILE *file)
9844 {
9845 const char *name = lbasename (main_input_filename);
9846 unsigned len = strlen (name);
9847 char *clean_name = alloca (len + 2);
9848 char *ptr = clean_name;
9849
9850 /* CAM only accepts module names that start with a letter or '$'. We
9851 prefix the module name with a '$' if necessary. */
9852
9853 if (!ISALPHA (*name))
9854 *ptr++ = '$';
9855 memcpy (ptr, name, len + 1);
9856 clean_symbol_name (clean_name);
9857 fputs (clean_name, file);
9858 }
9859
9860 /* Output the definition of a common variable. */
9861
9862 void
9863 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9864 {
9865 tree name_tree;
9866 printf ("T3E__: common %s\n", name);
9867
9868 in_section = NULL;
9869 fputs("\t.endp\n\n\t.psect ", file);
9870 assemble_name(file, name);
9871 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9872 fprintf(file, "\t.byte\t0:%d\n", size);
9873
9874 /* Mark the symbol as defined in this module. */
9875 name_tree = get_identifier (name);
9876 TREE_ASM_WRITTEN (name_tree) = 1;
9877 }
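
/* For illustration (the name and numbers are hypothetical): a 16-byte
   common symbol FOO with 64-bit alignment (align == 64, so
   floor_log2 (64 / 8) == 3) produces roughly

	.endp

	.psect FOO,3,common
	.byte	0:16
*/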
9878
9879 #define SECTION_PUBLIC SECTION_MACH_DEP
9880 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9881 static int current_section_align;
9882
9883 /* A get_unnamed_section callback for switching to the text section. */
9884
9885 static void
9886 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9887 {
9888 static int count = 0;
9889 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9890 }
9891
9892 /* A get_unnamed_section callback for switching to the data section. */
9893
9894 static void
9895 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9896 {
9897 static int count = 1;
9898 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9899 }
9900
9901 /* Implement TARGET_ASM_INIT_SECTIONS.
9902
9903 The Cray assembler is really weird with respect to sections. It has only
9904 named sections and you can't reopen a section once it has been closed.
9905 This means that we have to generate unique names whenever we want to
9906 reenter the text or the data section. */
9907
9908 static void
9909 unicosmk_init_sections (void)
9910 {
9911 text_section = get_unnamed_section (SECTION_CODE,
9912 unicosmk_output_text_section_asm_op,
9913 NULL);
9914 data_section = get_unnamed_section (SECTION_WRITE,
9915 unicosmk_output_data_section_asm_op,
9916 NULL);
9917 readonly_data_section = data_section;
9918 }
9919
9920 static unsigned int
9921 unicosmk_section_type_flags (tree decl, const char *name,
9922 int reloc ATTRIBUTE_UNUSED)
9923 {
9924 unsigned int flags = default_section_type_flags (decl, name, reloc);
9925
9926 if (!decl)
9927 return flags;
9928
9929 if (TREE_CODE (decl) == FUNCTION_DECL)
9930 {
9931 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9932 if (align_functions_log > current_section_align)
9933 current_section_align = align_functions_log;
9934
9935 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9936 flags |= SECTION_MAIN;
9937 }
9938 else
9939 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9940
9941 if (TREE_PUBLIC (decl))
9942 flags |= SECTION_PUBLIC;
9943
9944 return flags;
9945 }
9946
9947 /* Generate a section name for decl and associate it with the
9948 declaration. */
9949
9950 static void
9951 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9952 {
9953 const char *name;
9954 int len;
9955
9956 gcc_assert (decl);
9957
9958 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9959 name = default_strip_name_encoding (name);
9960 len = strlen (name);
9961
9962 if (TREE_CODE (decl) == FUNCTION_DECL)
9963 {
9964 char *string;
9965
9966 /* It is essential that we prefix the section name here because
9967 otherwise the section names generated for constructors and
9968 destructors confuse collect2. */
9969
9970 string = alloca (len + 6);
9971 sprintf (string, "code@%s", name);
9972 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9973 }
9974 else if (TREE_PUBLIC (decl))
9975 DECL_SECTION_NAME (decl) = build_string (len, name);
9976 else
9977 {
9978 char *string;
9979
9980 string = alloca (len + 6);
9981 sprintf (string, "data@%s", name);
9982 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9983 }
9984 }
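
/* Under the scheme above, a function foo ends up in section "code@foo", a
   non-public variable bar in "data@bar", and a public variable baz in a
   section simply named "baz".  (foo, bar and baz are placeholder names.)  */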
9985
9986 /* Switch to an arbitrary section NAME with attributes as specified
9987 by FLAGS. ALIGN specifies any known alignment requirements for
9988 the section; 0 if the default should be used. */
9989
9990 static void
9991 unicosmk_asm_named_section (const char *name, unsigned int flags,
9992 tree decl ATTRIBUTE_UNUSED)
9993 {
9994 const char *kind;
9995
9996 /* Close the previous section. */
9997
9998 fputs ("\t.endp\n\n", asm_out_file);
9999
10000 /* Find out what kind of section we are opening. */
10001
10002 if (flags & SECTION_MAIN)
10003 fputs ("\t.start\tmain\n", asm_out_file);
10004
10005 if (flags & SECTION_CODE)
10006 kind = "code";
10007 else if (flags & SECTION_PUBLIC)
10008 kind = "common";
10009 else
10010 kind = "data";
10011
10012 if (current_section_align != 0)
10013 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10014 current_section_align, kind);
10015 else
10016 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10017 }
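
/* A sketch of the result: switching to the section of an ordinary function
   foo (section "code@foo") with current_section_align == 4 would emit

	.endp

	.psect	code@foo,4,code

   whereas a section with no recorded alignment omits the alignment field.
   The name and number are illustrative only.  */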
10018
10019 static void
10020 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10021 {
10022 if (DECL_P (decl)
10023 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10024 unicosmk_unique_section (decl, 0);
10025 }
10026
10027 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10028 in code sections because .align fills unused space with zeroes. */
10029
10030 void
10031 unicosmk_output_align (FILE *file, int align)
10032 {
10033 if (inside_function)
10034 fprintf (file, "\tgcc@code@align\t%d\n", align);
10035 else
10036 fprintf (file, "\t.align\t%d\n", align);
10037 }
10038
10039 /* Add a case vector to the current function's list of deferred case
10040 vectors. Case vectors have to be put into a separate section because CAM
10041 does not allow data definitions in code sections. */
10042
10043 void
10044 unicosmk_defer_case_vector (rtx lab, rtx vec)
10045 {
10046 struct machine_function *machine = cfun->machine;
10047
10048 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10049 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10050 machine->addr_list);
10051 }
10052
10053 /* Output a case vector. */
10054
10055 static void
10056 unicosmk_output_addr_vec (FILE *file, rtx vec)
10057 {
10058 rtx lab = XEXP (vec, 0);
10059 rtx body = XEXP (vec, 1);
10060 int vlen = XVECLEN (body, 0);
10061 int idx;
10062
10063 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10064
10065 for (idx = 0; idx < vlen; idx++)
10066 {
10067 ASM_OUTPUT_ADDR_VEC_ELT
10068 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10069 }
10070 }
10071
10072 /* Output current function's deferred case vectors. */
10073
10074 static void
10075 unicosmk_output_deferred_case_vectors (FILE *file)
10076 {
10077 struct machine_function *machine = cfun->machine;
10078 rtx t;
10079
10080 if (machine->addr_list == NULL_RTX)
10081 return;
10082
10083 switch_to_section (data_section);
10084 for (t = machine->addr_list; t; t = XEXP (t, 1))
10085 unicosmk_output_addr_vec (file, XEXP (t, 0));
10086 }
10087
10088 /* Generate the name of the SSIB section for the current function. */
10089
10090 #define SSIB_PREFIX "__SSIB_"
10091 #define SSIB_PREFIX_LEN 7
10092
10093 static const char *
10094 unicosmk_ssib_name (void)
10095 {
10096 /* This is ok since CAM won't be able to deal with names longer than that
10097 anyway. */
10098
10099 static char name[256];
10100
10101 rtx x;
10102 const char *fnname;
10103 int len;
10104
10105 x = DECL_RTL (cfun->decl);
10106 gcc_assert (GET_CODE (x) == MEM);
10107 x = XEXP (x, 0);
10108 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10109 fnname = XSTR (x, 0);
10110
10111 len = strlen (fnname);
10112 if (len + SSIB_PREFIX_LEN > 255)
10113 len = 255 - SSIB_PREFIX_LEN;
10114
10115 strcpy (name, SSIB_PREFIX);
10116 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10117 name[len + SSIB_PREFIX_LEN] = 0;
10118
10119 return name;
10120 }
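
/* For example, a function named foo gets the SSIB section name "__SSIB_foo";
   names longer than 255 - SSIB_PREFIX_LEN characters are silently truncated
   to fit the static buffer.  */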
10121
10122 /* Set up the dynamic subprogram information block (DSIB) and update the
10123 frame pointer register ($15) for subroutines which have a frame. If the
10124 subroutine doesn't have a frame, simply increment $15. */
10125
10126 static void
10127 unicosmk_gen_dsib (unsigned long *imaskP)
10128 {
10129 if (alpha_procedure_type == PT_STACK)
10130 {
10131 const char *ssib_name;
10132 rtx mem;
10133
10134 /* Allocate 64 bytes for the DSIB. */
10135
10136 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10137 GEN_INT (-64))));
10138 emit_insn (gen_blockage ());
10139
10140 /* Save the return address. */
10141
10142 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10143 set_mem_alias_set (mem, alpha_sr_alias_set);
10144 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10145 (*imaskP) &= ~(1UL << REG_RA);
10146
10147 /* Save the old frame pointer. */
10148
10149 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10150 set_mem_alias_set (mem, alpha_sr_alias_set);
10151 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10152 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10153
10154 emit_insn (gen_blockage ());
10155
10156 /* Store the SSIB pointer. */
10157
10158 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10159 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10160 set_mem_alias_set (mem, alpha_sr_alias_set);
10161
10162 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10163 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10164 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10165
10166 /* Save the CIW index. */
10167
10168 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10169 set_mem_alias_set (mem, alpha_sr_alias_set);
10170 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10171
10172 emit_insn (gen_blockage ());
10173
10174 /* Set the new frame pointer. */
10175
10176 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10177 stack_pointer_rtx, GEN_INT (64))));
10178
10179 }
10180 else
10181 {
10182 /* Increment the frame pointer register to indicate that we do not
10183 have a frame. */
10184
10185 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10186 hard_frame_pointer_rtx, const1_rtx)));
10187 }
10188 }
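
/* To summarize the frame case above: the stack pointer is dropped by 64
   bytes for the DSIB, the return address is stored at offset 56, the old
   frame pointer at 48, the SSIB pointer at 32 and the CIW index (taken from
   register $25) at 24, and $15 is finally set to point just past the block
   (sp + 64).  */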
10189
10190 /* Output the static subroutine information block for the current
10191 function. */
10192
10193 static void
10194 unicosmk_output_ssib (FILE *file, const char *fnname)
10195 {
10196 int len;
10197 int i;
10198 rtx x;
10199 rtx ciw;
10200 struct machine_function *machine = cfun->machine;
10201
10202 in_section = NULL;
10203 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10204 unicosmk_ssib_name ());
10205
10206 /* Some required stuff and the function name length. */
10207
10208 len = strlen (fnname);
10209 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10210
10211 /* Saved registers
10212 ??? We don't do that yet. */
10213
10214 fputs ("\t.quad\t0\n", file);
10215
10216 /* Function address. */
10217
10218 fputs ("\t.quad\t", file);
10219 assemble_name (file, fnname);
10220 putc ('\n', file);
10221
10222 fputs ("\t.quad\t0\n", file);
10223 fputs ("\t.quad\t0\n", file);
10224
10225 /* Function name.
10226 ??? We do it the same way Cray CC does it but this could be
10227 simplified. */
10228
10229 for( i = 0; i < len; i++ )
10230 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10231 if( (len % 8) == 0 )
10232 fputs ("\t.quad\t0\n", file);
10233 else
10234 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10235
10236 /* All call information words used in the function. */
10237
10238 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10239 {
10240 ciw = XEXP (x, 0);
10241 #if HOST_BITS_PER_WIDE_INT == 32
10242 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10243 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10244 #else
10245 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10246 #endif
10247 }
10248 }
10249
10250 /* Add a call information word (CIW) to the list of the current function's
10251 CIWs and return its index.
10252
10253 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10254
10255 rtx
10256 unicosmk_add_call_info_word (rtx x)
10257 {
10258 rtx node;
10259 struct machine_function *machine = cfun->machine;
10260
10261 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10262 if (machine->first_ciw == NULL_RTX)
10263 machine->first_ciw = node;
10264 else
10265 XEXP (machine->last_ciw, 1) = node;
10266
10267 machine->last_ciw = node;
10268 ++machine->ciw_count;
10269
10270 return GEN_INT (machine->ciw_count
10271 + strlen (current_function_name ())/8 + 5);
10272 }
10273
10274 /* The Cray assembler doesn't accept extern declarations for symbols which
10275 are defined in the same file. We have to keep track of all global
10276 symbols which are referenced and/or defined in a source file and, at the
10277 end of the file, output extern declarations for those which are
10278 referenced but not defined. */
10279
10280 /* List of identifiers for which an extern declaration might have to be
10281 emitted. */
10282 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10283
10284 struct unicosmk_extern_list
10285 {
10286 struct unicosmk_extern_list *next;
10287 const char *name;
10288 };
10289
10290 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10291
10292 /* Output extern declarations which are required for every asm file. */
10293
10294 static void
10295 unicosmk_output_default_externs (FILE *file)
10296 {
10297 static const char *const externs[] =
10298 { "__T3E_MISMATCH" };
10299
10300 int i;
10301 int n;
10302
10303 n = ARRAY_SIZE (externs);
10304
10305 for (i = 0; i < n; i++)
10306 fprintf (file, "\t.extern\t%s\n", externs[i]);
10307 }
10308
10309 /* Output extern declarations for global symbols which have been
10310 referenced but not defined. */
10311
10312 static void
10313 unicosmk_output_externs (FILE *file)
10314 {
10315 struct unicosmk_extern_list *p;
10316 const char *real_name;
10317 int len;
10318 tree name_tree;
10319
10320 len = strlen (user_label_prefix);
10321 for (p = unicosmk_extern_head; p != 0; p = p->next)
10322 {
10323 /* We have to strip the encoding and possibly remove user_label_prefix
10324 from the identifier in order to handle -fleading-underscore and
10325 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10326 real_name = default_strip_name_encoding (p->name);
10327 if (len && p->name[0] == '*'
10328 && !memcmp (real_name, user_label_prefix, len))
10329 real_name += len;
10330
10331 name_tree = get_identifier (real_name);
10332 if (! TREE_ASM_WRITTEN (name_tree))
10333 {
10334 TREE_ASM_WRITTEN (name_tree) = 1;
10335 fputs ("\t.extern\t", file);
10336 assemble_name (file, p->name);
10337 putc ('\n', file);
10338 }
10339 }
10340 }
10341
10342 /* Record an extern. */
10343
10344 void
10345 unicosmk_add_extern (const char *name)
10346 {
10347 struct unicosmk_extern_list *p;
10348
10349 p = (struct unicosmk_extern_list *)
10350 xmalloc (sizeof (struct unicosmk_extern_list));
10351 p->next = unicosmk_extern_head;
10352 p->name = name;
10353 unicosmk_extern_head = p;
10354 }
10355
10356 /* The Cray assembler generates incorrect code if identifiers which
10357 conflict with register names are used as instruction operands. We have
10358 to replace such identifiers with DEX expressions. */
10359
10360 /* Structure to collect identifiers which have been replaced by DEX
10361 expressions. */
10362 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10363
10364 struct unicosmk_dex {
10365 struct unicosmk_dex *next;
10366 const char *name;
10367 };
10368
10369 /* List of identifiers which have been replaced by DEX expressions. The DEX
10370 number is determined by the position in the list. */
10371
10372 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10373
10374 /* The number of elements in the DEX list. */
10375
10376 static int unicosmk_dex_count = 0;
10377
10378 /* Check if NAME must be replaced by a DEX expression. */
10379
10380 static int
10381 unicosmk_special_name (const char *name)
10382 {
10383 if (name[0] == '*')
10384 ++name;
10385
10386 if (name[0] == '$')
10387 ++name;
10388
10389 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10390 return 0;
10391
10392 switch (name[1])
10393 {
10394 case '1': case '2':
10395 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10396
10397 case '3':
10398 return (name[2] == '\0'
10399 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10400
10401 default:
10402 return (ISDIGIT (name[1]) && name[2] == '\0');
10403 }
10404 }
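
/* For example, "r2", "$f31" and "*R30" all satisfy the test above (they
   clash with CAM register names), while "r32" and "foo" do not.  */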
10405
10406 /* Return the DEX number if X must be replaced by a DEX expression and 0
10407 otherwise. */
10408
10409 static int
10410 unicosmk_need_dex (rtx x)
10411 {
10412 struct unicosmk_dex *dex;
10413 const char *name;
10414 int i;
10415
10416 if (GET_CODE (x) != SYMBOL_REF)
10417 return 0;
10418
10419 name = XSTR (x,0);
10420 if (! unicosmk_special_name (name))
10421 return 0;
10422
10423 i = unicosmk_dex_count;
10424 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10425 {
10426 if (! strcmp (name, dex->name))
10427 return i;
10428 --i;
10429 }
10430
10431 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10432 dex->name = name;
10433 dex->next = unicosmk_dex_list;
10434 unicosmk_dex_list = dex;
10435
10436 ++unicosmk_dex_count;
10437 return unicosmk_dex_count;
10438 }
10439
10440 /* Output the DEX definitions for this file. */
10441
10442 static void
10443 unicosmk_output_dex (FILE *file)
10444 {
10445 struct unicosmk_dex *dex;
10446 int i;
10447
10448 if (unicosmk_dex_list == NULL)
10449 return;
10450
10451 fprintf (file, "\t.dexstart\n");
10452
10453 i = unicosmk_dex_count;
10454 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10455 {
10456 fprintf (file, "\tDEX (%d) = ", i);
10457 assemble_name (file, dex->name);
10458 putc ('\n', file);
10459 --i;
10460 }
10461
10462 fprintf (file, "\t.dexend\n");
10463 }
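
/* A hypothetical example: if the only identifier needing replacement were a
   function named "r2", the output would be roughly

	.dexstart
	DEX (1) = r2
	.dexend
*/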
10464
10465 /* Output text to appear at the beginning of an assembler file. */
10466
10467 static void
10468 unicosmk_file_start (void)
10469 {
10470 int i;
10471
10472 fputs ("\t.ident\t", asm_out_file);
10473 unicosmk_output_module_name (asm_out_file);
10474 fputs ("\n\n", asm_out_file);
10475
10476 /* The Unicos/Mk assembler (CAM) uses different register names: rN for the
10477 integer register N and fN for the floating-point register N. Instead of
10478 trying to use these in alpha.md, we simply emit micro definitions of the
10479 symbols $N and $fN so that they refer to the appropriate registers. */
10483
10484 for (i = 0; i < 32; ++i)
10485 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10486
10487 for (i = 0; i < 32; ++i)
10488 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10489
10490 putc ('\n', asm_out_file);
10491
10492 /* The .align directive fills unused space with zeroes, which does not work
10493 in code sections. We define the macro 'gcc@code@align' which uses nops
10494 instead. Note that it assumes that code sections always have the
10495 biggest possible alignment since . refers to the current offset from
10496 the beginning of the section. */
10497
10498 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10499 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10500 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10501 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10502 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10503 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10504 fputs ("\t.endr\n", asm_out_file);
10505 fputs ("\t.endif\n", asm_out_file);
10506 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10507
10508 /* Output extern declarations which should always be visible. */
10509 unicosmk_output_default_externs (asm_out_file);
10510
10511 /* Open a dummy section. We always need to be inside a section for the
10512 section-switching code to work correctly.
10513 ??? This should be a module id or something like that. I still have to
10514 figure out what the rules for those are. */
10515 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10516 }
10517
10518 /* Output text to appear at the end of an assembler file. This includes all
10519 pending extern declarations and DEX expressions. */
10520
10521 static void
10522 unicosmk_file_end (void)
10523 {
10524 fputs ("\t.endp\n\n", asm_out_file);
10525
10526 /* Output all pending externs. */
10527
10528 unicosmk_output_externs (asm_out_file);
10529
10530 /* Output dex definitions used for functions whose names conflict with
10531 register names. */
10532
10533 unicosmk_output_dex (asm_out_file);
10534
10535 fputs ("\t.end\t", asm_out_file);
10536 unicosmk_output_module_name (asm_out_file);
10537 putc ('\n', asm_out_file);
10538 }
10539
10540 #else
10541
10542 static void
10543 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10544 {}
10545
10546 static void
10547 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10548 {}
10549
10550 static void
10551 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10552 const char * fnname ATTRIBUTE_UNUSED)
10553 {}
10554
10555 rtx
10556 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10557 {
10558 return NULL_RTX;
10559 }
10560
10561 static int
10562 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10563 {
10564 return 0;
10565 }
10566
10567 #endif /* TARGET_ABI_UNICOSMK */
10568
10569 static void
10570 alpha_init_libfuncs (void)
10571 {
10572 if (TARGET_ABI_UNICOSMK)
10573 {
10574 /* Prevent gcc from generating calls to __divsi3. */
10575 set_optab_libfunc (sdiv_optab, SImode, 0);
10576 set_optab_libfunc (udiv_optab, SImode, 0);
10577
10578 /* Use the functions provided by the system library
10579 for DImode integer division. */
10580 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10581 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10582 }
10583 else if (TARGET_ABI_OPEN_VMS)
10584 {
10585 /* Use the VMS runtime library functions for division and
10586 remainder. */
10587 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10588 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10589 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10590 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10591 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10592 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10593 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10594 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10595 }
10596 }
10597
10598 \f
10599 /* Initialize the GCC target structure. */
10600 #if TARGET_ABI_OPEN_VMS
10601 # undef TARGET_ATTRIBUTE_TABLE
10602 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10603 # undef TARGET_SECTION_TYPE_FLAGS
10604 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10605 #endif
10606
10607 #undef TARGET_IN_SMALL_DATA_P
10608 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10609
10610 #if TARGET_ABI_UNICOSMK
10611 # undef TARGET_INSERT_ATTRIBUTES
10612 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10613 # undef TARGET_SECTION_TYPE_FLAGS
10614 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10615 # undef TARGET_ASM_UNIQUE_SECTION
10616 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10617 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10618 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10619 # undef TARGET_ASM_GLOBALIZE_LABEL
10620 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10621 # undef TARGET_MUST_PASS_IN_STACK
10622 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10623 #endif
10624
10625 #undef TARGET_ASM_ALIGNED_HI_OP
10626 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10627 #undef TARGET_ASM_ALIGNED_DI_OP
10628 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10629
10630 /* Default unaligned ops are provided for ELF systems. To get unaligned
10631 data for non-ELF systems, we have to turn off auto alignment. */
10632 #ifndef OBJECT_FORMAT_ELF
10633 #undef TARGET_ASM_UNALIGNED_HI_OP
10634 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10635 #undef TARGET_ASM_UNALIGNED_SI_OP
10636 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10637 #undef TARGET_ASM_UNALIGNED_DI_OP
10638 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10639 #endif
10640
10641 #ifdef OBJECT_FORMAT_ELF
10642 #undef TARGET_ASM_SELECT_RTX_SECTION
10643 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10644 #endif
10645
10646 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10647 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10648
10649 #undef TARGET_INIT_LIBFUNCS
10650 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10651
10652 #if TARGET_ABI_UNICOSMK
10653 #undef TARGET_ASM_FILE_START
10654 #define TARGET_ASM_FILE_START unicosmk_file_start
10655 #undef TARGET_ASM_FILE_END
10656 #define TARGET_ASM_FILE_END unicosmk_file_end
10657 #else
10658 #undef TARGET_ASM_FILE_START
10659 #define TARGET_ASM_FILE_START alpha_file_start
10660 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10661 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10662 #endif
10663
10664 #undef TARGET_SCHED_ADJUST_COST
10665 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10666 #undef TARGET_SCHED_ISSUE_RATE
10667 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10668 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10669 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10670 alpha_multipass_dfa_lookahead
10671
10672 #undef TARGET_HAVE_TLS
10673 #define TARGET_HAVE_TLS HAVE_AS_TLS
10674
10675 #undef TARGET_INIT_BUILTINS
10676 #define TARGET_INIT_BUILTINS alpha_init_builtins
10677 #undef TARGET_EXPAND_BUILTIN
10678 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10679 #undef TARGET_FOLD_BUILTIN
10680 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10681
10682 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10683 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10684 #undef TARGET_CANNOT_COPY_INSN_P
10685 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10686 #undef TARGET_CANNOT_FORCE_CONST_MEM
10687 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10688
10689 #if TARGET_ABI_OSF
10690 #undef TARGET_ASM_OUTPUT_MI_THUNK
10691 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10692 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10693 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10694 #undef TARGET_STDARG_OPTIMIZE_HOOK
10695 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10696 #endif
10697
10698 #undef TARGET_RTX_COSTS
10699 #define TARGET_RTX_COSTS alpha_rtx_costs
10700 #undef TARGET_ADDRESS_COST
10701 #define TARGET_ADDRESS_COST hook_int_rtx_0
10702
10703 #undef TARGET_MACHINE_DEPENDENT_REORG
10704 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10705
10706 #undef TARGET_PROMOTE_FUNCTION_ARGS
10707 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10708 #undef TARGET_PROMOTE_FUNCTION_RETURN
10709 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10710 #undef TARGET_PROMOTE_PROTOTYPES
10711 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10712 #undef TARGET_RETURN_IN_MEMORY
10713 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10714 #undef TARGET_PASS_BY_REFERENCE
10715 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10716 #undef TARGET_SETUP_INCOMING_VARARGS
10717 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10718 #undef TARGET_STRICT_ARGUMENT_NAMING
10719 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10720 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10721 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10722 #undef TARGET_SPLIT_COMPLEX_ARG
10723 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10724 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10725 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10726 #undef TARGET_ARG_PARTIAL_BYTES
10727 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10728
10729 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10730 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10731 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10732 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10733
10734 #undef TARGET_BUILD_BUILTIN_VA_LIST
10735 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10736
10737 /* The Alpha architecture does not require sequential consistency. See
10738 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10739 for an example of how it can be violated in practice. */
10740 #undef TARGET_RELAXED_ORDERING
10741 #define TARGET_RELAXED_ORDERING true
10742
10743 #undef TARGET_DEFAULT_TARGET_FLAGS
10744 #define TARGET_DEFAULT_TARGET_FLAGS \
10745 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10746 #undef TARGET_HANDLE_OPTION
10747 #define TARGET_HANDLE_OPTION alpha_handle_option
10748
10749 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10750 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
10751 #define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
10752 #endif
10753
10754 struct gcc_target targetm = TARGET_INITIALIZER;
10755
10756 \f
10757 #include "gt-alpha.h"