alpha.c (tls_symbolic_operand_1): Trust SYMBOL_REF_TLS_MODEL to be correct.
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
61
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
64
65 static const char * const alpha_cpu_name[] =
66 {
67 "ev4", "ev5", "ev6"
68 };
69
70 /* Specify how accurate floating-point traps need to be. */
71
72 enum alpha_trap_precision alpha_tp;
73
74 /* Specify the floating-point rounding mode. */
75
76 enum alpha_fp_rounding_mode alpha_fprm;
77
78 /* Specify which things cause traps. */
79
80 enum alpha_fp_trap_mode alpha_fptm;
81
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
84
85 struct alpha_compare alpha_compare;
86
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
89
90 static int inside_function = FALSE;
91
92 /* The number of cycles of latency we should assume on memory reads. */
93
94 int alpha_memory_latency = 3;
95
96 /* Whether the function needs the GP. */
97
98 static int alpha_function_needs_gp;
99
100 /* The alias set for prologue/epilogue register save/restore. */
101
102 static GTY(()) int alpha_sr_alias_set;
103
104 /* The assembler name of the current function. */
105
106 static const char *alpha_fnname;
107
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
111
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
118
119 /* Costs of various operations on the different architectures. */
120
121 struct alpha_rtx_cost_data
122 {
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
132 };
133
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
135 {
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
146 },
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
157 },
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
168 },
169 };
170
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
175
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
177 {
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
187 };
188
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
195
196 #define REG_PV 27
197 #define REG_RA 26
198
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
202
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
206
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
211 \f
212 /* Implement TARGET_HANDLE_OPTION. */
213
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
216 {
217 switch (code)
218 {
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
223
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
228
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
233 }
234
235 return true;
236 }
237
238 /* Parse target option strings. */
239
240 void
241 override_options (void)
242 {
243 static const struct cpu_table {
244 const char *const name;
245 const enum processor_type processor;
246 const int flags;
247 } cpu_table[] = {
248 { "ev4", PROCESSOR_EV4, 0 },
249 { "ev45", PROCESSOR_EV4, 0 },
250 { "21064", PROCESSOR_EV4, 0 },
251 { "ev5", PROCESSOR_EV5, 0 },
252 { "21164", PROCESSOR_EV5, 0 },
253 { "ev56", PROCESSOR_EV5, MASK_BWX },
254 { "21164a", PROCESSOR_EV5, MASK_BWX },
255 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
256 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
257 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
258 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
259 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
260 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
261 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
262 { 0, 0, 0 }
263 };
264
265 int i;
266
267 /* Unicos/Mk doesn't have shared libraries. */
268 if (TARGET_ABI_UNICOSMK && flag_pic)
269 {
270 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
271 (flag_pic > 1) ? "PIC" : "pic");
272 flag_pic = 0;
273 }
274
 275 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
276 floating-point instructions. Make that the default for this target. */
277 if (TARGET_ABI_UNICOSMK)
278 alpha_fprm = ALPHA_FPRM_DYN;
279 else
280 alpha_fprm = ALPHA_FPRM_NORM;
281
282 alpha_tp = ALPHA_TP_PROG;
283 alpha_fptm = ALPHA_FPTM_N;
284
285 /* We cannot use su and sui qualifiers for conversion instructions on
286 Unicos/Mk. I'm not sure if this is due to assembler or hardware
287 limitations. Right now, we issue a warning if -mieee is specified
288 and then ignore it; eventually, we should either get it right or
289 disable the option altogether. */
290
291 if (TARGET_IEEE)
292 {
293 if (TARGET_ABI_UNICOSMK)
294 warning (0, "-mieee not supported on Unicos/Mk");
295 else
296 {
297 alpha_tp = ALPHA_TP_INSN;
298 alpha_fptm = ALPHA_FPTM_SU;
299 }
300 }
301
302 if (TARGET_IEEE_WITH_INEXACT)
303 {
304 if (TARGET_ABI_UNICOSMK)
305 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
306 else
307 {
308 alpha_tp = ALPHA_TP_INSN;
309 alpha_fptm = ALPHA_FPTM_SUI;
310 }
311 }
312
313 if (alpha_tp_string)
314 {
315 if (! strcmp (alpha_tp_string, "p"))
316 alpha_tp = ALPHA_TP_PROG;
317 else if (! strcmp (alpha_tp_string, "f"))
318 alpha_tp = ALPHA_TP_FUNC;
319 else if (! strcmp (alpha_tp_string, "i"))
320 alpha_tp = ALPHA_TP_INSN;
321 else
322 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
323 }
324
325 if (alpha_fprm_string)
326 {
327 if (! strcmp (alpha_fprm_string, "n"))
328 alpha_fprm = ALPHA_FPRM_NORM;
329 else if (! strcmp (alpha_fprm_string, "m"))
330 alpha_fprm = ALPHA_FPRM_MINF;
331 else if (! strcmp (alpha_fprm_string, "c"))
332 alpha_fprm = ALPHA_FPRM_CHOP;
333 else if (! strcmp (alpha_fprm_string,"d"))
334 alpha_fprm = ALPHA_FPRM_DYN;
335 else
336 error ("bad value %qs for -mfp-rounding-mode switch",
337 alpha_fprm_string);
338 }
339
340 if (alpha_fptm_string)
341 {
342 if (strcmp (alpha_fptm_string, "n") == 0)
343 alpha_fptm = ALPHA_FPTM_N;
344 else if (strcmp (alpha_fptm_string, "u") == 0)
345 alpha_fptm = ALPHA_FPTM_U;
346 else if (strcmp (alpha_fptm_string, "su") == 0)
347 alpha_fptm = ALPHA_FPTM_SU;
348 else if (strcmp (alpha_fptm_string, "sui") == 0)
349 alpha_fptm = ALPHA_FPTM_SUI;
350 else
351 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
352 }
353
354 if (alpha_cpu_string)
355 {
356 for (i = 0; cpu_table [i].name; i++)
357 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
358 {
359 alpha_tune = alpha_cpu = cpu_table [i].processor;
360 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
361 target_flags |= cpu_table [i].flags;
362 break;
363 }
364 if (! cpu_table [i].name)
365 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
366 }
367
368 if (alpha_tune_string)
369 {
370 for (i = 0; cpu_table [i].name; i++)
371 if (! strcmp (alpha_tune_string, cpu_table [i].name))
372 {
373 alpha_tune = cpu_table [i].processor;
374 break;
375 }
376 if (! cpu_table [i].name)
377 error ("bad value %qs for -mcpu switch", alpha_tune_string);
378 }
379
380 /* Do some sanity checks on the above options. */
381
382 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
383 {
384 warning (0, "trap mode not supported on Unicos/Mk");
385 alpha_fptm = ALPHA_FPTM_N;
386 }
387
388 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
389 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
390 {
391 warning (0, "fp software completion requires -mtrap-precision=i");
392 alpha_tp = ALPHA_TP_INSN;
393 }
394
395 if (alpha_cpu == PROCESSOR_EV6)
396 {
397 /* Except for EV6 pass 1 (not released), we always have precise
398 arithmetic traps. Which means we can do software completion
399 without minding trap shadows. */
400 alpha_tp = ALPHA_TP_PROG;
401 }
402
403 if (TARGET_FLOAT_VAX)
404 {
405 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
406 {
407 warning (0, "rounding mode not supported for VAX floats");
408 alpha_fprm = ALPHA_FPRM_NORM;
409 }
410 if (alpha_fptm == ALPHA_FPTM_SUI)
411 {
412 warning (0, "trap mode not supported for VAX floats");
413 alpha_fptm = ALPHA_FPTM_SU;
414 }
415 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
416 warning (0, "128-bit long double not supported for VAX floats");
417 target_flags &= ~MASK_LONG_DOUBLE_128;
418 }
419
420 {
421 char *end;
422 int lat;
423
424 if (!alpha_mlat_string)
425 alpha_mlat_string = "L1";
426
427 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
428 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
429 ;
430 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
431 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
432 && alpha_mlat_string[2] == '\0')
433 {
434 static int const cache_latency[][4] =
435 {
436 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
437 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
438 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
439 };
440
441 lat = alpha_mlat_string[1] - '0';
442 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
443 {
444 warning (0, "L%d cache latency unknown for %s",
445 lat, alpha_cpu_name[alpha_tune]);
446 lat = 3;
447 }
448 else
449 lat = cache_latency[alpha_tune][lat-1];
450 }
451 else if (! strcmp (alpha_mlat_string, "main"))
452 {
453 /* Most current memories have about 370ns latency. This is
454 a reasonable guess for a fast cpu. */
455 lat = 150;
456 }
457 else
458 {
459 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
460 lat = 3;
461 }
462
463 alpha_memory_latency = lat;
464 }
465
466 /* Default the definition of "small data" to 8 bytes. */
467 if (!g_switch_set)
468 g_switch_value = 8;
469
470 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
471 if (flag_pic == 1)
472 target_flags |= MASK_SMALL_DATA;
473 else if (flag_pic == 2)
474 target_flags &= ~MASK_SMALL_DATA;
475
476 /* Align labels and loops for optimal branching. */
477 /* ??? Kludge these by not doing anything if we don't optimize and also if
478 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
479 if (optimize > 0 && write_symbols != SDB_DEBUG)
480 {
481 if (align_loops <= 0)
482 align_loops = 16;
483 if (align_jumps <= 0)
484 align_jumps = 16;
485 }
486 if (align_functions <= 0)
487 align_functions = 16;
488
489 /* Acquire a unique set number for our register saves and restores. */
490 alpha_sr_alias_set = new_alias_set ();
491
492 /* Register variables and functions with the garbage collector. */
493
494 /* Set up function hooks. */
495 init_machine_status = alpha_init_machine_status;
496
497 /* Tell the compiler when we're using VAX floating point. */
498 if (TARGET_FLOAT_VAX)
499 {
500 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
501 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
502 REAL_MODE_FORMAT (TFmode) = NULL;
503 }
504 }
505 \f
506 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
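/* For example, 0xffff0000ffffffff qualifies (every byte is 0x00 or 0xff),
   while 0x00000000000000f0 does not.  */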
507
508 int
509 zap_mask (HOST_WIDE_INT value)
510 {
511 int i;
512
513 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
514 i++, value >>= 8)
515 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
516 return 0;
517
518 return 1;
519 }
520
521 /* Return true if OP is valid for a particular TLS relocation.
522 We are already guaranteed that OP is a CONST. */
523
524 int
525 tls_symbolic_operand_1 (rtx op, int size, int unspec)
526 {
527 op = XEXP (op, 0);
528
529 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
530 return 0;
531 op = XVECEXP (op, 0, 0);
532
533 if (GET_CODE (op) != SYMBOL_REF)
534 return 0;
535
536 switch (SYMBOL_REF_TLS_MODEL (op))
537 {
538 case TLS_MODEL_LOCAL_DYNAMIC:
539 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
540 case TLS_MODEL_INITIAL_EXEC:
541 return unspec == UNSPEC_TPREL && size == 64;
542 case TLS_MODEL_LOCAL_EXEC:
543 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
544 default:
545 gcc_unreachable ();
546 }
547 }
548
549 /* Used by aligned_memory_operand and unaligned_memory_operand to
550 resolve what reload is going to do with OP if it's a register. */
551
552 rtx
553 resolve_reload_operand (rtx op)
554 {
555 if (reload_in_progress)
556 {
557 rtx tmp = op;
558 if (GET_CODE (tmp) == SUBREG)
559 tmp = SUBREG_REG (tmp);
560 if (GET_CODE (tmp) == REG
561 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
562 {
563 op = reg_equiv_memory_loc[REGNO (tmp)];
564 if (op == 0)
565 return 0;
566 }
567 }
568 return op;
569 }
570
571 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
572 the range defined for C in [I-P]. */
573
574 bool
575 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
576 {
577 switch (c)
578 {
579 case 'I':
580 /* An unsigned 8 bit constant. */
581 return (unsigned HOST_WIDE_INT) value < 0x100;
582 case 'J':
583 /* The constant zero. */
584 return value == 0;
585 case 'K':
586 /* A signed 16 bit constant. */
587 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
588 case 'L':
589 /* A shifted signed 16 bit constant appropriate for LDAH. */
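/* e.g. 0x7fff0000 or -0x10000 qualify: the low 16 bits must be zero and
   the value must fit in a sign-extended 32 bits.  */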
590 return ((value & 0xffff) == 0
591 && ((value) >> 31 == -1 || value >> 31 == 0));
592 case 'M':
593 /* A constant that can be AND'ed with using a ZAP insn. */
594 return zap_mask (value);
595 case 'N':
596 /* A complemented unsigned 8 bit constant. */
597 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
598 case 'O':
599 /* A negated unsigned 8 bit constant. */
600 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
601 case 'P':
602 /* The constant 1, 2 or 3. */
603 return value == 1 || value == 2 || value == 3;
604
605 default:
606 return false;
607 }
608 }
609
610 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
611 matches for C in [GH]. */
612
613 bool
614 alpha_const_double_ok_for_letter_p (rtx value, int c)
615 {
616 switch (c)
617 {
618 case 'G':
619 /* The floating point zero constant. */
620 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
621 && value == CONST0_RTX (GET_MODE (value)));
622
623 case 'H':
624 /* A valid operand of a ZAP insn. */
625 return (GET_MODE (value) == VOIDmode
626 && zap_mask (CONST_DOUBLE_LOW (value))
627 && zap_mask (CONST_DOUBLE_HIGH (value)));
628
629 default:
630 return false;
631 }
632 }
633
 634 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
 635 matches for C. */
636
637 bool
638 alpha_extra_constraint (rtx value, int c)
639 {
640 switch (c)
641 {
642 case 'Q':
643 return normal_memory_operand (value, VOIDmode);
644 case 'R':
645 return direct_call_operand (value, Pmode);
646 case 'S':
647 return (GET_CODE (value) == CONST_INT
648 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
649 case 'T':
650 return GET_CODE (value) == HIGH;
651 case 'U':
652 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
653 case 'W':
654 return (GET_CODE (value) == CONST_VECTOR
655 && value == CONST0_RTX (GET_MODE (value)));
656 default:
657 return false;
658 }
659 }
660
 661 /* The scalar modes supported differ from the default check-what-c-supports
 662 version in that sometimes TFmode is available even when long double
 663 indicates only DFmode. On Unicos/Mk, we have the situation that HImode
 664 doesn't map to any C type, but of course we still support that. */
665
666 static bool
667 alpha_scalar_mode_supported_p (enum machine_mode mode)
668 {
669 switch (mode)
670 {
671 case QImode:
672 case HImode:
673 case SImode:
674 case DImode:
675 case TImode: /* via optabs.c */
676 return true;
677
678 case SFmode:
679 case DFmode:
680 return true;
681
682 case TFmode:
683 return TARGET_HAS_XFLOATING_LIBS;
684
685 default:
686 return false;
687 }
688 }
689
690 /* Alpha implements a couple of integer vector mode operations when
691 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
692 which allows the vectorizer to operate on e.g. move instructions,
693 or when expand_vector_operations can do something useful. */
694
695 static bool
696 alpha_vector_mode_supported_p (enum machine_mode mode)
697 {
698 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
699 }
700
701 /* Return 1 if this function can directly return via $26. */
702
703 int
704 direct_return (void)
705 {
706 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
707 && reload_completed
708 && alpha_sa_size () == 0
709 && get_frame_size () == 0
710 && current_function_outgoing_args_size == 0
711 && current_function_pretend_args_size == 0);
712 }
713
714 /* Return the ADDR_VEC associated with a tablejump insn. */
715
716 rtx
717 alpha_tablejump_addr_vec (rtx insn)
718 {
719 rtx tmp;
720
721 tmp = JUMP_LABEL (insn);
722 if (!tmp)
723 return NULL_RTX;
724 tmp = NEXT_INSN (tmp);
725 if (!tmp)
726 return NULL_RTX;
727 if (GET_CODE (tmp) == JUMP_INSN
728 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
729 return PATTERN (tmp);
730 return NULL_RTX;
731 }
732
733 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
734
735 rtx
736 alpha_tablejump_best_label (rtx insn)
737 {
738 rtx jump_table = alpha_tablejump_addr_vec (insn);
739 rtx best_label = NULL_RTX;
740
741 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
742 there for edge frequency counts from profile data. */
743
744 if (jump_table)
745 {
746 int n_labels = XVECLEN (jump_table, 1);
747 int best_count = -1;
748 int i, j;
749
750 for (i = 0; i < n_labels; i++)
751 {
752 int count = 1;
753
754 for (j = i + 1; j < n_labels; j++)
755 if (XEXP (XVECEXP (jump_table, 1, i), 0)
756 == XEXP (XVECEXP (jump_table, 1, j), 0))
757 count++;
758
759 if (count > best_count)
760 best_count = count, best_label = XVECEXP (jump_table, 1, i);
761 }
762 }
763
764 return best_label ? best_label : const0_rtx;
765 }
766
767 /* Return the TLS model to use for SYMBOL. */
768
769 static enum tls_model
770 tls_symbolic_operand_type (rtx symbol)
771 {
772 enum tls_model model;
773
774 if (GET_CODE (symbol) != SYMBOL_REF)
775 return 0;
776 model = SYMBOL_REF_TLS_MODEL (symbol);
777
778 /* Local-exec with a 64-bit size is the same code as initial-exec. */
779 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
780 model = TLS_MODEL_INITIAL_EXEC;
781
782 return model;
783 }
784 \f
785 /* Return true if the function DECL will share the same GP as any
786 function in the current unit of translation. */
787
788 static bool
789 decl_has_samegp (tree decl)
790 {
791 /* Functions that are not local can be overridden, and thus may
792 not share the same gp. */
793 if (!(*targetm.binds_local_p) (decl))
794 return false;
795
796 /* If -msmall-data is in effect, assume that there is only one GP
797 for the module, and so any local symbol has this property. We
798 need explicit relocations to be able to enforce this for symbols
799 not defined in this unit of translation, however. */
800 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
801 return true;
802
803 /* Functions that are not external are defined in this UoT. */
804 /* ??? Irritatingly, static functions not yet emitted are still
805 marked "external". Apply this to non-static functions only. */
806 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
807 }
808
809 /* Return true if EXP should be placed in the small data section. */
810
811 static bool
812 alpha_in_small_data_p (tree exp)
813 {
814 /* We want to merge strings, so we never consider them small data. */
815 if (TREE_CODE (exp) == STRING_CST)
816 return false;
817
818 /* Functions are never in the small data area. Duh. */
819 if (TREE_CODE (exp) == FUNCTION_DECL)
820 return false;
821
822 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
823 {
824 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
825 if (strcmp (section, ".sdata") == 0
826 || strcmp (section, ".sbss") == 0)
827 return true;
828 }
829 else
830 {
831 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
832
833 /* If this is an incomplete type with size 0, then we can't put it
834 in sdata because it might be too big when completed. */
835 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
836 return true;
837 }
838
839 return false;
840 }
841
842 #if TARGET_ABI_OPEN_VMS
843 static bool
844 alpha_linkage_symbol_p (const char *symname)
845 {
846 int symlen = strlen (symname);
847
848 if (symlen > 4)
849 return strcmp (&symname [symlen - 4], "..lk") == 0;
850
851 return false;
852 }
853
854 #define LINKAGE_SYMBOL_REF_P(X) \
855 ((GET_CODE (X) == SYMBOL_REF \
856 && alpha_linkage_symbol_p (XSTR (X, 0))) \
857 || (GET_CODE (X) == CONST \
858 && GET_CODE (XEXP (X, 0)) == PLUS \
859 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
860 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
861 #endif
862
863 /* legitimate_address_p recognizes an RTL expression that is a valid
864 memory address for an instruction. The MODE argument is the
865 machine mode for the MEM expression that wants to use this address.
866
867 For Alpha, we have either a constant address or the sum of a
868 register and a constant address, or just a register. For DImode,
 869 any of those forms can be surrounded with an AND that clears the
870 low-order three bits; this is an "unaligned" access. */
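/* For instance, (reg:DI 16), (plus:DI (reg:DI 16) (const_int 64)), and
   (and:DI (plus:DI (reg:DI 16) (const_int 5)) (const_int -8)) -- the
   ldq_u form -- all satisfy the checks below.  */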
871
872 bool
873 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
874 {
875 /* If this is an ldq_u type address, discard the outer AND. */
876 if (mode == DImode
877 && GET_CODE (x) == AND
878 && GET_CODE (XEXP (x, 1)) == CONST_INT
879 && INTVAL (XEXP (x, 1)) == -8)
880 x = XEXP (x, 0);
881
882 /* Discard non-paradoxical subregs. */
883 if (GET_CODE (x) == SUBREG
884 && (GET_MODE_SIZE (GET_MODE (x))
885 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
886 x = SUBREG_REG (x);
887
888 /* Unadorned general registers are valid. */
889 if (REG_P (x)
890 && (strict
891 ? STRICT_REG_OK_FOR_BASE_P (x)
892 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
893 return true;
894
895 /* Constant addresses (i.e. +/- 32k) are valid. */
896 if (CONSTANT_ADDRESS_P (x))
897 return true;
898
899 #if TARGET_ABI_OPEN_VMS
900 if (LINKAGE_SYMBOL_REF_P (x))
901 return true;
902 #endif
903
904 /* Register plus a small constant offset is valid. */
905 if (GET_CODE (x) == PLUS)
906 {
907 rtx ofs = XEXP (x, 1);
908 x = XEXP (x, 0);
909
910 /* Discard non-paradoxical subregs. */
911 if (GET_CODE (x) == SUBREG
912 && (GET_MODE_SIZE (GET_MODE (x))
913 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
914 x = SUBREG_REG (x);
915
916 if (REG_P (x))
917 {
918 if (! strict
919 && NONSTRICT_REG_OK_FP_BASE_P (x)
920 && GET_CODE (ofs) == CONST_INT)
921 return true;
922 if ((strict
923 ? STRICT_REG_OK_FOR_BASE_P (x)
924 : NONSTRICT_REG_OK_FOR_BASE_P (x))
925 && CONSTANT_ADDRESS_P (ofs))
926 return true;
927 }
928 }
929
930 /* If we're managing explicit relocations, LO_SUM is valid, as
931 are small data symbols. */
932 else if (TARGET_EXPLICIT_RELOCS)
933 {
934 if (small_symbolic_operand (x, Pmode))
935 return true;
936
937 if (GET_CODE (x) == LO_SUM)
938 {
939 rtx ofs = XEXP (x, 1);
940 x = XEXP (x, 0);
941
942 /* Discard non-paradoxical subregs. */
943 if (GET_CODE (x) == SUBREG
944 && (GET_MODE_SIZE (GET_MODE (x))
945 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
946 x = SUBREG_REG (x);
947
948 /* Must have a valid base register. */
949 if (! (REG_P (x)
950 && (strict
951 ? STRICT_REG_OK_FOR_BASE_P (x)
952 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
953 return false;
954
955 /* The symbol must be local. */
956 if (local_symbolic_operand (ofs, Pmode)
957 || dtp32_symbolic_operand (ofs, Pmode)
958 || tp32_symbolic_operand (ofs, Pmode))
959 return true;
960 }
961 }
962
963 return false;
964 }
965
966 /* Build the SYMBOL_REF for __tls_get_addr. */
967
968 static GTY(()) rtx tls_get_addr_libfunc;
969
970 static rtx
971 get_tls_get_addr (void)
972 {
973 if (!tls_get_addr_libfunc)
974 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
975 return tls_get_addr_libfunc;
976 }
977
978 /* Try machine-dependent ways of modifying an illegitimate address
979 to be legitimate. If we find one, return the new, valid address. */
980
981 rtx
982 alpha_legitimize_address (rtx x, rtx scratch,
983 enum machine_mode mode ATTRIBUTE_UNUSED)
984 {
985 HOST_WIDE_INT addend;
986
987 /* If the address is (plus reg const_int) and the CONST_INT is not a
988 valid offset, compute the high part of the constant and add it to
989 the register. Then our address is (plus temp low-part-const). */
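/* For example, an offset of 0x1234abcd is rewritten as
   (reg + 0x12350000) + (-0x5433): the 0x12350000 part becomes an
   ldah-style add and -0x5433 is left as an in-range 16-bit displacement.  */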
990 if (GET_CODE (x) == PLUS
991 && GET_CODE (XEXP (x, 0)) == REG
992 && GET_CODE (XEXP (x, 1)) == CONST_INT
993 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
994 {
995 addend = INTVAL (XEXP (x, 1));
996 x = XEXP (x, 0);
997 goto split_addend;
998 }
999
1000 /* If the address is (const (plus FOO const_int)), find the low-order
1001 part of the CONST_INT. Then load FOO plus any high-order part of the
1002 CONST_INT into a register. Our address is (plus reg low-part-const).
1003 This is done to reduce the number of GOT entries. */
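/* E.g. for (const (plus (symbol_ref "x") (const_int 0x6000))) we load "x"
   by itself and add the split offset separately, so "x+0x6000" does not
   need a GOT entry of its own.  */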
1004 if (!no_new_pseudos
1005 && GET_CODE (x) == CONST
1006 && GET_CODE (XEXP (x, 0)) == PLUS
1007 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1008 {
1009 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1010 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1011 goto split_addend;
1012 }
1013
1014 /* If we have a (plus reg const), emit the load as in (2), then add
1015 the two registers, and finally generate (plus reg low-part-const) as
1016 our address. */
1017 if (!no_new_pseudos
1018 && GET_CODE (x) == PLUS
1019 && GET_CODE (XEXP (x, 0)) == REG
1020 && GET_CODE (XEXP (x, 1)) == CONST
1021 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1022 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1023 {
1024 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1025 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1026 XEXP (XEXP (XEXP (x, 1), 0), 0),
1027 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1028 goto split_addend;
1029 }
1030
1031 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1032 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1033 {
1034 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1035
1036 switch (tls_symbolic_operand_type (x))
1037 {
1038 case TLS_MODEL_NONE:
1039 break;
1040
1041 case TLS_MODEL_GLOBAL_DYNAMIC:
1042 start_sequence ();
1043
1044 r0 = gen_rtx_REG (Pmode, 0);
1045 r16 = gen_rtx_REG (Pmode, 16);
1046 tga = get_tls_get_addr ();
1047 dest = gen_reg_rtx (Pmode);
1048 seq = GEN_INT (alpha_next_sequence_number++);
1049
1050 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1051 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1052 insn = emit_call_insn (insn);
1053 CONST_OR_PURE_CALL_P (insn) = 1;
1054 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1055
1056 insn = get_insns ();
1057 end_sequence ();
1058
1059 emit_libcall_block (insn, dest, r0, x);
1060 return dest;
1061
1062 case TLS_MODEL_LOCAL_DYNAMIC:
1063 start_sequence ();
1064
1065 r0 = gen_rtx_REG (Pmode, 0);
1066 r16 = gen_rtx_REG (Pmode, 16);
1067 tga = get_tls_get_addr ();
1068 scratch = gen_reg_rtx (Pmode);
1069 seq = GEN_INT (alpha_next_sequence_number++);
1070
1071 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1072 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1073 insn = emit_call_insn (insn);
1074 CONST_OR_PURE_CALL_P (insn) = 1;
1075 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1076
1077 insn = get_insns ();
1078 end_sequence ();
1079
1080 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1081 UNSPEC_TLSLDM_CALL);
1082 emit_libcall_block (insn, scratch, r0, eqv);
1083
1084 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1085 eqv = gen_rtx_CONST (Pmode, eqv);
1086
1087 if (alpha_tls_size == 64)
1088 {
1089 dest = gen_reg_rtx (Pmode);
1090 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1091 emit_insn (gen_adddi3 (dest, dest, scratch));
1092 return dest;
1093 }
1094 if (alpha_tls_size == 32)
1095 {
1096 insn = gen_rtx_HIGH (Pmode, eqv);
1097 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1098 scratch = gen_reg_rtx (Pmode);
1099 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1100 }
1101 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1102
1103 case TLS_MODEL_INITIAL_EXEC:
1104 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1105 eqv = gen_rtx_CONST (Pmode, eqv);
1106 tp = gen_reg_rtx (Pmode);
1107 scratch = gen_reg_rtx (Pmode);
1108 dest = gen_reg_rtx (Pmode);
1109
1110 emit_insn (gen_load_tp (tp));
1111 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1112 emit_insn (gen_adddi3 (dest, tp, scratch));
1113 return dest;
1114
1115 case TLS_MODEL_LOCAL_EXEC:
1116 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1117 eqv = gen_rtx_CONST (Pmode, eqv);
1118 tp = gen_reg_rtx (Pmode);
1119
1120 emit_insn (gen_load_tp (tp));
1121 if (alpha_tls_size == 32)
1122 {
1123 insn = gen_rtx_HIGH (Pmode, eqv);
1124 insn = gen_rtx_PLUS (Pmode, tp, insn);
1125 tp = gen_reg_rtx (Pmode);
1126 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1127 }
1128 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1129
1130 default:
1131 gcc_unreachable ();
1132 }
1133
1134 if (local_symbolic_operand (x, Pmode))
1135 {
1136 if (small_symbolic_operand (x, Pmode))
1137 return x;
1138 else
1139 {
1140 if (!no_new_pseudos)
1141 scratch = gen_reg_rtx (Pmode);
1142 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1143 gen_rtx_HIGH (Pmode, x)));
1144 return gen_rtx_LO_SUM (Pmode, scratch, x);
1145 }
1146 }
1147 }
1148
1149 return NULL;
1150
1151 split_addend:
1152 {
1153 HOST_WIDE_INT low, high;
1154
1155 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1156 addend -= low;
1157 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1158 addend -= high;
1159
1160 if (addend)
1161 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1162 (no_new_pseudos ? scratch : NULL_RTX),
1163 1, OPTAB_LIB_WIDEN);
1164 if (high)
1165 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1166 (no_new_pseudos ? scratch : NULL_RTX),
1167 1, OPTAB_LIB_WIDEN);
1168
1169 return plus_constant (x, low);
1170 }
1171 }
1172
1173 /* Primarily this is required for TLS symbols, but given that our move
1174 patterns *ought* to be able to handle any symbol at any time, we
1175 should never be spilling symbolic operands to the constant pool, ever. */
1176
1177 static bool
1178 alpha_cannot_force_const_mem (rtx x)
1179 {
1180 enum rtx_code code = GET_CODE (x);
1181 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1182 }
1183
1184 /* We do not allow indirect calls to be optimized into sibling calls, nor
1185 can we allow a call to a function with a different GP to be optimized
1186 into a sibcall. */
1187
1188 static bool
1189 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1190 {
1191 /* Can't do indirect tail calls, since we don't know if the target
1192 uses the same GP. */
1193 if (!decl)
1194 return false;
1195
1196 /* Otherwise, we can make a tail call if the target function shares
1197 the same GP. */
1198 return decl_has_samegp (decl);
1199 }
1200
1201 int
1202 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1203 {
1204 rtx x = *px;
1205
1206 /* Don't re-split. */
1207 if (GET_CODE (x) == LO_SUM)
1208 return -1;
1209
1210 return small_symbolic_operand (x, Pmode) != 0;
1211 }
1212
1213 static int
1214 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1215 {
1216 rtx x = *px;
1217
1218 /* Don't re-split. */
1219 if (GET_CODE (x) == LO_SUM)
1220 return -1;
1221
1222 if (small_symbolic_operand (x, Pmode))
1223 {
1224 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1225 *px = x;
1226 return -1;
1227 }
1228
1229 return 0;
1230 }
1231
1232 rtx
1233 split_small_symbolic_operand (rtx x)
1234 {
1235 x = copy_insn (x);
1236 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1237 return x;
1238 }
1239
1240 /* Indicate that INSN cannot be duplicated. This is true for any insn
1241 that we've marked with gpdisp relocs, since those have to stay in
1242 1-1 correspondence with one another.
1243
1244 Technically we could copy them if we could set up a mapping from one
1245 sequence number to another, across the set of insns to be duplicated.
1246 This seems overly complicated and error-prone since interblock motion
1247 from sched-ebb could move one of the pair of insns to a different block.
1248
1249 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1250 then they'll be in a different block from their ldgp. Which could lead
1251 the bb reorder code to think that it would be ok to copy just the block
1252 containing the call and branch to the block containing the ldgp. */
1253
1254 static bool
1255 alpha_cannot_copy_insn_p (rtx insn)
1256 {
1257 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1258 return false;
1259 if (recog_memoized (insn) >= 0)
1260 return get_attr_cannot_copy (insn);
1261 else
1262 return false;
1263 }
1264
1265
1266 /* Try a machine-dependent way of reloading an illegitimate address
1267 operand. If we find one, push the reload and return the new rtx. */
1268
1269 rtx
1270 alpha_legitimize_reload_address (rtx x,
1271 enum machine_mode mode ATTRIBUTE_UNUSED,
1272 int opnum, int type,
1273 int ind_levels ATTRIBUTE_UNUSED)
1274 {
1275 /* We must recognize output that we have already generated ourselves. */
1276 if (GET_CODE (x) == PLUS
1277 && GET_CODE (XEXP (x, 0)) == PLUS
1278 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1279 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1280 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1281 {
1282 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1283 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1284 opnum, type);
1285 return x;
1286 }
1287
1288 /* We wish to handle large displacements off a base register by
1289 splitting the addend across an ldah and the mem insn. This
 1290 cuts the number of extra insns needed from 3 to 1. */
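/* For instance, (plus (reg) 0x40001234) is reloaded as
   (plus (plus (reg) 0x40000000) 0x1234): only the inner ldah-style add
   needs a reload, and the 16-bit displacement stays in the mem.  The
   high + low check below rejects values such as 0x7fffffff, where the
   split would wrap around 32 bits.  */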
1291 if (GET_CODE (x) == PLUS
1292 && GET_CODE (XEXP (x, 0)) == REG
1293 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1294 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1295 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1296 {
1297 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1298 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1299 HOST_WIDE_INT high
1300 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1301
1302 /* Check for 32-bit overflow. */
1303 if (high + low != val)
1304 return NULL_RTX;
1305
1306 /* Reload the high part into a base reg; leave the low part
1307 in the mem directly. */
1308 x = gen_rtx_PLUS (GET_MODE (x),
1309 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1310 GEN_INT (high)),
1311 GEN_INT (low));
1312
1313 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1314 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1315 opnum, type);
1316 return x;
1317 }
1318
1319 return NULL_RTX;
1320 }
1321 \f
1322 /* Compute a (partial) cost for rtx X. Return true if the complete
1323 cost has been computed, and false if subexpressions should be
1324 scanned. In either case, *TOTAL contains the cost result. */
1325
1326 static bool
1327 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1328 {
1329 enum machine_mode mode = GET_MODE (x);
1330 bool float_mode_p = FLOAT_MODE_P (mode);
1331 const struct alpha_rtx_cost_data *cost_data;
1332
1333 if (optimize_size)
1334 cost_data = &alpha_rtx_cost_size;
1335 else
1336 cost_data = &alpha_rtx_cost_data[alpha_tune];
1337
1338 switch (code)
1339 {
1340 case CONST_INT:
1341 /* If this is an 8-bit constant, return zero since it can be used
1342 nearly anywhere with no cost. If it is a valid operand for an
1343 ADD or AND, likewise return 0 if we know it will be used in that
1344 context. Otherwise, return 2 since it might be used there later.
1345 All other constants take at least two insns. */
1346 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1347 {
1348 *total = 0;
1349 return true;
1350 }
1351 /* FALLTHRU */
1352
1353 case CONST_DOUBLE:
1354 if (x == CONST0_RTX (mode))
1355 *total = 0;
1356 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1357 || (outer_code == AND && and_operand (x, VOIDmode)))
1358 *total = 0;
1359 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1360 *total = 2;
1361 else
1362 *total = COSTS_N_INSNS (2);
1363 return true;
1364
1365 case CONST:
1366 case SYMBOL_REF:
1367 case LABEL_REF:
1368 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1369 *total = COSTS_N_INSNS (outer_code != MEM);
1370 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1371 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1372 else if (tls_symbolic_operand_type (x))
1373 /* Estimate of cost for call_pal rduniq. */
1374 /* ??? How many insns do we emit here? More than one... */
1375 *total = COSTS_N_INSNS (15);
1376 else
1377 /* Otherwise we do a load from the GOT. */
1378 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1379 return true;
1380
1381 case HIGH:
1382 /* This is effectively an add_operand. */
1383 *total = 2;
1384 return true;
1385
1386 case PLUS:
1387 case MINUS:
1388 if (float_mode_p)
1389 *total = cost_data->fp_add;
1390 else if (GET_CODE (XEXP (x, 0)) == MULT
1391 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1392 {
1393 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1394 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1395 return true;
1396 }
1397 return false;
1398
1399 case MULT:
1400 if (float_mode_p)
1401 *total = cost_data->fp_mult;
1402 else if (mode == DImode)
1403 *total = cost_data->int_mult_di;
1404 else
1405 *total = cost_data->int_mult_si;
1406 return false;
1407
1408 case ASHIFT:
1409 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1410 && INTVAL (XEXP (x, 1)) <= 3)
1411 {
1412 *total = COSTS_N_INSNS (1);
1413 return false;
1414 }
1415 /* FALLTHRU */
1416
1417 case ASHIFTRT:
1418 case LSHIFTRT:
1419 *total = cost_data->int_shift;
1420 return false;
1421
1422 case IF_THEN_ELSE:
1423 if (float_mode_p)
1424 *total = cost_data->fp_add;
1425 else
1426 *total = cost_data->int_cmov;
1427 return false;
1428
1429 case DIV:
1430 case UDIV:
1431 case MOD:
1432 case UMOD:
1433 if (!float_mode_p)
1434 *total = cost_data->int_div;
1435 else if (mode == SFmode)
1436 *total = cost_data->fp_div_sf;
1437 else
1438 *total = cost_data->fp_div_df;
1439 return false;
1440
1441 case MEM:
1442 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1443 return true;
1444
1445 case NEG:
1446 if (! float_mode_p)
1447 {
1448 *total = COSTS_N_INSNS (1);
1449 return false;
1450 }
1451 /* FALLTHRU */
1452
1453 case ABS:
1454 if (! float_mode_p)
1455 {
1456 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1457 return false;
1458 }
1459 /* FALLTHRU */
1460
1461 case FLOAT:
1462 case UNSIGNED_FLOAT:
1463 case FIX:
1464 case UNSIGNED_FIX:
1465 case FLOAT_TRUNCATE:
1466 *total = cost_data->fp_add;
1467 return false;
1468
1469 case FLOAT_EXTEND:
1470 if (GET_CODE (XEXP (x, 0)) == MEM)
1471 *total = 0;
1472 else
1473 *total = cost_data->fp_add;
1474 return false;
1475
1476 default:
1477 return false;
1478 }
1479 }
1480 \f
1481 /* REF is an alignable memory location. Place an aligned SImode
1482 reference into *PALIGNED_MEM and the number of bits to shift into
1483 *PBITNUM. SCRATCH is a free register for use in reloading out
1484 of range stack slots. */
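/* For a little-endian HImode reference at byte offset 6 from an aligned
   base, for example, *PALIGNED_MEM is the SImode word at base+4 and
   *PBITNUM is 16.  */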
1485
1486 void
1487 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1488 {
1489 rtx base;
1490 HOST_WIDE_INT offset = 0;
1491
1492 gcc_assert (GET_CODE (ref) == MEM);
1493
1494 if (reload_in_progress
1495 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1496 {
1497 base = find_replacement (&XEXP (ref, 0));
1498
1499 gcc_assert (memory_address_p (GET_MODE (ref), base));
1500 }
1501 else
1502 base = XEXP (ref, 0);
1503
1504 if (GET_CODE (base) == PLUS)
1505 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1506
1507 *paligned_mem
1508 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
1509
1510 if (WORDS_BIG_ENDIAN)
1511 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
1512 + (offset & 3) * 8));
1513 else
1514 *pbitnum = GEN_INT ((offset & 3) * 8);
1515 }
1516
1517 /* Similar, but just get the address. Handle the two reload cases.
1518 Add EXTRA_OFFSET to the address we return. */
1519
1520 rtx
1521 get_unaligned_address (rtx ref, int extra_offset)
1522 {
1523 rtx base;
1524 HOST_WIDE_INT offset = 0;
1525
1526 gcc_assert (GET_CODE (ref) == MEM);
1527
1528 if (reload_in_progress
1529 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1530 {
1531 base = find_replacement (&XEXP (ref, 0));
1532
1533 gcc_assert (memory_address_p (GET_MODE (ref), base));
1534 }
1535 else
1536 base = XEXP (ref, 0);
1537
1538 if (GET_CODE (base) == PLUS)
1539 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1540
1541 return plus_constant (base, offset + extra_offset);
1542 }
1543
1544 /* On the Alpha, all (non-symbolic) constants except zero go into
1545 a floating-point register via memory. Note that we cannot
1546 return anything that is not a subset of CLASS, and that some
1547 symbolic constants cannot be dropped to memory. */
1548
1549 enum reg_class
1550 alpha_preferred_reload_class(rtx x, enum reg_class class)
1551 {
1552 /* Zero is present in any register class. */
1553 if (x == CONST0_RTX (GET_MODE (x)))
1554 return class;
1555
1556 /* These sorts of constants we can easily drop to memory. */
1557 if (GET_CODE (x) == CONST_INT
1558 || GET_CODE (x) == CONST_DOUBLE
1559 || GET_CODE (x) == CONST_VECTOR)
1560 {
1561 if (class == FLOAT_REGS)
1562 return NO_REGS;
1563 if (class == ALL_REGS)
1564 return GENERAL_REGS;
1565 return class;
1566 }
1567
1568 /* All other kinds of constants should not (and in the case of HIGH
1569 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1570 secondary reload. */
1571 if (CONSTANT_P (x))
1572 return (class == ALL_REGS ? GENERAL_REGS : class);
1573
1574 return class;
1575 }
1576
1577 /* Loading and storing HImode or QImode values to and from memory
1578 usually requires a scratch register. The exceptions are loading
1579 QImode and HImode from an aligned address to a general register
1580 unless byte instructions are permitted.
1581
1582 We also cannot load an unaligned address or a paradoxical SUBREG
1583 into an FP register.
1584
1585 We also cannot do integral arithmetic into FP regs, as might result
1586 from register elimination into a DImode fp register. */
1587
1588 enum reg_class
1589 secondary_reload_class (enum reg_class class, enum machine_mode mode,
1590 rtx x, int in)
1591 {
1592 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1593 {
1594 if (GET_CODE (x) == MEM
1595 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1596 || (GET_CODE (x) == SUBREG
1597 && (GET_CODE (SUBREG_REG (x)) == MEM
1598 || (GET_CODE (SUBREG_REG (x)) == REG
1599 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1600 {
1601 if (!in || !aligned_memory_operand(x, mode))
1602 return GENERAL_REGS;
1603 }
1604 }
1605
1606 if (class == FLOAT_REGS)
1607 {
1608 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1609 return GENERAL_REGS;
1610
1611 if (GET_CODE (x) == SUBREG
1612 && (GET_MODE_SIZE (GET_MODE (x))
1613 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1614 return GENERAL_REGS;
1615
1616 if (in && INTEGRAL_MODE_P (mode)
1617 && ! (memory_operand (x, mode) || x == const0_rtx))
1618 return GENERAL_REGS;
1619 }
1620
1621 return NO_REGS;
1622 }
1623 \f
1624 /* Subfunction of the following function. Update the flags of any MEM
1625 found in part of X. */
1626
1627 static int
1628 alpha_set_memflags_1 (rtx *xp, void *data)
1629 {
1630 rtx x = *xp, orig = (rtx) data;
1631
1632 if (GET_CODE (x) != MEM)
1633 return 0;
1634
1635 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1636 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1637 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1638 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1639 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1640
1641 /* Sadly, we cannot use alias sets because the extra aliasing
1642 produced by the AND interferes. Given that two-byte quantities
1643 are the only thing we would be able to differentiate anyway,
1644 there does not seem to be any point in convoluting the early
1645 out of the alias check. */
1646
1647 return -1;
1648 }
1649
1650 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1651 generated to perform a memory operation, look for any MEMs in either
1652 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1653 volatile flags from REF into each of the MEMs found. If REF is not
1654 a MEM, don't do anything. */
1655
1656 void
1657 alpha_set_memflags (rtx insn, rtx ref)
1658 {
1659 rtx *base_ptr;
1660
1661 if (GET_CODE (ref) != MEM)
1662 return;
1663
1664 /* This is only called from alpha.md, after having had something
1665 generated from one of the insn patterns. So if everything is
1666 zero, the pattern is already up-to-date. */
1667 if (!MEM_VOLATILE_P (ref)
1668 && !MEM_IN_STRUCT_P (ref)
1669 && !MEM_SCALAR_P (ref)
1670 && !MEM_NOTRAP_P (ref)
1671 && !MEM_READONLY_P (ref))
1672 return;
1673
1674 if (INSN_P (insn))
1675 base_ptr = &PATTERN (insn);
1676 else
1677 base_ptr = &insn;
1678 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1679 }
1680 \f
1681 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1682 int, bool);
1683
1684 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1685 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1686 and return pc_rtx if successful. */
1687
1688 static rtx
1689 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1690 HOST_WIDE_INT c, int n, bool no_output)
1691 {
1692 HOST_WIDE_INT new;
1693 int i, bits;
1694 /* Use a pseudo if highly optimizing and still generating RTL. */
1695 rtx subtarget
1696 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1697 rtx temp, insn;
1698
1699 /* If this is a sign-extended 32-bit constant, we can do this in at most
1700 three insns, so do it if we have enough insns left. We always have
1701 a sign-extended 32-bit constant when compiling on a narrow machine. */
1702
1703 if (HOST_BITS_PER_WIDE_INT != 64
1704 || c >> 31 == -1 || c >> 31 == 0)
1705 {
1706 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1707 HOST_WIDE_INT tmp1 = c - low;
1708 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1709 HOST_WIDE_INT extra = 0;
1710
1711 /* If HIGH will be interpreted as negative but the constant is
1712 positive, we must adjust it to do two ldha insns. */
1713
1714 if ((high & 0x8000) != 0 && c >= 0)
1715 {
1716 extra = 0x4000;
1717 tmp1 -= 0x40000000;
1718 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1719 }
1720
1721 if (c == low || (low == 0 && extra == 0))
1722 {
1723 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1724 but that meant that we can't handle INT_MIN on 32-bit machines
1725 (like NT/Alpha), because we recurse indefinitely through
1726 emit_move_insn to gen_movdi. So instead, since we know exactly
1727 what we want, create it explicitly. */
1728
1729 if (no_output)
1730 return pc_rtx;
1731 if (target == NULL)
1732 target = gen_reg_rtx (mode);
1733 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1734 return target;
1735 }
1736 else if (n >= 2 + (extra != 0))
1737 {
1738 if (no_output)
1739 return pc_rtx;
1740 if (no_new_pseudos)
1741 {
1742 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1743 temp = target;
1744 }
1745 else
1746 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1747 subtarget, mode);
1748
1749 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1750 This means that if we go through expand_binop, we'll try to
1751 generate extensions, etc, which will require new pseudos, which
1752 will fail during some split phases. The SImode add patterns
1753 still exist, but are not named. So build the insns by hand. */
1754
1755 if (extra != 0)
1756 {
1757 if (! subtarget)
1758 subtarget = gen_reg_rtx (mode);
1759 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1760 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1761 emit_insn (insn);
1762 temp = subtarget;
1763 }
1764
1765 if (target == NULL)
1766 target = gen_reg_rtx (mode);
1767 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1768 insn = gen_rtx_SET (VOIDmode, target, insn);
1769 emit_insn (insn);
1770 return target;
1771 }
1772 }
1773
1774 /* If we couldn't do it that way, try some other methods. But if we have
1775 no instructions left, don't bother. Likewise, if this is SImode and
1776 we can't make pseudos, we can't do anything since the expand_binop
1777 and expand_unop calls will widen and try to make pseudos. */
1778
1779 if (n == 1 || (mode == SImode && no_new_pseudos))
1780 return 0;
1781
1782 /* Next, see if we can load a related constant and then shift and possibly
1783 negate it to get the constant we want. Try this once each increasing
1784 numbers of insns. */
1785
1786 for (i = 1; i < n; i++)
1787 {
1788 /* First, see if minus some low bits, we've an easy load of
1789 high bits. */
1790
1791 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1792 if (new != 0)
1793 {
1794 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1795 if (temp)
1796 {
1797 if (no_output)
1798 return temp;
1799 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1800 target, 0, OPTAB_WIDEN);
1801 }
1802 }
1803
1804 /* Next try complementing. */
1805 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1806 if (temp)
1807 {
1808 if (no_output)
1809 return temp;
1810 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1811 }
1812
1813 /* Next try to form a constant and do a left shift. We can do this
1814 if some low-order bits are zero; the exact_log2 call below tells
1815 us that information. The bits we are shifting out could be any
1816 value, but here we'll just try the 0- and sign-extended forms of
1817 the constant. To try to increase the chance of having the same
1818 constant in more than one insn, start at the highest number of
1819 bits to shift, but try all possibilities in case a ZAPNOT will
1820 be useful. */
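/* For instance, c == 0x340000000 is not a sign-extended 32-bit value, but
   its low 30 bits are zero, so we can load 0xd with a single lda and then
   shift it left by 30.  */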
1821
1822 bits = exact_log2 (c & -c);
1823 if (bits > 0)
1824 for (; bits > 0; bits--)
1825 {
1826 new = c >> bits;
1827 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1828 if (!temp && c < 0)
1829 {
1830 new = (unsigned HOST_WIDE_INT)c >> bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new,
1832 i, no_output);
1833 }
1834 if (temp)
1835 {
1836 if (no_output)
1837 return temp;
1838 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1839 target, 0, OPTAB_WIDEN);
1840 }
1841 }
1842
1843 /* Now try high-order zero bits. Here we try the shifted-in bits as
1844 all zero and all ones. Be careful to avoid shifting outside the
1845 mode and to avoid shifting outside the host wide int size. */
1846 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1847 confuse the recursive call and set all of the high 32 bits. */
1848
1849 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1850 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1851 if (bits > 0)
1852 for (; bits > 0; bits--)
1853 {
1854 new = c << bits;
1855 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1856 if (!temp)
1857 {
1858 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1859 temp = alpha_emit_set_const (subtarget, mode, new,
1860 i, no_output);
1861 }
1862 if (temp)
1863 {
1864 if (no_output)
1865 return temp;
1866 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1867 target, 1, OPTAB_WIDEN);
1868 }
1869 }
1870
1871 /* Now try high-order 1 bits. We get that with a sign-extension.
1872 But one bit isn't enough here. Be careful to avoid shifting outside
1873 the mode and to avoid shifting outside the host wide int size. */
1874
1875 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1876 - floor_log2 (~ c) - 2);
1877 if (bits > 0)
1878 for (; bits > 0; bits--)
1879 {
1880 new = c << bits;
1881 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1882 if (!temp)
1883 {
1884 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1885 temp = alpha_emit_set_const (subtarget, mode, new,
1886 i, no_output);
1887 }
1888 if (temp)
1889 {
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1893 target, 0, OPTAB_WIDEN);
1894 }
1895 }
1896 }
1897
1898 #if HOST_BITS_PER_WIDE_INT == 64
1899 /* Finally, see if can load a value into the target that is the same as the
1900 constant except that all bytes that are 0 are changed to be 0xff. If we
1901 can, then we can do a ZAPNOT to obtain the desired constant. */
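/* For illustration (register numbers are arbitrary): for
   c = 0xff00ff00ff00ff00 every zero byte is forced to 0xff, giving -1,
   which loads in a single insn; the AND against the resulting byte mask
   is then a ZAPNOT, e.g.
       lda     r1,-1(r31)
       zapnot  r1,0xaa,r1   # keep bytes 1,3,5,7  */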
1902
1903 new = c;
1904 for (i = 0; i < 64; i += 8)
1905 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1906 new |= (HOST_WIDE_INT) 0xff << i;
1907
1908 /* We are only called for SImode and DImode. If this is SImode, ensure that
1909 we are sign extended to a full word. */
1910
1911 if (mode == SImode)
1912 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1913
1914 if (new != c)
1915 {
1916 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1917 if (temp)
1918 {
1919 if (no_output)
1920 return temp;
1921 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1922 target, 0, OPTAB_WIDEN);
1923 }
1924 }
1925 #endif
1926
1927 return 0;
1928 }
1929
1930 /* Try to output insns to set TARGET equal to the constant C if it can be
1931 done in at most N insns. Do all computations in MODE. Returns the place
1932 where the output has been placed if it can be done and the insns have been
1933 emitted. If it would take more than N insns, zero is returned and no
1934 insns are emitted. */
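/* For illustration (register numbers are arbitrary): with C = 0x12345000
   and N >= 2 this would typically emit
       ldah  r1,0x1234(r31)
       lda   r1,0x5000(r1)
   and return the register holding the result.  */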
1935
1936 static rtx
1937 alpha_emit_set_const (rtx target, enum machine_mode mode,
1938 HOST_WIDE_INT c, int n, bool no_output)
1939 {
1940 enum machine_mode orig_mode = mode;
1941 rtx orig_target = target;
1942 rtx result = 0;
1943 int i;
1944
1945 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1946 can't load this constant in one insn, do this in DImode. */
1947 if (no_new_pseudos && mode == SImode
1948 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1949 {
1950 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1951 if (result)
1952 return result;
1953
1954 target = no_output ? NULL : gen_lowpart (DImode, target);
1955 mode = DImode;
1956 }
1957 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1958 {
1959 target = no_output ? NULL : gen_lowpart (DImode, target);
1960 mode = DImode;
1961 }
1962
1963 /* Try 1 insn, then 2, then up to N. */
1964 for (i = 1; i <= n; i++)
1965 {
1966 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1967 if (result)
1968 {
1969 rtx insn, set;
1970
1971 if (no_output)
1972 return result;
1973
1974 insn = get_last_insn ();
1975 set = single_set (insn);
1976 if (! CONSTANT_P (SET_SRC (set)))
1977 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1978 break;
1979 }
1980 }
1981
1982 /* Allow for the case where we changed the mode of TARGET. */
1983 if (result)
1984 {
1985 if (result == target)
1986 result = orig_target;
1987 else if (mode != orig_mode)
1988 result = gen_lowpart (orig_mode, result);
1989 }
1990
1991 return result;
1992 }
1993
1994 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1995 fall back to a straightforward decomposition. We do this to avoid
1996 exponential run times encountered when looking for longer sequences
1997 with alpha_emit_set_const. */
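/* For illustration: on a 64-bit host, c1 = 0x123456789abcdef0 decomposes
   into d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and d4 = 0x12340000,
   and is rebuilt below as (((d4 + d3) << 32) + d2) + d1.  */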
1998
1999 static rtx
2000 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2001 {
2002 HOST_WIDE_INT d1, d2, d3, d4;
2003
2004 /* Decompose the entire word */
2005 #if HOST_BITS_PER_WIDE_INT >= 64
2006 gcc_assert (c2 == -(c1 < 0));
2007 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2008 c1 -= d1;
2009 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2010 c1 = (c1 - d2) >> 32;
2011 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2012 c1 -= d3;
2013 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2014 gcc_assert (c1 == d4);
2015 #else
2016 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2017 c1 -= d1;
2018 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 gcc_assert (c1 == d2);
2020 c2 += (d2 < 0);
2021 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2022 c2 -= d3;
2023 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2024 gcc_assert (c2 == d4);
2025 #endif
2026
2027 /* Construct the high word */
2028 if (d4)
2029 {
2030 emit_move_insn (target, GEN_INT (d4));
2031 if (d3)
2032 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2033 }
2034 else
2035 emit_move_insn (target, GEN_INT (d3));
2036
2037 /* Shift it into place */
2038 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2039
2040 /* Add in the low bits. */
2041 if (d2)
2042 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2043 if (d1)
2044 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2045
2046 return target;
2047 }
2048
2049 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2050 the low 64 bits. */
2051
2052 static void
2053 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2054 {
2055 HOST_WIDE_INT i0, i1;
2056
2057 if (GET_CODE (x) == CONST_VECTOR)
2058 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2059
2060
2061 if (GET_CODE (x) == CONST_INT)
2062 {
2063 i0 = INTVAL (x);
2064 i1 = -(i0 < 0);
2065 }
2066 else if (HOST_BITS_PER_WIDE_INT >= 64)
2067 {
2068 i0 = CONST_DOUBLE_LOW (x);
2069 i1 = -(i0 < 0);
2070 }
2071 else
2072 {
2073 i0 = CONST_DOUBLE_LOW (x);
2074 i1 = CONST_DOUBLE_HIGH (x);
2075 }
2076
2077 *p0 = i0;
2078 *p1 = i1;
2079 }
2080
2081 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2082 are willing to load the value into a register via a move pattern.
2083 Normally this is all symbolic constants, integral constants that
2084 take three or fewer instructions, and floating-point zero. */
2085
2086 bool
2087 alpha_legitimate_constant_p (rtx x)
2088 {
2089 enum machine_mode mode = GET_MODE (x);
2090 HOST_WIDE_INT i0, i1;
2091
2092 switch (GET_CODE (x))
2093 {
2094 case CONST:
2095 case LABEL_REF:
2096 case SYMBOL_REF:
2097 case HIGH:
2098 return true;
2099
2100 case CONST_DOUBLE:
2101 if (x == CONST0_RTX (mode))
2102 return true;
2103 if (FLOAT_MODE_P (mode))
2104 return false;
2105 goto do_integer;
2106
2107 case CONST_VECTOR:
2108 if (x == CONST0_RTX (mode))
2109 return true;
2110 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2111 return false;
2112 if (GET_MODE_SIZE (mode) != 8)
2113 return false;
2114 goto do_integer;
2115
2116 case CONST_INT:
2117 do_integer:
2118 if (TARGET_BUILD_CONSTANTS)
2119 return true;
2120 alpha_extract_integer (x, &i0, &i1);
2121 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2122 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2123 return false;
2124
2125 default:
2126 return false;
2127 }
2128 }
2129
2130 /* Operand 1 is known to be a constant, and should require more than one
2131 instruction to load. Emit that multi-part load. */
2132
2133 bool
2134 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2135 {
2136 HOST_WIDE_INT i0, i1;
2137 rtx temp = NULL_RTX;
2138
2139 alpha_extract_integer (operands[1], &i0, &i1);
2140
2141 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2142 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2143
2144 if (!temp && TARGET_BUILD_CONSTANTS)
2145 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2146
2147 if (temp)
2148 {
2149 if (!rtx_equal_p (operands[0], temp))
2150 emit_move_insn (operands[0], temp);
2151 return true;
2152 }
2153
2154 return false;
2155 }
2156
2157 /* Expand a move instruction; return true if all work is done.
2158 We don't handle non-bwx subword loads here. */
2159
2160 bool
2161 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2162 {
2163 /* If the output is not a register, the input must be. */
2164 if (GET_CODE (operands[0]) == MEM
2165 && ! reg_or_0_operand (operands[1], mode))
2166 operands[1] = force_reg (mode, operands[1]);
2167
2168 /* Allow legitimize_address to perform some simplifications. */
2169 if (mode == Pmode && symbolic_operand (operands[1], mode))
2170 {
2171 rtx tmp;
2172
2173 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2174 if (tmp)
2175 {
2176 if (tmp == operands[0])
2177 return true;
2178 operands[1] = tmp;
2179 return false;
2180 }
2181 }
2182
2183 /* Early out for non-constants and valid constants. */
2184 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2185 return false;
2186
2187 /* Split large integers. */
2188 if (GET_CODE (operands[1]) == CONST_INT
2189 || GET_CODE (operands[1]) == CONST_DOUBLE
2190 || GET_CODE (operands[1]) == CONST_VECTOR)
2191 {
2192 if (alpha_split_const_mov (mode, operands))
2193 return true;
2194 }
2195
2196 /* Otherwise we've nothing left but to drop the thing to memory. */
2197 operands[1] = force_const_mem (mode, operands[1]);
2198 if (reload_in_progress)
2199 {
2200 emit_move_insn (operands[0], XEXP (operands[1], 0));
2201 operands[1] = copy_rtx (operands[1]);
2202 XEXP (operands[1], 0) = operands[0];
2203 }
2204 else
2205 operands[1] = validize_mem (operands[1]);
2206 return false;
2207 }
2208
2209 /* Expand a non-bwx QImode or HImode move instruction;
2210 return true if all work is done. */
2211
2212 bool
2213 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2214 {
2215 /* If the output is not a register, the input must be. */
2216 if (GET_CODE (operands[0]) == MEM)
2217 operands[1] = force_reg (mode, operands[1]);
2218
2219 /* Handle four memory cases, unaligned and aligned for either the input
2220 or the output. The only case where we can be called during reload is
2221 for aligned loads; all other cases require temporaries. */
2222
2223 if (GET_CODE (operands[1]) == MEM
2224 || (GET_CODE (operands[1]) == SUBREG
2225 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2226 || (reload_in_progress && GET_CODE (operands[1]) == REG
2227 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2228 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2229 && GET_CODE (SUBREG_REG (operands[1])) == REG
2230 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2231 {
2232 if (aligned_memory_operand (operands[1], mode))
2233 {
2234 if (reload_in_progress)
2235 {
2236 emit_insn ((mode == QImode
2237 ? gen_reload_inqi_help
2238 : gen_reload_inhi_help)
2239 (operands[0], operands[1],
2240 gen_rtx_REG (SImode, REGNO (operands[0]))));
2241 }
2242 else
2243 {
2244 rtx aligned_mem, bitnum;
2245 rtx scratch = gen_reg_rtx (SImode);
2246 rtx subtarget;
2247 bool copyout;
2248
2249 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2250
2251 subtarget = operands[0];
2252 if (GET_CODE (subtarget) == REG)
2253 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2254 else
2255 subtarget = gen_reg_rtx (DImode), copyout = true;
2256
2257 emit_insn ((mode == QImode
2258 ? gen_aligned_loadqi
2259 : gen_aligned_loadhi)
2260 (subtarget, aligned_mem, bitnum, scratch));
2261
2262 if (copyout)
2263 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2264 }
2265 }
2266 else
2267 {
2268 /* Don't pass these as parameters since that makes the generated
2269 code depend on parameter evaluation order which will cause
2270 bootstrap failures. */
2271
2272 rtx temp1, temp2, seq, subtarget;
2273 bool copyout;
2274
2275 temp1 = gen_reg_rtx (DImode);
2276 temp2 = gen_reg_rtx (DImode);
2277
2278 subtarget = operands[0];
2279 if (GET_CODE (subtarget) == REG)
2280 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2281 else
2282 subtarget = gen_reg_rtx (DImode), copyout = true;
2283
2284 seq = ((mode == QImode
2285 ? gen_unaligned_loadqi
2286 : gen_unaligned_loadhi)
2287 (subtarget, get_unaligned_address (operands[1], 0),
2288 temp1, temp2));
2289 alpha_set_memflags (seq, operands[1]);
2290 emit_insn (seq);
2291
2292 if (copyout)
2293 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2294 }
2295 return true;
2296 }
2297
2298 if (GET_CODE (operands[0]) == MEM
2299 || (GET_CODE (operands[0]) == SUBREG
2300 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2301 || (reload_in_progress && GET_CODE (operands[0]) == REG
2302 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2303 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2304 && GET_CODE (SUBREG_REG (operands[0])) == REG
2305 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2306 {
2307 if (aligned_memory_operand (operands[0], mode))
2308 {
2309 rtx aligned_mem, bitnum;
2310 rtx temp1 = gen_reg_rtx (SImode);
2311 rtx temp2 = gen_reg_rtx (SImode);
2312
2313 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2314
2315 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2316 temp1, temp2));
2317 }
2318 else
2319 {
2320 rtx temp1 = gen_reg_rtx (DImode);
2321 rtx temp2 = gen_reg_rtx (DImode);
2322 rtx temp3 = gen_reg_rtx (DImode);
2323 rtx seq = ((mode == QImode
2324 ? gen_unaligned_storeqi
2325 : gen_unaligned_storehi)
2326 (get_unaligned_address (operands[0], 0),
2327 operands[1], temp1, temp2, temp3));
2328
2329 alpha_set_memflags (seq, operands[0]);
2330 emit_insn (seq);
2331 }
2332 return true;
2333 }
2334
2335 return false;
2336 }
2337
2338 /* Implement the movmisalign patterns. One of the operands is a memory
2339 that is not naturally aligned. Emit instructions to load it. */
2340
2341 void
2342 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2343 {
2344 /* Honor misaligned loads, for those we promised to do so. */
2345 if (MEM_P (operands[1]))
2346 {
2347 rtx tmp;
2348
2349 if (register_operand (operands[0], mode))
2350 tmp = operands[0];
2351 else
2352 tmp = gen_reg_rtx (mode);
2353
2354 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2355 if (tmp != operands[0])
2356 emit_move_insn (operands[0], tmp);
2357 }
2358 else if (MEM_P (operands[0]))
2359 {
2360 if (!reg_or_0_operand (operands[1], mode))
2361 operands[1] = force_reg (mode, operands[1]);
2362 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2363 }
2364 else
2365 gcc_unreachable ();
2366 }
2367
2368 /* Generate an unsigned DImode to FP conversion. This is the same code
2369 optabs would emit if we didn't have TFmode patterns.
2370
2371 For SFmode, this is the only construction I've found that can pass
2372 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2373 intermediates will work, because you'll get intermediate rounding
2374 that ruins the end result. Some of this could be fixed by turning
2375 on round-to-positive-infinity, but that requires diddling the fpsr,
2376 which kills performance. I tried turning this around and converting
2377 to a negative number, so that I could turn on /m, but either I did
2378 it wrong or there's something else, because I wound up with the exact
2379 same single-bit error. There is a branch-less form of this same code:
2380
2381 srl $16,1,$1
2382 and $16,1,$2
2383 cmplt $16,0,$3
2384 or $1,$2,$2
2385 cmovge $16,$16,$2
2386 itoft $3,$f10
2387 itoft $2,$f11
2388 cvtqs $f11,$f11
2389 adds $f11,$f11,$f0
2390 fcmoveq $f10,$f11,$f0
2391
2392 I'm not using it because it's the same number of instructions as
2393 this branch-full form, and it has more serialized long latency
2394 instructions on the critical path.
2395
2396 For DFmode, we can avoid rounding errors by breaking up the word
2397 into two pieces, converting them separately, and adding them back:
2398
2399 LC0: .long 0,0x5f800000
2400
2401 itoft $16,$f11
2402 lda $2,LC0
2403 cmplt $16,0,$1
2404 cpyse $f11,$f31,$f10
2405 cpyse $f31,$f11,$f11
2406 s4addq $1,$2,$1
2407 lds $f12,0($1)
2408 cvtqt $f10,$f10
2409 cvtqt $f11,$f11
2410 addt $f12,$f10,$f0
2411 addt $f0,$f11,$f0
2412
2413 This doesn't seem to be a clear-cut win over the optabs form.
2414 It probably all depends on the distribution of numbers being
2415 converted -- in the optabs form, all but high-bit-set has a
2416 much lower minimum execution time. */
2417
2418 void
2419 alpha_emit_floatuns (rtx operands[2])
2420 {
2421 rtx neglab, donelab, i0, i1, f0, in, out;
2422 enum machine_mode mode;
2423
2424 out = operands[0];
2425 in = force_reg (DImode, operands[1]);
2426 mode = GET_MODE (out);
2427 neglab = gen_label_rtx ();
2428 donelab = gen_label_rtx ();
2429 i0 = gen_reg_rtx (DImode);
2430 i1 = gen_reg_rtx (DImode);
2431 f0 = gen_reg_rtx (mode);
2432
2433 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2434
2435 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2436 emit_jump_insn (gen_jump (donelab));
2437 emit_barrier ();
2438
2439 emit_label (neglab);
2440
2441 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2442 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2443 emit_insn (gen_iordi3 (i0, i0, i1));
2444 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2445 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2446
2447 emit_label (donelab);
2448 }
2449
2450 /* Generate the comparison for a conditional branch. */
2451
2452 rtx
2453 alpha_emit_conditional_branch (enum rtx_code code)
2454 {
2455 enum rtx_code cmp_code, branch_code;
2456 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2457 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2458 rtx tem;
2459
2460 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2461 {
2462 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2463 op1 = const0_rtx;
2464 alpha_compare.fp_p = 0;
2465 }
2466
2467 /* The general case: fold the comparison code to the types of compares
2468 that we have, choosing the branch as necessary. */
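/* For illustration: a signed "a > b" branch has no direct GT compare, so
   it is reversed into "cmple a,b,t" followed by "beq t,label"; a floating
   "a > b" is instead swapped into "cmptlt b,a,ft" followed by "fbne".  */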
2469 switch (code)
2470 {
2471 case EQ: case LE: case LT: case LEU: case LTU:
2472 case UNORDERED:
2473 /* We have these compares: */
2474 cmp_code = code, branch_code = NE;
2475 break;
2476
2477 case NE:
2478 case ORDERED:
2479 /* These must be reversed. */
2480 cmp_code = reverse_condition (code), branch_code = EQ;
2481 break;
2482
2483 case GE: case GT: case GEU: case GTU:
2484 /* For FP, we swap them, for INT, we reverse them. */
2485 if (alpha_compare.fp_p)
2486 {
2487 cmp_code = swap_condition (code);
2488 branch_code = NE;
2489 tem = op0, op0 = op1, op1 = tem;
2490 }
2491 else
2492 {
2493 cmp_code = reverse_condition (code);
2494 branch_code = EQ;
2495 }
2496 break;
2497
2498 default:
2499 gcc_unreachable ();
2500 }
2501
2502 if (alpha_compare.fp_p)
2503 {
2504 cmp_mode = DFmode;
2505 if (flag_unsafe_math_optimizations)
2506 {
2507 /* When we are not as concerned about non-finite values, and we
2508 are comparing against zero, we can branch directly. */
2509 if (op1 == CONST0_RTX (DFmode))
2510 cmp_code = UNKNOWN, branch_code = code;
2511 else if (op0 == CONST0_RTX (DFmode))
2512 {
2513 /* Undo the swap we probably did just above. */
2514 tem = op0, op0 = op1, op1 = tem;
2515 branch_code = swap_condition (cmp_code);
2516 cmp_code = UNKNOWN;
2517 }
2518 }
2519 else
2520 {
2521 /* ??? We mark the branch mode to be CCmode to prevent the
2522 compare and branch from being combined, since the compare
2523 insn follows IEEE rules that the branch does not. */
2524 branch_mode = CCmode;
2525 }
2526 }
2527 else
2528 {
2529 cmp_mode = DImode;
2530
2531 /* The following optimizations are only for signed compares. */
2532 if (code != LEU && code != LTU && code != GEU && code != GTU)
2533 {
2534 /* Whee. Compare and branch against 0 directly. */
2535 if (op1 == const0_rtx)
2536 cmp_code = UNKNOWN, branch_code = code;
2537
2538 /* If the constant doesn't fit into an immediate, but can
2539 be generated by lda/ldah, we adjust the argument and
2540 compare against zero, so we can use beq/bne directly. */
2541 /* ??? Don't do this when comparing against symbols, otherwise
2542 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2543 be declared false out of hand (at least for non-weak). */
2544 else if (GET_CODE (op1) == CONST_INT
2545 && (code == EQ || code == NE)
2546 && !(symbolic_operand (op0, VOIDmode)
2547 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2548 {
2549 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2550
2551 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2552 && (CONST_OK_FOR_LETTER_P (n, 'K')
2553 || CONST_OK_FOR_LETTER_P (n, 'L')))
2554 {
2555 cmp_code = PLUS, branch_code = code;
2556 op1 = GEN_INT (n);
2557 }
2558 }
2559 }
2560
2561 if (!reg_or_0_operand (op0, DImode))
2562 op0 = force_reg (DImode, op0);
2563 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2564 op1 = force_reg (DImode, op1);
2565 }
2566
2567 /* Emit an initial compare instruction, if necessary. */
2568 tem = op0;
2569 if (cmp_code != UNKNOWN)
2570 {
2571 tem = gen_reg_rtx (cmp_mode);
2572 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2573 }
2574
2575 /* Zero the operands. */
2576 memset (&alpha_compare, 0, sizeof (alpha_compare));
2577
2578 /* Return the branch comparison. */
2579 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2580 }
2581
2582 /* Certain simplifications can be done to make invalid setcc operations
2583 valid. Return the final comparison, or NULL if we can't work. */
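/* For illustration: "x >= y" (signed, y not known to be zero) has no
   direct compare, so it is swapped and emitted as "cmple y,x,t", with t
   holding the 0/1 result.  */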
2584
2585 rtx
2586 alpha_emit_setcc (enum rtx_code code)
2587 {
2588 enum rtx_code cmp_code;
2589 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2590 int fp_p = alpha_compare.fp_p;
2591 rtx tmp;
2592
2593 /* Zero the operands. */
2594 memset (&alpha_compare, 0, sizeof (alpha_compare));
2595
2596 if (fp_p && GET_MODE (op0) == TFmode)
2597 {
2598 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2599 op1 = const0_rtx;
2600 fp_p = 0;
2601 }
2602
2603 if (fp_p && !TARGET_FIX)
2604 return NULL_RTX;
2605
2606 /* The general case: fold the comparison code to the types of compares
2607 that we have, choosing the branch as necessary. */
2608
2609 cmp_code = UNKNOWN;
2610 switch (code)
2611 {
2612 case EQ: case LE: case LT: case LEU: case LTU:
2613 case UNORDERED:
2614 /* We have these compares. */
2615 if (fp_p)
2616 cmp_code = code, code = NE;
2617 break;
2618
2619 case NE:
2620 if (!fp_p && op1 == const0_rtx)
2621 break;
2622 /* FALLTHRU */
2623
2624 case ORDERED:
2625 cmp_code = reverse_condition (code);
2626 code = EQ;
2627 break;
2628
2629 case GE: case GT: case GEU: case GTU:
2630 /* These normally need swapping, but for integer zero we have
2631 special patterns that recognize swapped operands. */
2632 if (!fp_p && op1 == const0_rtx)
2633 break;
2634 code = swap_condition (code);
2635 if (fp_p)
2636 cmp_code = code, code = NE;
2637 tmp = op0, op0 = op1, op1 = tmp;
2638 break;
2639
2640 default:
2641 gcc_unreachable ();
2642 }
2643
2644 if (!fp_p)
2645 {
2646 if (!register_operand (op0, DImode))
2647 op0 = force_reg (DImode, op0);
2648 if (!reg_or_8bit_operand (op1, DImode))
2649 op1 = force_reg (DImode, op1);
2650 }
2651
2652 /* Emit an initial compare instruction, if necessary. */
2653 if (cmp_code != UNKNOWN)
2654 {
2655 enum machine_mode mode = fp_p ? DFmode : DImode;
2656
2657 tmp = gen_reg_rtx (mode);
2658 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2659 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2660
2661 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2662 op1 = const0_rtx;
2663 }
2664
2665 /* Return the setcc comparison. */
2666 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2667 }
2668
2669
2670 /* Rewrite a comparison against zero CMP of the form
2671 (CODE (cc0) (const_int 0)) so it can be written validly in
2672 a conditional move (if_then_else CMP ...).
2673 If both of the operands that set cc0 are nonzero we must emit
2674 an insn to perform the compare (it can't be done within
2675 the conditional move). */
2676
2677 rtx
2678 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2679 {
2680 enum rtx_code code = GET_CODE (cmp);
2681 enum rtx_code cmov_code = NE;
2682 rtx op0 = alpha_compare.op0;
2683 rtx op1 = alpha_compare.op1;
2684 int fp_p = alpha_compare.fp_p;
2685 enum machine_mode cmp_mode
2686 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2687 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2688 enum machine_mode cmov_mode = VOIDmode;
2689 int local_fast_math = flag_unsafe_math_optimizations;
2690 rtx tem;
2691
2692 /* Zero the operands. */
2693 memset (&alpha_compare, 0, sizeof (alpha_compare));
2694
2695 if (fp_p != FLOAT_MODE_P (mode))
2696 {
2697 enum rtx_code cmp_code;
2698
2699 if (! TARGET_FIX)
2700 return 0;
2701
2702 /* If we have fp<->int register move instructions, do a cmov by
2703 performing the comparison in fp registers, and move the
2704 zero/nonzero value to integer registers, where we can then
2705 use a normal cmov, or vice-versa. */
2706
2707 switch (code)
2708 {
2709 case EQ: case LE: case LT: case LEU: case LTU:
2710 /* We have these compares. */
2711 cmp_code = code, code = NE;
2712 break;
2713
2714 case NE:
2715 /* This must be reversed. */
2716 cmp_code = EQ, code = EQ;
2717 break;
2718
2719 case GE: case GT: case GEU: case GTU:
2720 /* These normally need swapping, but for integer zero we have
2721 special patterns that recognize swapped operands. */
2722 if (!fp_p && op1 == const0_rtx)
2723 cmp_code = code, code = NE;
2724 else
2725 {
2726 cmp_code = swap_condition (code);
2727 code = NE;
2728 tem = op0, op0 = op1, op1 = tem;
2729 }
2730 break;
2731
2732 default:
2733 gcc_unreachable ();
2734 }
2735
2736 tem = gen_reg_rtx (cmp_op_mode);
2737 emit_insn (gen_rtx_SET (VOIDmode, tem,
2738 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2739 op0, op1)));
2740
2741 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2742 op0 = gen_lowpart (cmp_op_mode, tem);
2743 op1 = CONST0_RTX (cmp_op_mode);
2744 fp_p = !fp_p;
2745 local_fast_math = 1;
2746 }
2747
2748 /* We may be able to use a conditional move directly.
2749 This avoids emitting spurious compares. */
2750 if (signed_comparison_operator (cmp, VOIDmode)
2751 && (!fp_p || local_fast_math)
2752 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2753 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2754
2755 /* We can't put the comparison inside the conditional move;
2756 emit a compare instruction and put that inside the
2757 conditional move. Make sure we emit only comparisons we have;
2758 swap or reverse as necessary. */
2759
2760 if (no_new_pseudos)
2761 return NULL_RTX;
2762
2763 switch (code)
2764 {
2765 case EQ: case LE: case LT: case LEU: case LTU:
2766 /* We have these compares: */
2767 break;
2768
2769 case NE:
2770 /* This must be reversed. */
2771 code = reverse_condition (code);
2772 cmov_code = EQ;
2773 break;
2774
2775 case GE: case GT: case GEU: case GTU:
2776 /* These must be swapped. */
2777 if (op1 != CONST0_RTX (cmp_mode))
2778 {
2779 code = swap_condition (code);
2780 tem = op0, op0 = op1, op1 = tem;
2781 }
2782 break;
2783
2784 default:
2785 gcc_unreachable ();
2786 }
2787
2788 if (!fp_p)
2789 {
2790 if (!reg_or_0_operand (op0, DImode))
2791 op0 = force_reg (DImode, op0);
2792 if (!reg_or_8bit_operand (op1, DImode))
2793 op1 = force_reg (DImode, op1);
2794 }
2795
2796 /* ??? We mark the branch mode to be CCmode to prevent the compare
2797 and cmov from being combined, since the compare insn follows IEEE
2798 rules that the cmov does not. */
2799 if (fp_p && !local_fast_math)
2800 cmov_mode = CCmode;
2801
2802 tem = gen_reg_rtx (cmp_op_mode);
2803 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2804 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2805 }
2806
2807 /* Simplify a conditional move of two constants into a setcc with
2808 arithmetic. This is done with a splitter since combine would
2809 just undo the work if done during code generation. It also catches
2810 cases we wouldn't have before cse. */
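/* For illustration: "x = cond ? 8 : 0" becomes t = (cond != 0); x = t << 3,
   and "x = cond ? 5 : 1" becomes t = (cond != 0); x = t * 4 + 1 (an s4addq),
   trading the cmov for a setcc plus cheap arithmetic.  */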
2811
2812 int
2813 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2814 rtx t_rtx, rtx f_rtx)
2815 {
2816 HOST_WIDE_INT t, f, diff;
2817 enum machine_mode mode;
2818 rtx target, subtarget, tmp;
2819
2820 mode = GET_MODE (dest);
2821 t = INTVAL (t_rtx);
2822 f = INTVAL (f_rtx);
2823 diff = t - f;
2824
2825 if (((code == NE || code == EQ) && diff < 0)
2826 || (code == GE || code == GT))
2827 {
2828 code = reverse_condition (code);
2829 diff = t, t = f, f = diff;
2830 diff = t - f;
2831 }
2832
2833 subtarget = target = dest;
2834 if (mode != DImode)
2835 {
2836 target = gen_lowpart (DImode, dest);
2837 if (! no_new_pseudos)
2838 subtarget = gen_reg_rtx (DImode);
2839 else
2840 subtarget = target;
2841 }
2842 /* Below, we must be careful to use copy_rtx on target and subtarget
2843 in intermediate insns, as they may be a subreg rtx, which may not
2844 be shared. */
2845
2846 if (f == 0 && exact_log2 (diff) > 0
2847 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2848 viable over a longer latency cmove. On EV5, the E0 slot is a
2849 scarce resource, and on EV4 shift has the same latency as a cmove. */
2850 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2851 {
2852 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2853 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2854
2855 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2856 GEN_INT (exact_log2 (t)));
2857 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2858 }
2859 else if (f == 0 && t == -1)
2860 {
2861 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2862 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2863
2864 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2865 }
2866 else if (diff == 1 || diff == 4 || diff == 8)
2867 {
2868 rtx add_op;
2869
2870 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2871 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2872
2873 if (diff == 1)
2874 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2875 else
2876 {
2877 add_op = GEN_INT (f);
2878 if (sext_add_operand (add_op, mode))
2879 {
2880 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2881 GEN_INT (diff));
2882 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2883 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2884 }
2885 else
2886 return 0;
2887 }
2888 }
2889 else
2890 return 0;
2891
2892 return 1;
2893 }
2894 \f
2895 /* Look up the function X_floating library function name for the
2896 given operation. */
2897
2898 struct xfloating_op GTY(())
2899 {
2900 const enum rtx_code code;
2901 const char *const GTY((skip)) osf_func;
2902 const char *const GTY((skip)) vms_func;
2903 rtx libcall;
2904 };
2905
2906 static GTY(()) struct xfloating_op xfloating_ops[] =
2907 {
2908 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2909 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2910 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2911 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2912 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2913 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2914 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2915 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2916 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2917 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2918 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2919 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2920 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2921 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2922 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2923 };
2924
2925 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2926 {
2927 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2928 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2929 };
2930
2931 static rtx
2932 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2933 {
2934 struct xfloating_op *ops = xfloating_ops;
2935 long n = ARRAY_SIZE (xfloating_ops);
2936 long i;
2937
2938 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2939
2940 /* How irritating. Nothing to key off for the main table. */
2941 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2942 {
2943 ops = vax_cvt_ops;
2944 n = ARRAY_SIZE (vax_cvt_ops);
2945 }
2946
2947 for (i = 0; i < n; ++i, ++ops)
2948 if (ops->code == code)
2949 {
2950 rtx func = ops->libcall;
2951 if (!func)
2952 {
2953 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2954 ? ops->vms_func : ops->osf_func);
2955 ops->libcall = func;
2956 }
2957 return func;
2958 }
2959
2960 gcc_unreachable ();
2961 }
2962
2963 /* Most X_floating operations take the rounding mode as an argument.
2964 Compute that here. */
2965
2966 static int
2967 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2968 enum alpha_fp_rounding_mode round)
2969 {
2970 int mode;
2971
2972 switch (round)
2973 {
2974 case ALPHA_FPRM_NORM:
2975 mode = 2;
2976 break;
2977 case ALPHA_FPRM_MINF:
2978 mode = 1;
2979 break;
2980 case ALPHA_FPRM_CHOP:
2981 mode = 0;
2982 break;
2983 case ALPHA_FPRM_DYN:
2984 mode = 4;
2985 break;
2986 default:
2987 gcc_unreachable ();
2988
2989 /* XXX For reference, round to +inf is mode = 3. */
2990 }
2991
2992 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2993 mode |= 0x10000;
2994
2995 return mode;
2996 }
2997
2998 /* Emit an X_floating library function call.
2999
3000 Note that these functions do not follow normal calling conventions:
3001 TFmode arguments are passed in two integer registers (as opposed to
3002 indirect); TFmode return values appear in R16+R17.
3003
3004 FUNC is the function to call.
3005 TARGET is where the output belongs.
3006 OPERANDS are the inputs.
3007 NOPERANDS is the count of inputs.
3008 EQUIV is the expression equivalent for the function.
3009 */
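/* For illustration: for an X_floating add (_OtsAddX / OTS$ADD_X), the
   first TFmode operand is passed in $16/$17, the second in $18/$19, the
   rounding-mode argument in $20, and the TFmode result comes back in
   $16/$17.  */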
3010
3011 static void
3012 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3013 int noperands, rtx equiv)
3014 {
3015 rtx usage = NULL_RTX, tmp, reg;
3016 int regno = 16, i;
3017
3018 start_sequence ();
3019
3020 for (i = 0; i < noperands; ++i)
3021 {
3022 switch (GET_MODE (operands[i]))
3023 {
3024 case TFmode:
3025 reg = gen_rtx_REG (TFmode, regno);
3026 regno += 2;
3027 break;
3028
3029 case DFmode:
3030 reg = gen_rtx_REG (DFmode, regno + 32);
3031 regno += 1;
3032 break;
3033
3034 case VOIDmode:
3035 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3036 /* FALLTHRU */
3037 case DImode:
3038 reg = gen_rtx_REG (DImode, regno);
3039 regno += 1;
3040 break;
3041
3042 default:
3043 gcc_unreachable ();
3044 }
3045
3046 emit_move_insn (reg, operands[i]);
3047 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3048 }
3049
3050 switch (GET_MODE (target))
3051 {
3052 case TFmode:
3053 reg = gen_rtx_REG (TFmode, 16);
3054 break;
3055 case DFmode:
3056 reg = gen_rtx_REG (DFmode, 32);
3057 break;
3058 case DImode:
3059 reg = gen_rtx_REG (DImode, 0);
3060 break;
3061 default:
3062 gcc_unreachable ();
3063 }
3064
3065 tmp = gen_rtx_MEM (QImode, func);
3066 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3067 const0_rtx, const0_rtx));
3068 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3069 CONST_OR_PURE_CALL_P (tmp) = 1;
3070
3071 tmp = get_insns ();
3072 end_sequence ();
3073
3074 emit_libcall_block (tmp, target, reg, equiv);
3075 }
3076
3077 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3078
3079 void
3080 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3081 {
3082 rtx func;
3083 int mode;
3084 rtx out_operands[3];
3085
3086 func = alpha_lookup_xfloating_lib_func (code);
3087 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3088
3089 out_operands[0] = operands[1];
3090 out_operands[1] = operands[2];
3091 out_operands[2] = GEN_INT (mode);
3092 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3093 gen_rtx_fmt_ee (code, TFmode, operands[1],
3094 operands[2]));
3095 }
3096
3097 /* Emit an X_floating library function call for a comparison. */
3098
3099 static rtx
3100 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3101 {
3102 enum rtx_code cmp_code, res_code;
3103 rtx func, out, operands[2];
3104
3105 /* X_floating library comparison functions return
3106 -1 unordered
3107 0 false
3108 1 true
3109 Convert the compare against the raw return value. */
3110
3111 cmp_code = *pcode;
3112 switch (cmp_code)
3113 {
3114 case UNORDERED:
3115 cmp_code = EQ;
3116 res_code = LT;
3117 break;
3118 case ORDERED:
3119 cmp_code = EQ;
3120 res_code = GE;
3121 break;
3122 case NE:
3123 res_code = NE;
3124 break;
3125 case EQ:
3126 case LT:
3127 case GT:
3128 case LE:
3129 case GE:
3130 res_code = GT;
3131 break;
3132 default:
3133 gcc_unreachable ();
3134 }
3135 *pcode = res_code;
3136
3137 func = alpha_lookup_xfloating_lib_func (cmp_code);
3138
3139 operands[0] = op0;
3140 operands[1] = op1;
3141 out = gen_reg_rtx (DImode);
3142
3143 /* ??? Strange mode for equiv because what's actually returned
3144 is -1,0,1, not a proper boolean value. */
3145 alpha_emit_xfloating_libcall (func, out, operands, 2,
3146 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3147
3148 return out;
3149 }
3150
3151 /* Emit an X_floating library function call for a conversion. */
3152
3153 void
3154 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3155 {
3156 int noperands = 1, mode;
3157 rtx out_operands[2];
3158 rtx func;
3159 enum rtx_code code = orig_code;
3160
3161 if (code == UNSIGNED_FIX)
3162 code = FIX;
3163
3164 func = alpha_lookup_xfloating_lib_func (code);
3165
3166 out_operands[0] = operands[1];
3167
3168 switch (code)
3169 {
3170 case FIX:
3171 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3172 out_operands[1] = GEN_INT (mode);
3173 noperands = 2;
3174 break;
3175 case FLOAT_TRUNCATE:
3176 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3177 out_operands[1] = GEN_INT (mode);
3178 noperands = 2;
3179 break;
3180 default:
3181 break;
3182 }
3183
3184 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3185 gen_rtx_fmt_e (orig_code,
3186 GET_MODE (operands[0]),
3187 operands[1]));
3188 }
3189
3190 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3191 OP[0] into OP[0,1]. Naturally, output operand ordering is
3192 little-endian. */
3193
3194 void
3195 alpha_split_tfmode_pair (rtx operands[4])
3196 {
3197 switch (GET_CODE (operands[1]))
3198 {
3199 case REG:
3200 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3201 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3202 break;
3203
3204 case MEM:
3205 operands[3] = adjust_address (operands[1], DImode, 8);
3206 operands[2] = adjust_address (operands[1], DImode, 0);
3207 break;
3208
3209 case CONST_DOUBLE:
3210 gcc_assert (operands[1] == CONST0_RTX (TFmode));
3211 operands[2] = operands[3] = const0_rtx;
3212 break;
3213
3214 default:
3215 gcc_unreachable ();
3216 }
3217
3218 switch (GET_CODE (operands[0]))
3219 {
3220 case REG:
3221 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3222 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3223 break;
3224
3225 case MEM:
3226 operands[1] = adjust_address (operands[0], DImode, 8);
3227 operands[0] = adjust_address (operands[0], DImode, 0);
3228 break;
3229
3230 default:
3231 gcc_unreachable ();
3232 }
3233 }
3234
3235 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3236 op2 is a register containing the sign bit, operation is the
3237 logical operation to be performed. */
3238
3239 void
3240 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3241 {
3242 rtx high_bit = operands[2];
3243 rtx scratch;
3244 int move;
3245
3246 alpha_split_tfmode_pair (operands);
3247
3248 /* Detect three flavors of operand overlap. */
3249 move = 1;
3250 if (rtx_equal_p (operands[0], operands[2]))
3251 move = 0;
3252 else if (rtx_equal_p (operands[1], operands[2]))
3253 {
3254 if (rtx_equal_p (operands[0], high_bit))
3255 move = 2;
3256 else
3257 move = -1;
3258 }
3259
3260 if (move < 0)
3261 emit_move_insn (operands[0], operands[2]);
3262
3263 /* ??? If the destination overlaps both source tf and high_bit, then
3264 assume source tf is dead in its entirety and use the other half
3265 for a scratch register. Otherwise "scratch" is just the proper
3266 destination register. */
3267 scratch = operands[move < 2 ? 1 : 3];
3268
3269 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3270
3271 if (move > 0)
3272 {
3273 emit_move_insn (operands[0], operands[2]);
3274 if (move > 1)
3275 emit_move_insn (operands[1], scratch);
3276 }
3277 }
3278 \f
3279 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3280 unaligned data:
3281
3282 unsigned: signed:
3283 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3284 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3285 lda r3,X(r11) lda r3,X+2(r11)
3286 extwl r1,r3,r1 extql r1,r3,r1
3287 extwh r2,r3,r2 extqh r2,r3,r2
3288 or r1,r2,r1 or r1,r2,r1
3289 sra r1,48,r1
3290
3291 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3292 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3293 lda r3,X(r11) lda r3,X(r11)
3294 extll r1,r3,r1 extll r1,r3,r1
3295 extlh r2,r3,r2 extlh r2,r3,r2
3296 or r1,r2,r1 addl r1,r2,r1
3297
3298 quad: ldq_u r1,X(r11)
3299 ldq_u r2,X+7(r11)
3300 lda r3,X(r11)
3301 extql r1,r3,r1
3302 extqh r2,r3,r2
3303 or r1,r2,r1
3304 */
3305
3306 void
3307 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3308 HOST_WIDE_INT ofs, int sign)
3309 {
3310 rtx meml, memh, addr, extl, exth, tmp, mema;
3311 enum machine_mode mode;
3312
3313 if (TARGET_BWX && size == 2)
3314 {
3315 meml = adjust_address (mem, QImode, ofs);
3316 memh = adjust_address (mem, QImode, ofs+1);
3317 if (BYTES_BIG_ENDIAN)
3318 tmp = meml, meml = memh, memh = tmp;
3319 extl = gen_reg_rtx (DImode);
3320 exth = gen_reg_rtx (DImode);
3321 emit_insn (gen_zero_extendqidi2 (extl, meml));
3322 emit_insn (gen_zero_extendqidi2 (exth, memh));
3323 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3324 NULL, 1, OPTAB_LIB_WIDEN);
3325 addr = expand_simple_binop (DImode, IOR, extl, exth,
3326 NULL, 1, OPTAB_LIB_WIDEN);
3327
3328 if (sign && GET_MODE (tgt) != HImode)
3329 {
3330 addr = gen_lowpart (HImode, addr);
3331 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3332 }
3333 else
3334 {
3335 if (GET_MODE (tgt) != DImode)
3336 addr = gen_lowpart (GET_MODE (tgt), addr);
3337 emit_move_insn (tgt, addr);
3338 }
3339 return;
3340 }
3341
3342 meml = gen_reg_rtx (DImode);
3343 memh = gen_reg_rtx (DImode);
3344 addr = gen_reg_rtx (DImode);
3345 extl = gen_reg_rtx (DImode);
3346 exth = gen_reg_rtx (DImode);
3347
3348 mema = XEXP (mem, 0);
3349 if (GET_CODE (mema) == LO_SUM)
3350 mema = force_reg (Pmode, mema);
3351
3352 /* AND addresses cannot be in any alias set, since they may implicitly
3353 alias surrounding code. Ideally we'd have some alias set that
3354 covered all types except those with alignment 8 or higher. */
3355
3356 tmp = change_address (mem, DImode,
3357 gen_rtx_AND (DImode,
3358 plus_constant (mema, ofs),
3359 GEN_INT (-8)));
3360 set_mem_alias_set (tmp, 0);
3361 emit_move_insn (meml, tmp);
3362
3363 tmp = change_address (mem, DImode,
3364 gen_rtx_AND (DImode,
3365 plus_constant (mema, ofs + size - 1),
3366 GEN_INT (-8)));
3367 set_mem_alias_set (tmp, 0);
3368 emit_move_insn (memh, tmp);
3369
3370 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3371 {
3372 emit_move_insn (addr, plus_constant (mema, -1));
3373
3374 emit_insn (gen_extqh_be (extl, meml, addr));
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3376
3377 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3378 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3379 addr, 1, OPTAB_WIDEN);
3380 }
3381 else if (sign && size == 2)
3382 {
3383 emit_move_insn (addr, plus_constant (mema, ofs+2));
3384
3385 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3386 emit_insn (gen_extqh_le (exth, memh, addr));
3387
3388 /* We must use tgt here for the target. Alpha-vms port fails if we use
3389 addr for the target, because addr is marked as a pointer and combine
3390 knows that pointers are always sign-extended 32 bit values. */
3391 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3392 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3393 addr, 1, OPTAB_WIDEN);
3394 }
3395 else
3396 {
3397 if (WORDS_BIG_ENDIAN)
3398 {
3399 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3400 switch ((int) size)
3401 {
3402 case 2:
3403 emit_insn (gen_extwh_be (extl, meml, addr));
3404 mode = HImode;
3405 break;
3406
3407 case 4:
3408 emit_insn (gen_extlh_be (extl, meml, addr));
3409 mode = SImode;
3410 break;
3411
3412 case 8:
3413 emit_insn (gen_extqh_be (extl, meml, addr));
3414 mode = DImode;
3415 break;
3416
3417 default:
3418 gcc_unreachable ();
3419 }
3420 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3421 }
3422 else
3423 {
3424 emit_move_insn (addr, plus_constant (mema, ofs));
3425 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3426 switch ((int) size)
3427 {
3428 case 2:
3429 emit_insn (gen_extwh_le (exth, memh, addr));
3430 mode = HImode;
3431 break;
3432
3433 case 4:
3434 emit_insn (gen_extlh_le (exth, memh, addr));
3435 mode = SImode;
3436 break;
3437
3438 case 8:
3439 emit_insn (gen_extqh_le (exth, memh, addr));
3440 mode = DImode;
3441 break;
3442
3443 default:
3444 gcc_unreachable ();
3445 }
3446 }
3447
3448 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3449 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3450 sign, OPTAB_WIDEN);
3451 }
3452
3453 if (addr != tgt)
3454 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3455 }
3456
3457 /* Similarly, use ins and msk instructions to perform unaligned stores. */
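/* Roughly, for a little-endian unaligned 2-byte store of r5 at X(r11)
   (register numbers are arbitrary), the code below produces something like:

       ldq_u  r2,X+1(r11)
       ldq_u  r1,X(r11)
       lda    r3,X(r11)
       inswh  r5,r3,r6
       inswl  r5,r3,r7
       mskwh  r2,r3,r2
       mskwl  r1,r3,r1
       or     r2,r6,r2
       or     r1,r7,r1
       stq_u  r2,X+1(r11)
       stq_u  r1,X(r11)  */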
3458
3459 void
3460 alpha_expand_unaligned_store (rtx dst, rtx src,
3461 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3462 {
3463 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3464
3465 if (TARGET_BWX && size == 2)
3466 {
3467 if (src != const0_rtx)
3468 {
3469 dstl = gen_lowpart (QImode, src);
3470 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3471 NULL, 1, OPTAB_LIB_WIDEN);
3472 dsth = gen_lowpart (QImode, dsth);
3473 }
3474 else
3475 dstl = dsth = const0_rtx;
3476
3477 meml = adjust_address (dst, QImode, ofs);
3478 memh = adjust_address (dst, QImode, ofs+1);
3479 if (BYTES_BIG_ENDIAN)
3480 addr = meml, meml = memh, memh = addr;
3481
3482 emit_move_insn (meml, dstl);
3483 emit_move_insn (memh, dsth);
3484 return;
3485 }
3486
3487 dstl = gen_reg_rtx (DImode);
3488 dsth = gen_reg_rtx (DImode);
3489 insl = gen_reg_rtx (DImode);
3490 insh = gen_reg_rtx (DImode);
3491
3492 dsta = XEXP (dst, 0);
3493 if (GET_CODE (dsta) == LO_SUM)
3494 dsta = force_reg (Pmode, dsta);
3495
3496 /* AND addresses cannot be in any alias set, since they may implicitly
3497 alias surrounding code. Ideally we'd have some alias set that
3498 covered all types except those with alignment 8 or higher. */
3499
3500 meml = change_address (dst, DImode,
3501 gen_rtx_AND (DImode,
3502 plus_constant (dsta, ofs),
3503 GEN_INT (-8)));
3504 set_mem_alias_set (meml, 0);
3505
3506 memh = change_address (dst, DImode,
3507 gen_rtx_AND (DImode,
3508 plus_constant (dsta, ofs + size - 1),
3509 GEN_INT (-8)));
3510 set_mem_alias_set (memh, 0);
3511
3512 emit_move_insn (dsth, memh);
3513 emit_move_insn (dstl, meml);
3514 if (WORDS_BIG_ENDIAN)
3515 {
3516 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3517
3518 if (src != const0_rtx)
3519 {
3520 switch ((int) size)
3521 {
3522 case 2:
3523 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3524 break;
3525 case 4:
3526 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3527 break;
3528 case 8:
3529 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3530 break;
3531 }
3532 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3533 GEN_INT (size*8), addr));
3534 }
3535
3536 switch ((int) size)
3537 {
3538 case 2:
3539 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3540 break;
3541 case 4:
3542 {
3543 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3544 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3545 break;
3546 }
3547 case 8:
3548 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3549 break;
3550 }
3551
3552 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3553 }
3554 else
3555 {
3556 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3557
3558 if (src != CONST0_RTX (GET_MODE (src)))
3559 {
3560 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3561 GEN_INT (size*8), addr));
3562
3563 switch ((int) size)
3564 {
3565 case 2:
3566 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3567 break;
3568 case 4:
3569 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3570 break;
3571 case 8:
3572 emit_insn (gen_insql_le (insl, src, addr));
3573 break;
3574 }
3575 }
3576
3577 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3578
3579 switch ((int) size)
3580 {
3581 case 2:
3582 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3583 break;
3584 case 4:
3585 {
3586 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3587 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3588 break;
3589 }
3590 case 8:
3591 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3592 break;
3593 }
3594 }
3595
3596 if (src != CONST0_RTX (GET_MODE (src)))
3597 {
3598 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3599 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3600 }
3601
3602 if (WORDS_BIG_ENDIAN)
3603 {
3604 emit_move_insn (meml, dstl);
3605 emit_move_insn (memh, dsth);
3606 }
3607 else
3608 {
3609 /* Must store high before low for degenerate case of aligned. */
3610 emit_move_insn (memh, dsth);
3611 emit_move_insn (meml, dstl);
3612 }
3613 }
3614
3615 /* The block move code tries to maximize speed by separating loads and
3616 stores at the expense of register pressure: we load all of the data
3617 before we store it back out. There are two secondary effects worth
3618 mentioning, that this speeds copying to/from aligned and unaligned
3619 buffers, and that it makes the code significantly easier to write. */
3620
3621 #define MAX_MOVE_WORDS 8
3622
3623 /* Load an integral number of consecutive unaligned quadwords. */
3624
3625 static void
3626 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3627 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3628 {
3629 rtx const im8 = GEN_INT (-8);
3630 rtx const i64 = GEN_INT (64);
3631 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3632 rtx sreg, areg, tmp, smema;
3633 HOST_WIDE_INT i;
3634
3635 smema = XEXP (smem, 0);
3636 if (GET_CODE (smema) == LO_SUM)
3637 smema = force_reg (Pmode, smema);
3638
3639 /* Generate all the tmp registers we need. */
3640 for (i = 0; i < words; ++i)
3641 {
3642 data_regs[i] = out_regs[i];
3643 ext_tmps[i] = gen_reg_rtx (DImode);
3644 }
3645 data_regs[words] = gen_reg_rtx (DImode);
3646
3647 if (ofs != 0)
3648 smem = adjust_address (smem, GET_MODE (smem), ofs);
3649
3650 /* Load up all of the source data. */
3651 for (i = 0; i < words; ++i)
3652 {
3653 tmp = change_address (smem, DImode,
3654 gen_rtx_AND (DImode,
3655 plus_constant (smema, 8*i),
3656 im8));
3657 set_mem_alias_set (tmp, 0);
3658 emit_move_insn (data_regs[i], tmp);
3659 }
3660
3661 tmp = change_address (smem, DImode,
3662 gen_rtx_AND (DImode,
3663 plus_constant (smema, 8*words - 1),
3664 im8));
3665 set_mem_alias_set (tmp, 0);
3666 emit_move_insn (data_regs[words], tmp);
3667
3668 /* Extract the half-word fragments. Unfortunately DEC decided to make
3669 extxh with offset zero a noop instead of zeroing the register, so
3670 we must take care of that edge condition ourselves with cmov. */
3671
3672 sreg = copy_addr_to_reg (smema);
3673 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3674 1, OPTAB_WIDEN);
3675 if (WORDS_BIG_ENDIAN)
3676 emit_move_insn (sreg, plus_constant (sreg, 7));
3677 for (i = 0; i < words; ++i)
3678 {
3679 if (WORDS_BIG_ENDIAN)
3680 {
3681 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3682 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3683 }
3684 else
3685 {
3686 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3687 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3688 }
3689 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3690 gen_rtx_IF_THEN_ELSE (DImode,
3691 gen_rtx_EQ (DImode, areg,
3692 const0_rtx),
3693 const0_rtx, ext_tmps[i])));
3694 }
3695
3696 /* Merge the half-words into whole words. */
3697 for (i = 0; i < words; ++i)
3698 {
3699 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3700 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3701 }
3702 }
3703
3704 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3705 may be NULL to store zeros. */
3706
3707 static void
3708 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3709 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3710 {
3711 rtx const im8 = GEN_INT (-8);
3712 rtx const i64 = GEN_INT (64);
3713 rtx ins_tmps[MAX_MOVE_WORDS];
3714 rtx st_tmp_1, st_tmp_2, dreg;
3715 rtx st_addr_1, st_addr_2, dmema;
3716 HOST_WIDE_INT i;
3717
3718 dmema = XEXP (dmem, 0);
3719 if (GET_CODE (dmema) == LO_SUM)
3720 dmema = force_reg (Pmode, dmema);
3721
3722 /* Generate all the tmp registers we need. */
3723 if (data_regs != NULL)
3724 for (i = 0; i < words; ++i)
3725 ins_tmps[i] = gen_reg_rtx(DImode);
3726 st_tmp_1 = gen_reg_rtx(DImode);
3727 st_tmp_2 = gen_reg_rtx(DImode);
3728
3729 if (ofs != 0)
3730 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3731
3732 st_addr_2 = change_address (dmem, DImode,
3733 gen_rtx_AND (DImode,
3734 plus_constant (dmema, words*8 - 1),
3735 im8));
3736 set_mem_alias_set (st_addr_2, 0);
3737
3738 st_addr_1 = change_address (dmem, DImode,
3739 gen_rtx_AND (DImode, dmema, im8));
3740 set_mem_alias_set (st_addr_1, 0);
3741
3742 /* Load up the destination end bits. */
3743 emit_move_insn (st_tmp_2, st_addr_2);
3744 emit_move_insn (st_tmp_1, st_addr_1);
3745
3746 /* Shift the input data into place. */
3747 dreg = copy_addr_to_reg (dmema);
3748 if (WORDS_BIG_ENDIAN)
3749 emit_move_insn (dreg, plus_constant (dreg, 7));
3750 if (data_regs != NULL)
3751 {
3752 for (i = words-1; i >= 0; --i)
3753 {
3754 if (WORDS_BIG_ENDIAN)
3755 {
3756 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3757 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3758 }
3759 else
3760 {
3761 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3762 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3763 }
3764 }
3765 for (i = words-1; i > 0; --i)
3766 {
3767 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3768 ins_tmps[i-1], ins_tmps[i-1], 1,
3769 OPTAB_WIDEN);
3770 }
3771 }
3772
3773 /* Split and merge the ends with the destination data. */
3774 if (WORDS_BIG_ENDIAN)
3775 {
3776 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3777 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3778 }
3779 else
3780 {
3781 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3782 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3783 }
3784
3785 if (data_regs != NULL)
3786 {
3787 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3788 st_tmp_2, 1, OPTAB_WIDEN);
3789 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3790 st_tmp_1, 1, OPTAB_WIDEN);
3791 }
3792
3793 /* Store it all. */
3794 if (WORDS_BIG_ENDIAN)
3795 emit_move_insn (st_addr_1, st_tmp_1);
3796 else
3797 emit_move_insn (st_addr_2, st_tmp_2);
3798 for (i = words-1; i > 0; --i)
3799 {
3800 rtx tmp = change_address (dmem, DImode,
3801 gen_rtx_AND (DImode,
3802 plus_constant(dmema,
3803 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3804 im8));
3805 set_mem_alias_set (tmp, 0);
3806 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3807 }
3808 if (WORDS_BIG_ENDIAN)
3809 emit_move_insn (st_addr_2, st_tmp_2);
3810 else
3811 emit_move_insn (st_addr_1, st_tmp_1);
3812 }
3813
3814
3815 /* Expand string/block move operations.
3816
3817 operands[0] is the pointer to the destination.
3818 operands[1] is the pointer to the source.
3819 operands[2] is the number of bytes to move.
3820 operands[3] is the alignment. */
3821
3822 int
3823 alpha_expand_block_move (rtx operands[])
3824 {
3825 rtx bytes_rtx = operands[2];
3826 rtx align_rtx = operands[3];
3827 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3828 HOST_WIDE_INT bytes = orig_bytes;
3829 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3830 HOST_WIDE_INT dst_align = src_align;
3831 rtx orig_src = operands[1];
3832 rtx orig_dst = operands[0];
3833 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3834 rtx tmp;
3835 unsigned int i, words, ofs, nregs = 0;
3836
3837 if (orig_bytes <= 0)
3838 return 1;
3839 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3840 return 0;
3841
3842 /* Look for additional alignment information from recorded register info. */
3843
3844 tmp = XEXP (orig_src, 0);
3845 if (GET_CODE (tmp) == REG)
3846 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3847 else if (GET_CODE (tmp) == PLUS
3848 && GET_CODE (XEXP (tmp, 0)) == REG
3849 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3850 {
3851 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3852 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3853
3854 if (a > src_align)
3855 {
3856 if (a >= 64 && c % 8 == 0)
3857 src_align = 64;
3858 else if (a >= 32 && c % 4 == 0)
3859 src_align = 32;
3860 else if (a >= 16 && c % 2 == 0)
3861 src_align = 16;
3862 }
3863 }
3864
3865 tmp = XEXP (orig_dst, 0);
3866 if (GET_CODE (tmp) == REG)
3867 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3868 else if (GET_CODE (tmp) == PLUS
3869 && GET_CODE (XEXP (tmp, 0)) == REG
3870 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3871 {
3872 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3873 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3874
3875 if (a > dst_align)
3876 {
3877 if (a >= 64 && c % 8 == 0)
3878 dst_align = 64;
3879 else if (a >= 32 && c % 4 == 0)
3880 dst_align = 32;
3881 else if (a >= 16 && c % 2 == 0)
3882 dst_align = 16;
3883 }
3884 }
3885
3886 ofs = 0;
3887 if (src_align >= 64 && bytes >= 8)
3888 {
3889 words = bytes / 8;
3890
3891 for (i = 0; i < words; ++i)
3892 data_regs[nregs + i] = gen_reg_rtx (DImode);
3893
3894 for (i = 0; i < words; ++i)
3895 emit_move_insn (data_regs[nregs + i],
3896 adjust_address (orig_src, DImode, ofs + i * 8));
3897
3898 nregs += words;
3899 bytes -= words * 8;
3900 ofs += words * 8;
3901 }
3902
3903 if (src_align >= 32 && bytes >= 4)
3904 {
3905 words = bytes / 4;
3906
3907 for (i = 0; i < words; ++i)
3908 data_regs[nregs + i] = gen_reg_rtx (SImode);
3909
3910 for (i = 0; i < words; ++i)
3911 emit_move_insn (data_regs[nregs + i],
3912 adjust_address (orig_src, SImode, ofs + i * 4));
3913
3914 nregs += words;
3915 bytes -= words * 4;
3916 ofs += words * 4;
3917 }
3918
3919 if (bytes >= 8)
3920 {
3921 words = bytes / 8;
3922
3923 for (i = 0; i < words+1; ++i)
3924 data_regs[nregs + i] = gen_reg_rtx (DImode);
3925
3926 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3927 words, ofs);
3928
3929 nregs += words;
3930 bytes -= words * 8;
3931 ofs += words * 8;
3932 }
3933
3934 if (! TARGET_BWX && bytes >= 4)
3935 {
3936 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3937 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3938 bytes -= 4;
3939 ofs += 4;
3940 }
3941
3942 if (bytes >= 2)
3943 {
3944 if (src_align >= 16)
3945 {
3946 do {
3947 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3948 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3949 bytes -= 2;
3950 ofs += 2;
3951 } while (bytes >= 2);
3952 }
3953 else if (! TARGET_BWX)
3954 {
3955 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3956 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3957 bytes -= 2;
3958 ofs += 2;
3959 }
3960 }
3961
3962 while (bytes > 0)
3963 {
3964 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3965 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3966 bytes -= 1;
3967 ofs += 1;
3968 }
3969
3970 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3971
3972 /* Now save it back out again. */
3973
3974 i = 0, ofs = 0;
3975
3976 /* Write out the data in whatever chunks reading the source allowed. */
3977 if (dst_align >= 64)
3978 {
3979 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3980 {
3981 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3982 data_regs[i]);
3983 ofs += 8;
3984 i++;
3985 }
3986 }
3987
3988 if (dst_align >= 32)
3989 {
3990 /* If the source has remaining DImode regs, write them out in
3991 two pieces. */
3992 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3993 {
3994 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3995 NULL_RTX, 1, OPTAB_WIDEN);
3996
3997 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3998 gen_lowpart (SImode, data_regs[i]));
3999 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4000 gen_lowpart (SImode, tmp));
4001 ofs += 8;
4002 i++;
4003 }
4004
4005 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4006 {
4007 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4008 data_regs[i]);
4009 ofs += 4;
4010 i++;
4011 }
4012 }
4013
4014 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4015 {
4016 /* Write out a remaining block of words using unaligned methods. */
4017
4018 for (words = 1; i + words < nregs; words++)
4019 if (GET_MODE (data_regs[i + words]) != DImode)
4020 break;
4021
4022 if (words == 1)
4023 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4024 else
4025 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4026 words, ofs);
4027
4028 i += words;
4029 ofs += words * 8;
4030 }
4031
4032 /* Due to the above, this won't be aligned. */
4033 /* ??? If we have more than one of these, consider constructing full
4034 words in registers and using alpha_expand_unaligned_store_words. */
4035 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4036 {
4037 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4038 ofs += 4;
4039 i++;
4040 }
4041
4042 if (dst_align >= 16)
4043 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4044 {
4045 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4046 i++;
4047 ofs += 2;
4048 }
4049 else
4050 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4051 {
4052 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4053 i++;
4054 ofs += 2;
4055 }
4056
4057 /* The remainder must be byte copies. */
4058 while (i < nregs)
4059 {
4060 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4061 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4062 i++;
4063 ofs += 1;
4064 }
4065
4066 return 1;
4067 }
4068
4069 int
4070 alpha_expand_block_clear (rtx operands[])
4071 {
4072 rtx bytes_rtx = operands[1];
4073 rtx align_rtx = operands[3];
4074 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4075 HOST_WIDE_INT bytes = orig_bytes;
4076 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4077 HOST_WIDE_INT alignofs = 0;
4078 rtx orig_dst = operands[0];
4079 rtx tmp;
4080 int i, words, ofs = 0;
4081
4082 if (orig_bytes <= 0)
4083 return 1;
4084 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4085 return 0;
4086
4087 /* Look for stricter alignment. */
4088 tmp = XEXP (orig_dst, 0);
4089 if (GET_CODE (tmp) == REG)
4090 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4091 else if (GET_CODE (tmp) == PLUS
4092 && GET_CODE (XEXP (tmp, 0)) == REG
4093 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4094 {
4095 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4096 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4097
4098 if (a > align)
4099 {
4100 if (a >= 64)
4101 align = a, alignofs = 8 - c % 8;
4102 else if (a >= 32)
4103 align = a, alignofs = 4 - c % 4;
4104 else if (a >= 16)
4105 align = a, alignofs = 2 - c % 2;
4106 }
4107 }
4108
4109 /* Handle an unaligned prefix first. */
4110
4111 if (alignofs > 0)
4112 {
4113 #if HOST_BITS_PER_WIDE_INT >= 64
4114 /* Given that alignofs is bounded by align, the only time BWX could
4115 generate three stores is for a 7 byte fill. Prefer two individual
4116 stores over a load/mask/store sequence. */
4117 if ((!TARGET_BWX || alignofs == 7)
4118 && align >= 32
4119 && !(alignofs == 4 && bytes >= 4))
4120 {
4121 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4122 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4123 rtx mem, tmp;
4124 HOST_WIDE_INT mask;
4125
4126 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4127 set_mem_alias_set (mem, 0);
4128
4129 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4130 if (bytes < alignofs)
4131 {
4132 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4133 ofs += bytes;
4134 bytes = 0;
4135 }
4136 else
4137 {
4138 bytes -= alignofs;
4139 ofs += alignofs;
4140 }
4141 alignofs = 0;
4142
4143 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4144 NULL_RTX, 1, OPTAB_WIDEN);
4145
4146 emit_move_insn (mem, tmp);
4147 }
4148 #endif
4149
4150 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4151 {
4152 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4153 bytes -= 1;
4154 ofs += 1;
4155 alignofs -= 1;
4156 }
4157 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4158 {
4159 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4160 bytes -= 2;
4161 ofs += 2;
4162 alignofs -= 2;
4163 }
4164 if (alignofs == 4 && bytes >= 4)
4165 {
4166 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4167 bytes -= 4;
4168 ofs += 4;
4169 alignofs = 0;
4170 }
4171
4172 /* If we've not used the extra lead alignment information by now,
4173 we won't be able to. Downgrade align to match what's left over. */
4174 if (alignofs > 0)
4175 {
4176 alignofs = alignofs & -alignofs;
4177 align = MIN (align, alignofs * BITS_PER_UNIT);
4178 }
4179 }
4180
4181 /* Handle a block of contiguous long-words. */
4182
4183 if (align >= 64 && bytes >= 8)
4184 {
4185 words = bytes / 8;
4186
4187 for (i = 0; i < words; ++i)
4188 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4189 const0_rtx);
4190
4191 bytes -= words * 8;
4192 ofs += words * 8;
4193 }
4194
4195 /* If the block is large and appropriately aligned, emit a single
4196 store followed by a sequence of stq_u insns. */
4197
4198 if (align >= 32 && bytes > 16)
4199 {
4200 rtx orig_dsta;
4201
4202 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4203 bytes -= 4;
4204 ofs += 4;
4205
4206 orig_dsta = XEXP (orig_dst, 0);
4207 if (GET_CODE (orig_dsta) == LO_SUM)
4208 orig_dsta = force_reg (Pmode, orig_dsta);
4209
4210 words = bytes / 8;
4211 for (i = 0; i < words; ++i)
4212 {
4213 rtx mem
4214 = change_address (orig_dst, DImode,
4215 gen_rtx_AND (DImode,
4216 plus_constant (orig_dsta, ofs + i*8),
4217 GEN_INT (-8)));
4218 set_mem_alias_set (mem, 0);
4219 emit_move_insn (mem, const0_rtx);
4220 }
4221
4222 /* Depending on the alignment, the first stq_u may have overlapped
4223 with the initial stl, which means that the last stq_u didn't
4224 write as much as it would appear. Leave those questionable bytes
4225 unaccounted for. */
4226 bytes -= words * 8 - 4;
4227 ofs += words * 8 - 4;
4228 }
4229
4230 /* Handle a smaller block of aligned words. */
4231
4232 if ((align >= 64 && bytes == 4)
4233 || (align == 32 && bytes >= 4))
4234 {
4235 words = bytes / 4;
4236
4237 for (i = 0; i < words; ++i)
4238 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4239 const0_rtx);
4240
4241 bytes -= words * 4;
4242 ofs += words * 4;
4243 }
4244
4245 /* An unaligned block uses stq_u stores for as many words as possible. */
4246
4247 if (bytes >= 8)
4248 {
4249 words = bytes / 8;
4250
4251 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4252
4253 bytes -= words * 8;
4254 ofs += words * 8;
4255 }
4256
4257 /* Next clean up any trailing pieces. */
4258
4259 #if HOST_BITS_PER_WIDE_INT >= 64
4260 /* Count the number of bits in BYTES for which aligned stores could
4261 be emitted. */
4262 words = 0;
4263 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4264 if (bytes & i)
4265 words += 1;
4266
4267 /* If we have appropriate alignment (and it wouldn't take too many
4268 instructions otherwise), mask out the bytes we need. */
4269 if (TARGET_BWX ? words > 2 : bytes > 0)
4270 {
4271 if (align >= 64)
4272 {
4273 rtx mem, tmp;
4274 HOST_WIDE_INT mask;
4275
4276 mem = adjust_address (orig_dst, DImode, ofs);
4277 set_mem_alias_set (mem, 0);
4278
4279 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4280
4281 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4282 NULL_RTX, 1, OPTAB_WIDEN);
4283
4284 emit_move_insn (mem, tmp);
4285 return 1;
4286 }
4287 else if (align >= 32 && bytes < 4)
4288 {
4289 rtx mem, tmp;
4290 HOST_WIDE_INT mask;
4291
4292 mem = adjust_address (orig_dst, SImode, ofs);
4293 set_mem_alias_set (mem, 0);
4294
4295 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4296
4297 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4298 NULL_RTX, 1, OPTAB_WIDEN);
4299
4300 emit_move_insn (mem, tmp);
4301 return 1;
4302 }
4303 }
4304 #endif
4305
4306 if (!TARGET_BWX && bytes >= 4)
4307 {
4308 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4309 bytes -= 4;
4310 ofs += 4;
4311 }
4312
4313 if (bytes >= 2)
4314 {
4315 if (align >= 16)
4316 {
4317 do {
4318 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4319 const0_rtx);
4320 bytes -= 2;
4321 ofs += 2;
4322 } while (bytes >= 2);
4323 }
4324 else if (! TARGET_BWX)
4325 {
4326 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4327 bytes -= 2;
4328 ofs += 2;
4329 }
4330 }
4331
4332 while (bytes > 0)
4333 {
4334 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4335 bytes -= 1;
4336 ofs += 1;
4337 }
4338
4339 return 1;
4340 }
4341
4342 /* Returns a mask so that zap(x, value) == x & mask. */
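/* For illustration: the Alpha zap insn clears byte i of its first operand
   when bit i of VALUE is set, so for VALUE == 0x03 the routine below
   returns the mask 0xffffffffffff0000 -- bytes 0 and 1 cleared, all
   remaining bytes preserved.  */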
4343
4344 rtx
4345 alpha_expand_zap_mask (HOST_WIDE_INT value)
4346 {
4347 rtx result;
4348 int i;
4349
4350 if (HOST_BITS_PER_WIDE_INT >= 64)
4351 {
4352 HOST_WIDE_INT mask = 0;
4353
4354 for (i = 7; i >= 0; --i)
4355 {
4356 mask <<= 8;
4357 if (!((value >> i) & 1))
4358 mask |= 0xff;
4359 }
4360
4361 result = gen_int_mode (mask, DImode);
4362 }
4363 else
4364 {
4365 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4366
4367 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4368
4369 for (i = 7; i >= 4; --i)
4370 {
4371 mask_hi <<= 8;
4372 if (!((value >> i) & 1))
4373 mask_hi |= 0xff;
4374 }
4375
4376 for (i = 3; i >= 0; --i)
4377 {
4378 mask_lo <<= 8;
4379 if (!((value >> i) & 1))
4380 mask_lo |= 0xff;
4381 }
4382
4383 result = immed_double_const (mask_lo, mask_hi, DImode);
4384 }
4385
4386 return result;
4387 }
4388
4389 void
4390 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4391 enum machine_mode mode,
4392 rtx op0, rtx op1, rtx op2)
4393 {
4394 op0 = gen_lowpart (mode, op0);
4395
4396 if (op1 == const0_rtx)
4397 op1 = CONST0_RTX (mode);
4398 else
4399 op1 = gen_lowpart (mode, op1);
4400
4401 if (op2 == const0_rtx)
4402 op2 = CONST0_RTX (mode);
4403 else
4404 op2 = gen_lowpart (mode, op2);
4405
4406 emit_insn ((*gen) (op0, op1, op2));
4407 }
4408
4409 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4410 COND is true. Mark the jump as unlikely to be taken. */
4411
4412 static void
4413 emit_unlikely_jump (rtx cond, rtx label)
4414 {
4415 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4416 rtx x;
4417
4418 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4419 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4420 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4421 }
4422
4423 /* A subroutine of the atomic operation splitters. Emit a load-locked
4424 instruction in MODE. */
4425
4426 static void
4427 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4428 {
4429 rtx (*fn) (rtx, rtx) = NULL;
4430 if (mode == SImode)
4431 fn = gen_load_locked_si;
4432 else if (mode == DImode)
4433 fn = gen_load_locked_di;
4434 emit_insn (fn (reg, mem));
4435 }
4436
4437 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4438 instruction in MODE. */
4439
4440 static void
4441 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4442 {
4443 rtx (*fn) (rtx, rtx, rtx) = NULL;
4444 if (mode == SImode)
4445 fn = gen_store_conditional_si;
4446 else if (mode == DImode)
4447 fn = gen_store_conditional_di;
4448 emit_insn (fn (res, mem, val));
4449 }
4450
4451 /* A subroutine of the atomic operation splitters. Emit an insxl
4452 instruction in MODE. */
4453
4454 static rtx
4455 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4456 {
4457 rtx ret = gen_reg_rtx (DImode);
4458 rtx (*fn) (rtx, rtx, rtx);
4459
4460 if (WORDS_BIG_ENDIAN)
4461 {
4462 if (mode == QImode)
4463 fn = gen_insbl_be;
4464 else
4465 fn = gen_inswl_be;
4466 }
4467 else
4468 {
4469 if (mode == QImode)
4470 fn = gen_insbl_le;
4471 else
4472 fn = gen_inswl_le;
4473 }
4474 emit_insn (fn (ret, op1, op2));
4475
4476 return ret;
4477 }
4478
4479 /* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
4480 to perform. MEM is the memory on which to operate. VAL is the second
4481 operand of the binary operator. BEFORE and AFTER are optional locations to
4482 return the value of MEM either before or after the operation.  SCRATCH is
4483 a scratch register. */
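/* Schematically, for a DImode PLUS the splitter below emits roughly

	mb
   1:	ldq_l	before,0(mem)
	addq	before,val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb

   with BEFORE defaulting to SCRATCH when no before-value is requested;
   the exact insns come from the load-locked/store-conditional helpers
   defined above.  */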
4484
4485 void
4486 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4487 rtx before, rtx after, rtx scratch)
4488 {
4489 enum machine_mode mode = GET_MODE (mem);
4490 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4491
4492 emit_insn (gen_memory_barrier ());
4493
4494 label = gen_label_rtx ();
4495 emit_label (label);
4496 label = gen_rtx_LABEL_REF (DImode, label);
4497
4498 if (before == NULL)
4499 before = scratch;
4500 emit_load_locked (mode, before, mem);
4501
4502 if (code == NOT)
4503 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4504 else
4505 x = gen_rtx_fmt_ee (code, mode, before, val);
4506 if (after)
4507 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4508 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4509
4510 emit_store_conditional (mode, cond, mem, scratch);
4511
4512 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4513 emit_unlikely_jump (x, label);
4514
4515 emit_insn (gen_memory_barrier ());
4516 }
4517
4518 /* Expand a compare and swap operation. */
4519
4520 void
4521 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4522 rtx scratch)
4523 {
4524 enum machine_mode mode = GET_MODE (mem);
4525 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4526
4527 emit_insn (gen_memory_barrier ());
4528
4529 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4530 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4531 emit_label (XEXP (label1, 0));
4532
4533 emit_load_locked (mode, retval, mem);
4534
4535 x = gen_lowpart (DImode, retval);
4536 if (oldval == const0_rtx)
4537 x = gen_rtx_NE (DImode, x, const0_rtx);
4538 else
4539 {
4540 x = gen_rtx_EQ (DImode, x, oldval);
4541 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4542 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4543 }
4544 emit_unlikely_jump (x, label2);
4545
4546 emit_move_insn (scratch, newval);
4547 emit_store_conditional (mode, cond, mem, scratch);
4548
4549 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4550 emit_unlikely_jump (x, label1);
4551
4552 emit_insn (gen_memory_barrier ());
4553 emit_label (XEXP (label2, 0));
4554 }
4555
4556 void
4557 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4558 {
4559 enum machine_mode mode = GET_MODE (mem);
4560 rtx addr, align, wdst;
4561 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4562
4563 addr = force_reg (DImode, XEXP (mem, 0));
4564 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4565 NULL_RTX, 1, OPTAB_DIRECT);
4566
4567 oldval = convert_modes (DImode, mode, oldval, 1);
4568 newval = emit_insxl (mode, newval, addr);
4569
4570 wdst = gen_reg_rtx (DImode);
4571 if (mode == QImode)
4572 fn5 = gen_sync_compare_and_swapqi_1;
4573 else
4574 fn5 = gen_sync_compare_and_swaphi_1;
4575 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4576
4577 emit_move_insn (dst, gen_lowpart (mode, wdst));
4578 }
4579
4580 void
4581 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4582 rtx oldval, rtx newval, rtx align,
4583 rtx scratch, rtx cond)
4584 {
4585 rtx label1, label2, mem, width, mask, x;
4586
4587 mem = gen_rtx_MEM (DImode, align);
4588 MEM_VOLATILE_P (mem) = 1;
4589
4590 emit_insn (gen_memory_barrier ());
4591 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4592 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4593 emit_label (XEXP (label1, 0));
4594
4595 emit_load_locked (DImode, scratch, mem);
4596
4597 width = GEN_INT (GET_MODE_BITSIZE (mode));
4598 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4599 if (WORDS_BIG_ENDIAN)
4600 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4601 else
4602 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4603
4604 if (oldval == const0_rtx)
4605 x = gen_rtx_NE (DImode, dest, const0_rtx);
4606 else
4607 {
4608 x = gen_rtx_EQ (DImode, dest, oldval);
4609 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4610 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4611 }
4612 emit_unlikely_jump (x, label2);
4613
4614 if (WORDS_BIG_ENDIAN)
4615 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4616 else
4617 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4618 emit_insn (gen_iordi3 (scratch, scratch, newval));
4619
4620 emit_store_conditional (DImode, scratch, mem, scratch);
4621
4622 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4623 emit_unlikely_jump (x, label1);
4624
4625 emit_insn (gen_memory_barrier ());
4626 emit_label (XEXP (label2, 0));
4627 }
4628
4629 /* Expand an atomic exchange operation. */
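/* Schematically, for DImode this emits roughly

	mb
   1:	ldq_l	retval,0(mem)
	mov	val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b

   i.e. the old value ends up in RETVAL and the store-conditional result,
   tested through the DImode view of SCRATCH, drives the retry loop.  */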
4630
4631 void
4632 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4633 {
4634 enum machine_mode mode = GET_MODE (mem);
4635 rtx label, x, cond = gen_lowpart (DImode, scratch);
4636
4637 emit_insn (gen_memory_barrier ());
4638
4639 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4640 emit_label (XEXP (label, 0));
4641
4642 emit_load_locked (mode, retval, mem);
4643 emit_move_insn (scratch, val);
4644 emit_store_conditional (mode, cond, mem, scratch);
4645
4646 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4647 emit_unlikely_jump (x, label);
4648 }
4649
4650 void
4651 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4652 {
4653 enum machine_mode mode = GET_MODE (mem);
4654 rtx addr, align, wdst;
4655 rtx (*fn4) (rtx, rtx, rtx, rtx);
4656
4657 /* Force the address into a register. */
4658 addr = force_reg (DImode, XEXP (mem, 0));
4659
4660 /* Align it to a multiple of 8. */
4661 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4662 NULL_RTX, 1, OPTAB_DIRECT);
4663
4664 /* Insert val into the correct byte location within the word. */
4665 val = emit_insxl (mode, val, addr);
4666
4667 wdst = gen_reg_rtx (DImode);
4668 if (mode == QImode)
4669 fn4 = gen_sync_lock_test_and_setqi_1;
4670 else
4671 fn4 = gen_sync_lock_test_and_sethi_1;
4672 emit_insn (fn4 (wdst, addr, val, align));
4673
4674 emit_move_insn (dst, gen_lowpart (mode, wdst));
4675 }
4676
4677 void
4678 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4679 rtx val, rtx align, rtx scratch)
4680 {
4681 rtx label, mem, width, mask, x;
4682
4683 mem = gen_rtx_MEM (DImode, align);
4684 MEM_VOLATILE_P (mem) = 1;
4685
4686 emit_insn (gen_memory_barrier ());
4687 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4688 emit_label (XEXP (label, 0));
4689
4690 emit_load_locked (DImode, scratch, mem);
4691
4692 width = GEN_INT (GET_MODE_BITSIZE (mode));
4693 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4694 if (WORDS_BIG_ENDIAN)
4695 {
4696 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4697 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4698 }
4699 else
4700 {
4701 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4702 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4703 }
4704 emit_insn (gen_iordi3 (scratch, scratch, val));
4705
4706 emit_store_conditional (DImode, scratch, mem, scratch);
4707
4708 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4709 emit_unlikely_jump (x, label);
4710 }
4711 \f
4712 /* Adjust the cost of a scheduling dependency. Return the new cost of
4713 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4714
4715 static int
4716 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4717 {
4718 enum attr_type insn_type, dep_insn_type;
4719
4720 /* If the dependence is an anti-dependence, there is no cost. For an
4721 output dependence, there is sometimes a cost, but it doesn't seem
4722 worth handling those few cases. */
4723 if (REG_NOTE_KIND (link) != 0)
4724 return cost;
4725
4726 /* If we can't recognize the insns, we can't really do anything. */
4727 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4728 return cost;
4729
4730 insn_type = get_attr_type (insn);
4731 dep_insn_type = get_attr_type (dep_insn);
4732
4733 /* Bring in the user-defined memory latency. */
4734 if (dep_insn_type == TYPE_ILD
4735 || dep_insn_type == TYPE_FLD
4736 || dep_insn_type == TYPE_LDSYM)
4737 cost += alpha_memory_latency-1;
4738
4739 /* Everything else handled in DFA bypasses now. */
4740
4741 return cost;
4742 }
4743
4744 /* The number of instructions that can be issued per cycle. */
4745
4746 static int
4747 alpha_issue_rate (void)
4748 {
4749 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4750 }
4751
4752 /* How many alternative schedules to try. This should be as wide as the
4753 scheduling freedom in the DFA, but no wider. Making this value too
4754 large results in extra work for the scheduler.
4755
4756 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4757 alternative schedules. For EV5, we can choose between E0/E1 and
4758 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4759
4760 static int
4761 alpha_multipass_dfa_lookahead (void)
4762 {
4763 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4764 }
4765 \f
4766 /* Machine-specific function data. */
4767
4768 struct machine_function GTY(())
4769 {
4770 /* For unicosmk. */
4771 /* List of call information words for calls from this function. */
4772 struct rtx_def *first_ciw;
4773 struct rtx_def *last_ciw;
4774 int ciw_count;
4775
4776 /* List of deferred case vectors. */
4777 struct rtx_def *addr_list;
4778
4779 /* For OSF. */
4780 const char *some_ld_name;
4781
4782 /* For TARGET_LD_BUGGY_LDGP. */
4783 struct rtx_def *gp_save_rtx;
4784 };
4785
4786 /* How to allocate a 'struct machine_function'. */
4787
4788 static struct machine_function *
4789 alpha_init_machine_status (void)
4790 {
4791 return ((struct machine_function *)
4792 ggc_alloc_cleared (sizeof (struct machine_function)));
4793 }
4794
4795 /* Functions to save and restore alpha_return_addr_rtx. */
4796
4797 /* Start the ball rolling with RETURN_ADDR_RTX. */
4798
4799 rtx
4800 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4801 {
4802 if (count != 0)
4803 return const0_rtx;
4804
4805 return get_hard_reg_initial_val (Pmode, REG_RA);
4806 }
4807
4808 /* Return or create a memory slot containing the gp value for the current
4809 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4810
4811 rtx
4812 alpha_gp_save_rtx (void)
4813 {
4814 rtx seq, m = cfun->machine->gp_save_rtx;
4815
4816 if (m == NULL)
4817 {
4818 start_sequence ();
4819
4820 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4821 m = validize_mem (m);
4822 emit_move_insn (m, pic_offset_table_rtx);
4823
4824 seq = get_insns ();
4825 end_sequence ();
4826 emit_insn_after (seq, entry_of_function ());
4827
4828 cfun->machine->gp_save_rtx = m;
4829 }
4830
4831 return m;
4832 }
4833
4834 static int
4835 alpha_ra_ever_killed (void)
4836 {
4837 rtx top;
4838
4839 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4840 return regs_ever_live[REG_RA];
4841
4842 push_topmost_sequence ();
4843 top = get_insns ();
4844 pop_topmost_sequence ();
4845
4846 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4847 }
4848
4849 \f
4850 /* Return the trap mode suffix applicable to the current
4851 instruction, or NULL. */
4852
4853 static const char *
4854 get_trap_mode_suffix (void)
4855 {
4856 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4857
4858 switch (s)
4859 {
4860 case TRAP_SUFFIX_NONE:
4861 return NULL;
4862
4863 case TRAP_SUFFIX_SU:
4864 if (alpha_fptm >= ALPHA_FPTM_SU)
4865 return "su";
4866 return NULL;
4867
4868 case TRAP_SUFFIX_SUI:
4869 if (alpha_fptm >= ALPHA_FPTM_SUI)
4870 return "sui";
4871 return NULL;
4872
4873 case TRAP_SUFFIX_V_SV:
4874 switch (alpha_fptm)
4875 {
4876 case ALPHA_FPTM_N:
4877 return NULL;
4878 case ALPHA_FPTM_U:
4879 return "v";
4880 case ALPHA_FPTM_SU:
4881 case ALPHA_FPTM_SUI:
4882 return "sv";
4883 default:
4884 gcc_unreachable ();
4885 }
4886
4887 case TRAP_SUFFIX_V_SV_SVI:
4888 switch (alpha_fptm)
4889 {
4890 case ALPHA_FPTM_N:
4891 return NULL;
4892 case ALPHA_FPTM_U:
4893 return "v";
4894 case ALPHA_FPTM_SU:
4895 return "sv";
4896 case ALPHA_FPTM_SUI:
4897 return "svi";
4898 default:
4899 gcc_unreachable ();
4900 }
4901 break;
4902
4903 case TRAP_SUFFIX_U_SU_SUI:
4904 switch (alpha_fptm)
4905 {
4906 case ALPHA_FPTM_N:
4907 return NULL;
4908 case ALPHA_FPTM_U:
4909 return "u";
4910 case ALPHA_FPTM_SU:
4911 return "su";
4912 case ALPHA_FPTM_SUI:
4913 return "sui";
4914 default:
4915 gcc_unreachable ();
4916 }
4917 break;
4918
4919 default:
4920 gcc_unreachable ();
4921 }
4922 gcc_unreachable ();
4923 }
4924
4925 /* Return the rounding mode suffix applicable to the current
4926 instruction, or NULL. */
4927
4928 static const char *
4929 get_round_mode_suffix (void)
4930 {
4931 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4932
4933 switch (s)
4934 {
4935 case ROUND_SUFFIX_NONE:
4936 return NULL;
4937 case ROUND_SUFFIX_NORMAL:
4938 switch (alpha_fprm)
4939 {
4940 case ALPHA_FPRM_NORM:
4941 return NULL;
4942 case ALPHA_FPRM_MINF:
4943 return "m";
4944 case ALPHA_FPRM_CHOP:
4945 return "c";
4946 case ALPHA_FPRM_DYN:
4947 return "d";
4948 default:
4949 gcc_unreachable ();
4950 }
4951 break;
4952
4953 case ROUND_SUFFIX_C:
4954 return "c";
4955
4956 default:
4957 gcc_unreachable ();
4958 }
4959 gcc_unreachable ();
4960 }
4961
4962 /* Locate some local-dynamic symbol still in use by this function
4963 so that we can print its name in some movdi_er_tlsldm pattern. */
4964
4965 static int
4966 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4967 {
4968 rtx x = *px;
4969
4970 if (GET_CODE (x) == SYMBOL_REF
4971 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4972 {
4973 cfun->machine->some_ld_name = XSTR (x, 0);
4974 return 1;
4975 }
4976
4977 return 0;
4978 }
4979
4980 static const char *
4981 get_some_local_dynamic_name (void)
4982 {
4983 rtx insn;
4984
4985 if (cfun->machine->some_ld_name)
4986 return cfun->machine->some_ld_name;
4987
4988 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4989 if (INSN_P (insn)
4990 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4991 return cfun->machine->some_ld_name;
4992
4993 gcc_unreachable ();
4994 }
4995
4996 /* Print an operand. Recognize special options, documented below. */
4997
4998 void
4999 print_operand (FILE *file, rtx x, int code)
5000 {
5001 int i;
5002
5003 switch (code)
5004 {
5005 case '~':
5006 /* Print the assembler name of the current function. */
5007 assemble_name (file, alpha_fnname);
5008 break;
5009
5010 case '&':
5011 assemble_name (file, get_some_local_dynamic_name ());
5012 break;
5013
5014 case '/':
5015 {
5016 const char *trap = get_trap_mode_suffix ();
5017 const char *round = get_round_mode_suffix ();
5018
5019 if (trap || round)
5020 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5021 (trap ? trap : ""), (round ? round : ""));
5022 break;
5023 }
5024
5025 case ',':
5026 /* Generates single precision instruction suffix. */
5027 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5028 break;
5029
5030 case '-':
5031 /* Generates double precision instruction suffix. */
5032 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5033 break;
5034
5035 case '+':
5036 /* Generates a nop after a noreturn call at the very end of the
5037 function. */
5038 if (next_real_insn (current_output_insn) == 0)
5039 fprintf (file, "\n\tnop");
5040 break;
5041
5042 case '#':
5043 if (alpha_this_literal_sequence_number == 0)
5044 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5045 fprintf (file, "%d", alpha_this_literal_sequence_number);
5046 break;
5047
5048 case '*':
5049 if (alpha_this_gpdisp_sequence_number == 0)
5050 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5051 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5052 break;
5053
5054 case 'H':
5055 if (GET_CODE (x) == HIGH)
5056 output_addr_const (file, XEXP (x, 0));
5057 else
5058 output_operand_lossage ("invalid %%H value");
5059 break;
5060
5061 case 'J':
5062 {
5063 const char *lituse;
5064
5065 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5066 {
5067 x = XVECEXP (x, 0, 0);
5068 lituse = "lituse_tlsgd";
5069 }
5070 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5071 {
5072 x = XVECEXP (x, 0, 0);
5073 lituse = "lituse_tlsldm";
5074 }
5075 else if (GET_CODE (x) == CONST_INT)
5076 lituse = "lituse_jsr";
5077 else
5078 {
5079 output_operand_lossage ("invalid %%J value");
5080 break;
5081 }
5082
5083 if (x != const0_rtx)
5084 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5085 }
5086 break;
5087
5088 case 'j':
5089 {
5090 const char *lituse;
5091
5092 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5093 lituse = "lituse_jsrdirect";
5094 #else
5095 lituse = "lituse_jsr";
5096 #endif
5097
5098 gcc_assert (INTVAL (x) != 0);
5099 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5100 }
5101 break;
5102 case 'r':
5103 /* If this operand is the constant zero, write it as "$31". */
5104 if (GET_CODE (x) == REG)
5105 fprintf (file, "%s", reg_names[REGNO (x)]);
5106 else if (x == CONST0_RTX (GET_MODE (x)))
5107 fprintf (file, "$31");
5108 else
5109 output_operand_lossage ("invalid %%r value");
5110 break;
5111
5112 case 'R':
5113 /* Similar, but for floating-point. */
5114 if (GET_CODE (x) == REG)
5115 fprintf (file, "%s", reg_names[REGNO (x)]);
5116 else if (x == CONST0_RTX (GET_MODE (x)))
5117 fprintf (file, "$f31");
5118 else
5119 output_operand_lossage ("invalid %%R value");
5120 break;
5121
5122 case 'N':
5123 /* Write the 1's complement of a constant. */
5124 if (GET_CODE (x) != CONST_INT)
5125 output_operand_lossage ("invalid %%N value");
5126
5127 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5128 break;
5129
5130 case 'P':
5131 /* Write 1 << C, for a constant C. */
5132 if (GET_CODE (x) != CONST_INT)
5133 output_operand_lossage ("invalid %%P value");
5134
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5136 break;
5137
5138 case 'h':
5139 /* Write the high-order 16 bits of a constant, sign-extended. */
5140 if (GET_CODE (x) != CONST_INT)
5141 output_operand_lossage ("invalid %%h value");
5142
5143 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5144 break;
5145
5146 case 'L':
5147 /* Write the low-order 16 bits of a constant, sign-extended. */
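/* For example, a constant whose low half is 0x9876 prints here as -26506,
   i.e. 0x9876 sign-extended as a 16-bit value.  */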
5148 if (GET_CODE (x) != CONST_INT)
5149 output_operand_lossage ("invalid %%L value");
5150
5151 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5152 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5153 break;
5154
5155 case 'm':
5156 /* Write mask for ZAP insn. */
5157 if (GET_CODE (x) == CONST_DOUBLE)
5158 {
5159 HOST_WIDE_INT mask = 0;
5160 HOST_WIDE_INT value;
5161
5162 value = CONST_DOUBLE_LOW (x);
5163 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5164 i++, value >>= 8)
5165 if (value & 0xff)
5166 mask |= (1 << i);
5167
5168 value = CONST_DOUBLE_HIGH (x);
5169 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5170 i++, value >>= 8)
5171 if (value & 0xff)
5172 mask |= (1 << (i + sizeof (int)));
5173
5174 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5175 }
5176
5177 else if (GET_CODE (x) == CONST_INT)
5178 {
5179 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5180
5181 for (i = 0; i < 8; i++, value >>= 8)
5182 if (value & 0xff)
5183 mask |= (1 << i);
5184
5185 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5186 }
5187 else
5188 output_operand_lossage ("invalid %%m value");
5189 break;
5190
5191 case 'M':
5192 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5193 if (GET_CODE (x) != CONST_INT
5194 || (INTVAL (x) != 8 && INTVAL (x) != 16
5195 && INTVAL (x) != 32 && INTVAL (x) != 64))
5196 output_operand_lossage ("invalid %%M value");
5197
5198 fprintf (file, "%s",
5199 (INTVAL (x) == 8 ? "b"
5200 : INTVAL (x) == 16 ? "w"
5201 : INTVAL (x) == 32 ? "l"
5202 : "q"));
5203 break;
5204
5205 case 'U':
5206 /* Similar, except do it from the mask. */
5207 if (GET_CODE (x) == CONST_INT)
5208 {
5209 HOST_WIDE_INT value = INTVAL (x);
5210
5211 if (value == 0xff)
5212 {
5213 fputc ('b', file);
5214 break;
5215 }
5216 if (value == 0xffff)
5217 {
5218 fputc ('w', file);
5219 break;
5220 }
5221 if (value == 0xffffffff)
5222 {
5223 fputc ('l', file);
5224 break;
5225 }
5226 if (value == -1)
5227 {
5228 fputc ('q', file);
5229 break;
5230 }
5231 }
5232 else if (HOST_BITS_PER_WIDE_INT == 32
5233 && GET_CODE (x) == CONST_DOUBLE
5234 && CONST_DOUBLE_LOW (x) == 0xffffffff
5235 && CONST_DOUBLE_HIGH (x) == 0)
5236 {
5237 fputc ('l', file);
5238 break;
5239 }
5240 output_operand_lossage ("invalid %%U value");
5241 break;
5242
5243 case 's':
5244 /* Write the constant value divided by 8 for little-endian mode or
5245 (56 - value) / 8 for big-endian mode. */
5246
5247 if (GET_CODE (x) != CONST_INT
5248 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5249 ? 56
5250 : 64)
5251 || (INTVAL (x) & 7) != 0)
5252 output_operand_lossage ("invalid %%s value");
5253
5254 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5255 WORDS_BIG_ENDIAN
5256 ? (56 - INTVAL (x)) / 8
5257 : INTVAL (x) / 8);
5258 break;
5259
5260 case 'S':
5261 /* Same, except compute (64 - c) / 8 */
5262
5263 if (GET_CODE (x) != CONST_INT
5264 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5265 || (INTVAL (x) & 7) != 0)
5266 output_operand_lossage ("invalid %%S value");
5267
5268 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5269 break;
5270
5271 case 't':
5272 {
5273 /* On Unicos/Mk systems: use a DEX expression if the symbol
5274 clashes with a register name. */
5275 int dex = unicosmk_need_dex (x);
5276 if (dex)
5277 fprintf (file, "DEX(%d)", dex);
5278 else
5279 output_addr_const (file, x);
5280 }
5281 break;
5282
5283 case 'C': case 'D': case 'c': case 'd':
5284 /* Write out comparison name. */
5285 {
5286 enum rtx_code c = GET_CODE (x);
5287
5288 if (!COMPARISON_P (x))
5289 output_operand_lossage ("invalid %%C value");
5290
5291 else if (code == 'D')
5292 c = reverse_condition (c);
5293 else if (code == 'c')
5294 c = swap_condition (c);
5295 else if (code == 'd')
5296 c = swap_condition (reverse_condition (c));
5297
5298 if (c == LEU)
5299 fprintf (file, "ule");
5300 else if (c == LTU)
5301 fprintf (file, "ult");
5302 else if (c == UNORDERED)
5303 fprintf (file, "un");
5304 else
5305 fprintf (file, "%s", GET_RTX_NAME (c));
5306 }
5307 break;
5308
5309 case 'E':
5310 /* Write the divide or modulus operator. */
5311 switch (GET_CODE (x))
5312 {
5313 case DIV:
5314 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5315 break;
5316 case UDIV:
5317 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5318 break;
5319 case MOD:
5320 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5321 break;
5322 case UMOD:
5323 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5324 break;
5325 default:
5326 output_operand_lossage ("invalid %%E value");
5327 break;
5328 }
5329 break;
5330
5331 case 'A':
5332 /* Write "_u" for unaligned access. */
5333 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5334 fprintf (file, "_u");
5335 break;
5336
5337 case 0:
5338 if (GET_CODE (x) == REG)
5339 fprintf (file, "%s", reg_names[REGNO (x)]);
5340 else if (GET_CODE (x) == MEM)
5341 output_address (XEXP (x, 0));
5342 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5343 {
5344 switch (XINT (XEXP (x, 0), 1))
5345 {
5346 case UNSPEC_DTPREL:
5347 case UNSPEC_TPREL:
5348 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5349 break;
5350 default:
5351 output_operand_lossage ("unknown relocation unspec");
5352 break;
5353 }
5354 }
5355 else
5356 output_addr_const (file, x);
5357 break;
5358
5359 default:
5360 output_operand_lossage ("invalid %%xn code");
5361 }
5362 }
5363
5364 void
5365 print_operand_address (FILE *file, rtx addr)
5366 {
5367 int basereg = 31;
5368 HOST_WIDE_INT offset = 0;
5369
5370 if (GET_CODE (addr) == AND)
5371 addr = XEXP (addr, 0);
5372
5373 if (GET_CODE (addr) == PLUS
5374 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5375 {
5376 offset = INTVAL (XEXP (addr, 1));
5377 addr = XEXP (addr, 0);
5378 }
5379
5380 if (GET_CODE (addr) == LO_SUM)
5381 {
5382 const char *reloc16, *reloclo;
5383 rtx op1 = XEXP (addr, 1);
5384
5385 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5386 {
5387 op1 = XEXP (op1, 0);
5388 switch (XINT (op1, 1))
5389 {
5390 case UNSPEC_DTPREL:
5391 reloc16 = NULL;
5392 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5393 break;
5394 case UNSPEC_TPREL:
5395 reloc16 = NULL;
5396 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5397 break;
5398 default:
5399 output_operand_lossage ("unknown relocation unspec");
5400 return;
5401 }
5402
5403 output_addr_const (file, XVECEXP (op1, 0, 0));
5404 }
5405 else
5406 {
5407 reloc16 = "gprel";
5408 reloclo = "gprellow";
5409 output_addr_const (file, op1);
5410 }
5411
5412 if (offset)
5413 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5414
5415 addr = XEXP (addr, 0);
5416 switch (GET_CODE (addr))
5417 {
5418 case REG:
5419 basereg = REGNO (addr);
5420 break;
5421
5422 case SUBREG:
5423 basereg = subreg_regno (addr);
5424 break;
5425
5426 default:
5427 gcc_unreachable ();
5428 }
5429
5430 fprintf (file, "($%d)\t\t!%s", basereg,
5431 (basereg == 29 ? reloc16 : reloclo));
5432 return;
5433 }
5434
5435 switch (GET_CODE (addr))
5436 {
5437 case REG:
5438 basereg = REGNO (addr);
5439 break;
5440
5441 case SUBREG:
5442 basereg = subreg_regno (addr);
5443 break;
5444
5445 case CONST_INT:
5446 offset = INTVAL (addr);
5447 break;
5448
5449 #if TARGET_ABI_OPEN_VMS
5450 case SYMBOL_REF:
5451 fprintf (file, "%s", XSTR (addr, 0));
5452 return;
5453
5454 case CONST:
5455 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5456 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5457 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5458 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5459 INTVAL (XEXP (XEXP (addr, 0), 1)));
5460 return;
5461
5462 #endif
5463 default:
5464 gcc_unreachable ();
5465 }
5466
5467 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5468 }
5469 \f
5470 /* Emit RTL insns to initialize the variable parts of a trampoline at
5471 TRAMP. FNADDR is an RTX for the address of the function's pure
5472 code. CXT is an RTX for the static chain value for the function.
5473
5474 The three offset parameters are for the individual template's
5475 layout. A JMPOFS < 0 indicates that the trampoline does not
5476 contain instructions at all.
5477
5478 We assume here that a function will be called many more times than
5479 its address is taken (e.g., it might be passed to qsort), so we
5480 take the trouble to initialize the "hint" field in the JMP insn.
5481 Note that the hint field is PC (new) + 4 * bits 13:0. */
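/* Concretely, the (currently disabled) code below would compute the hint
   as ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff, i.e. the word
   displacement from the insn following the jmp to the function entry.  */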
5482
5483 void
5484 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5485 int fnofs, int cxtofs, int jmpofs)
5486 {
5487 rtx temp, temp1, addr;
5488 /* VMS really uses DImode pointers in memory at this point. */
5489 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5490
5491 #ifdef POINTERS_EXTEND_UNSIGNED
5492 fnaddr = convert_memory_address (mode, fnaddr);
5493 cxt = convert_memory_address (mode, cxt);
5494 #endif
5495
5496 /* Store function address and CXT. */
5497 addr = memory_address (mode, plus_constant (tramp, fnofs));
5498 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5499 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5500 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5501
5502 /* This has been disabled since the hint only has a 32k range, and in
5503 no existing OS is the stack within 32k of the text segment. */
5504 if (0 && jmpofs >= 0)
5505 {
5506 /* Compute hint value. */
5507 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5508 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5509 OPTAB_WIDEN);
5510 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5511 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5512 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5513 GEN_INT (0x3fff), 0);
5514
5515 /* Merge in the hint. */
5516 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5517 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5518 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5519 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5520 OPTAB_WIDEN);
5521 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5522 }
5523
5524 #ifdef ENABLE_EXECUTE_STACK
5525 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5526 0, VOIDmode, 1, tramp, Pmode);
5527 #endif
5528
5529 if (jmpofs >= 0)
5530 emit_insn (gen_imb ());
5531 }
5532 \f
5533 /* Determine where to put an argument to a function.
5534 Value is zero to push the argument on the stack,
5535 or a hard register in which to store the argument.
5536
5537 MODE is the argument's machine mode.
5538 TYPE is the data type of the argument (as a tree).
5539 This is null for libcalls where that information may
5540 not be available.
5541 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5542 the preceding args and about the function being called.
5543 NAMED is nonzero if this argument is a named parameter
5544 (otherwise it is an extra parameter matching an ellipsis).
5545
5546 On Alpha the first 6 words of args are normally in registers
5547 and the rest are pushed. */
5548
5549 rtx
5550 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5551 int named ATTRIBUTE_UNUSED)
5552 {
5553 int basereg;
5554 int num_args;
5555
5556 /* Don't get confused and pass small structures in FP registers. */
5557 if (type && AGGREGATE_TYPE_P (type))
5558 basereg = 16;
5559 else
5560 {
5561 #ifdef ENABLE_CHECKING
5562 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5563 values here. */
5564 gcc_assert (!COMPLEX_MODE_P (mode));
5565 #endif
5566
5567 /* Set up defaults for FP operands passed in FP registers, and
5568 integral operands passed in integer registers. */
5569 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5570 basereg = 32 + 16;
5571 else
5572 basereg = 16;
5573 }
5574
5575 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5576 the three platforms, so we can't avoid conditional compilation. */
5577 #if TARGET_ABI_OPEN_VMS
5578 {
5579 if (mode == VOIDmode)
5580 return alpha_arg_info_reg_val (cum);
5581
5582 num_args = cum.num_args;
5583 if (num_args >= 6
5584 || targetm.calls.must_pass_in_stack (mode, type))
5585 return NULL_RTX;
5586 }
5587 #elif TARGET_ABI_UNICOSMK
5588 {
5589 int size;
5590
5591 /* If this is the last argument, generate the call info word (CIW). */
5592 /* ??? We don't include the caller's line number in the CIW because
5593 I don't know how to determine it if debug info is turned off. */
5594 if (mode == VOIDmode)
5595 {
5596 int i;
5597 HOST_WIDE_INT lo;
5598 HOST_WIDE_INT hi;
5599 rtx ciw;
5600
5601 lo = 0;
5602
5603 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5604 if (cum.reg_args_type[i])
5605 lo |= (1 << (7 - i));
5606
5607 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5608 lo |= 7;
5609 else
5610 lo |= cum.num_reg_words;
5611
5612 #if HOST_BITS_PER_WIDE_INT == 32
5613 hi = (cum.num_args << 20) | cum.num_arg_words;
5614 #else
5615 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5616 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5617 hi = 0;
5618 #endif
5619 ciw = immed_double_const (lo, hi, DImode);
5620
5621 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5622 UNSPEC_UMK_LOAD_CIW);
5623 }
5624
5625 size = ALPHA_ARG_SIZE (mode, type, named);
5626 num_args = cum.num_reg_words;
5627 if (cum.force_stack
5628 || cum.num_reg_words + size > 6
5629 || targetm.calls.must_pass_in_stack (mode, type))
5630 return NULL_RTX;
5631 else if (type && TYPE_MODE (type) == BLKmode)
5632 {
5633 rtx reg1, reg2;
5634
5635 reg1 = gen_rtx_REG (DImode, num_args + 16);
5636 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5637
5638 /* The argument fits in two registers. Note that we still need to
5639 reserve a register for empty structures. */
5640 if (size == 0)
5641 return NULL_RTX;
5642 else if (size == 1)
5643 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5644 else
5645 {
5646 reg2 = gen_rtx_REG (DImode, num_args + 17);
5647 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5648 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5649 }
5650 }
5651 }
5652 #elif TARGET_ABI_OSF
5653 {
5654 if (cum >= 6)
5655 return NULL_RTX;
5656 num_args = cum;
5657
5658 /* VOID is passed as a special flag for "last argument". */
5659 if (type == void_type_node)
5660 basereg = 16;
5661 else if (targetm.calls.must_pass_in_stack (mode, type))
5662 return NULL_RTX;
5663 }
5664 #else
5665 #error Unhandled ABI
5666 #endif
5667
5668 return gen_rtx_REG (mode, num_args + basereg);
5669 }
5670
5671 static int
5672 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5673 enum machine_mode mode ATTRIBUTE_UNUSED,
5674 tree type ATTRIBUTE_UNUSED,
5675 bool named ATTRIBUTE_UNUSED)
5676 {
5677 int words = 0;
5678
5679 #if TARGET_ABI_OPEN_VMS
5680 if (cum->num_args < 6
5681 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5682 words = 6 - cum->num_args;
5683 #elif TARGET_ABI_UNICOSMK
5684 /* Never any split arguments. */
5685 #elif TARGET_ABI_OSF
5686 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5687 words = 6 - *cum;
5688 #else
5689 #error Unhandled ABI
5690 #endif
5691
5692 return words * UNITS_PER_WORD;
5693 }
5694
5695
5696 /* Return true if TYPE must be returned in memory, instead of in registers. */
5697
5698 static bool
5699 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5700 {
5701 enum machine_mode mode = VOIDmode;
5702 int size;
5703
5704 if (type)
5705 {
5706 mode = TYPE_MODE (type);
5707
5708 /* All aggregates are returned in memory. */
5709 if (AGGREGATE_TYPE_P (type))
5710 return true;
5711 }
5712
5713 size = GET_MODE_SIZE (mode);
5714 switch (GET_MODE_CLASS (mode))
5715 {
5716 case MODE_VECTOR_FLOAT:
5717 /* Pass all float vectors in memory, like an aggregate. */
5718 return true;
5719
5720 case MODE_COMPLEX_FLOAT:
5721 /* We judge complex floats on the size of their element,
5722 not the size of the whole type. */
5723 size = GET_MODE_UNIT_SIZE (mode);
5724 break;
5725
5726 case MODE_INT:
5727 case MODE_FLOAT:
5728 case MODE_COMPLEX_INT:
5729 case MODE_VECTOR_INT:
5730 break;
5731
5732 default:
5733 /* ??? We get called on all sorts of random stuff from
5734 aggregate_value_p. We must return something, but it's not
5735 clear what's safe to return. Pretend it's a struct I
5736 guess. */
5737 return true;
5738 }
5739
5740 /* Otherwise types must fit in one register. */
5741 return size > UNITS_PER_WORD;
5742 }
5743
5744 /* Return true if TYPE should be passed by invisible reference. */
5745
5746 static bool
5747 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5748 enum machine_mode mode,
5749 tree type ATTRIBUTE_UNUSED,
5750 bool named ATTRIBUTE_UNUSED)
5751 {
5752 return mode == TFmode || mode == TCmode;
5753 }
5754
5755 /* Define how to find the value returned by a function. VALTYPE is the
5756 data type of the value (as a tree). If the precise function being
5757 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5758 MODE is set instead of VALTYPE for libcalls.
5759
5760 On Alpha the value is found in $0 for integer functions and
5761 $f0 for floating-point functions. */
5762
5763 rtx
5764 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5765 enum machine_mode mode)
5766 {
5767 unsigned int regnum, dummy;
5768 enum mode_class class;
5769
5770 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5771
5772 if (valtype)
5773 mode = TYPE_MODE (valtype);
5774
5775 class = GET_MODE_CLASS (mode);
5776 switch (class)
5777 {
5778 case MODE_INT:
5779 PROMOTE_MODE (mode, dummy, valtype);
5780 /* FALLTHRU */
5781
5782 case MODE_COMPLEX_INT:
5783 case MODE_VECTOR_INT:
5784 regnum = 0;
5785 break;
5786
5787 case MODE_FLOAT:
5788 regnum = 32;
5789 break;
5790
5791 case MODE_COMPLEX_FLOAT:
5792 {
5793 enum machine_mode cmode = GET_MODE_INNER (mode);
5794
5795 return gen_rtx_PARALLEL
5796 (VOIDmode,
5797 gen_rtvec (2,
5798 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5799 const0_rtx),
5800 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5801 GEN_INT (GET_MODE_SIZE (cmode)))));
5802 }
5803
5804 default:
5805 gcc_unreachable ();
5806 }
5807
5808 return gen_rtx_REG (mode, regnum);
5809 }
5810
5811 /* TCmode complex values are passed by invisible reference. We
5812 should not split these values. */
5813
5814 static bool
5815 alpha_split_complex_arg (tree type)
5816 {
5817 return TYPE_MODE (type) != TCmode;
5818 }
5819
5820 static tree
5821 alpha_build_builtin_va_list (void)
5822 {
5823 tree base, ofs, space, record, type_decl;
5824
5825 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5826 return ptr_type_node;
5827
5828 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5829 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5830 TREE_CHAIN (record) = type_decl;
5831 TYPE_NAME (record) = type_decl;
5832
5833 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5834
5835 /* Dummy field to prevent alignment warnings. */
5836 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5837 DECL_FIELD_CONTEXT (space) = record;
5838 DECL_ARTIFICIAL (space) = 1;
5839 DECL_IGNORED_P (space) = 1;
5840
5841 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5842 integer_type_node);
5843 DECL_FIELD_CONTEXT (ofs) = record;
5844 TREE_CHAIN (ofs) = space;
5845
5846 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5847 ptr_type_node);
5848 DECL_FIELD_CONTEXT (base) = record;
5849 TREE_CHAIN (base) = ofs;
5850
5851 TYPE_FIELDS (record) = base;
5852 layout_type (record);
5853
5854 va_list_gpr_counter_field = ofs;
5855 return record;
5856 }
5857
5858 #if TARGET_ABI_OSF
5859 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5860 and constant additions. */
5861
5862 static tree
5863 va_list_skip_additions (tree lhs)
5864 {
5865 tree rhs, stmt;
5866
5867 if (TREE_CODE (lhs) != SSA_NAME)
5868 return lhs;
5869
5870 for (;;)
5871 {
5872 stmt = SSA_NAME_DEF_STMT (lhs);
5873
5874 if (TREE_CODE (stmt) == PHI_NODE)
5875 return stmt;
5876
5877 if (TREE_CODE (stmt) != MODIFY_EXPR
5878 || TREE_OPERAND (stmt, 0) != lhs)
5879 return lhs;
5880
5881 rhs = TREE_OPERAND (stmt, 1);
5882 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5883 rhs = TREE_OPERAND (rhs, 0);
5884
5885 if ((TREE_CODE (rhs) != NOP_EXPR
5886 && TREE_CODE (rhs) != CONVERT_EXPR
5887 && (TREE_CODE (rhs) != PLUS_EXPR
5888 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5889 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5890 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5891 return rhs;
5892
5893 lhs = TREE_OPERAND (rhs, 0);
5894 }
5895 }
5896
5897 /* Check if LHS = RHS statement is
5898 LHS = *(ap.__base + ap.__offset + cst)
5899 or
5900 LHS = *(ap.__base
5901 + ((ap.__offset + cst <= 47)
5902 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5903 If the former, indicate that GPR registers are needed,
5904 if the latter, indicate that FPR registers are needed.
5905 On alpha, cfun->va_list_gpr_size is used as size of the needed
5906 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5907 GPR registers are needed and bit 1 set if FPR registers are needed.
5908 Return true if va_list references should not be scanned for the current
5909 statement. */
5910
5911 static bool
5912 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5913 {
5914 tree base, offset, arg1, arg2;
5915 int offset_arg = 1;
5916
5917 if (TREE_CODE (rhs) != INDIRECT_REF
5918 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5919 return false;
5920
5921 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5922 if (lhs == NULL_TREE
5923 || TREE_CODE (lhs) != PLUS_EXPR)
5924 return false;
5925
5926 base = TREE_OPERAND (lhs, 0);
5927 if (TREE_CODE (base) == SSA_NAME)
5928 base = va_list_skip_additions (base);
5929
5930 if (TREE_CODE (base) != COMPONENT_REF
5931 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5932 {
5933 base = TREE_OPERAND (lhs, 0);
5934 if (TREE_CODE (base) == SSA_NAME)
5935 base = va_list_skip_additions (base);
5936
5937 if (TREE_CODE (base) != COMPONENT_REF
5938 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5939 return false;
5940
5941 offset_arg = 0;
5942 }
5943
5944 base = get_base_address (base);
5945 if (TREE_CODE (base) != VAR_DECL
5946 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5947 return false;
5948
5949 offset = TREE_OPERAND (lhs, offset_arg);
5950 if (TREE_CODE (offset) == SSA_NAME)
5951 offset = va_list_skip_additions (offset);
5952
5953 if (TREE_CODE (offset) == PHI_NODE)
5954 {
5955 HOST_WIDE_INT sub;
5956
5957 if (PHI_NUM_ARGS (offset) != 2)
5958 goto escapes;
5959
5960 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5961 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5962 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5963 {
5964 tree tem = arg1;
5965 arg1 = arg2;
5966 arg2 = tem;
5967
5968 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5969 goto escapes;
5970 }
5971 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5972 goto escapes;
5973
5974 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5975 if (TREE_CODE (arg2) == MINUS_EXPR)
5976 sub = -sub;
5977 if (sub < -48 || sub > -32)
5978 goto escapes;
5979
5980 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5981 if (arg1 != arg2)
5982 goto escapes;
5983
5984 if (TREE_CODE (arg1) == SSA_NAME)
5985 arg1 = va_list_skip_additions (arg1);
5986
5987 if (TREE_CODE (arg1) != COMPONENT_REF
5988 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5989 || get_base_address (arg1) != base)
5990 goto escapes;
5991
5992 /* Need floating point regs. */
5993 cfun->va_list_fpr_size |= 2;
5994 }
5995 else if (TREE_CODE (offset) != COMPONENT_REF
5996 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5997 || get_base_address (offset) != base)
5998 goto escapes;
5999 else
6000 /* Need general regs. */
6001 cfun->va_list_fpr_size |= 1;
6002 return false;
6003
6004 escapes:
6005 si->va_list_escapes = true;
6006 return false;
6007 }
6008 #endif
6009
6010 /* Perform any actions needed for a function that is receiving a
6011 variable number of arguments. */
6012
6013 static void
6014 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6015 tree type, int *pretend_size, int no_rtl)
6016 {
6017 CUMULATIVE_ARGS cum = *pcum;
6018
6019 /* Skip the current argument. */
6020 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6021
6022 #if TARGET_ABI_UNICOSMK
6023 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6024 arguments on the stack. Unfortunately, it doesn't always store the first
6025 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6026 with stdargs as we always have at least one named argument there. */
6027 if (cum.num_reg_words < 6)
6028 {
6029 if (!no_rtl)
6030 {
6031 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6032 emit_insn (gen_arg_home_umk ());
6033 }
6034 *pretend_size = 0;
6035 }
6036 #elif TARGET_ABI_OPEN_VMS
6037 /* For VMS, we allocate space for all 6 arg registers plus a count.
6038
6039 However, if NO registers need to be saved, don't allocate any space.
6040 This is not only because we won't need the space, but because AP
6041 includes the current_pretend_args_size and we don't want to mess up
6042 any ap-relative addresses already made. */
6043 if (cum.num_args < 6)
6044 {
6045 if (!no_rtl)
6046 {
6047 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6048 emit_insn (gen_arg_home ());
6049 }
6050 *pretend_size = 7 * UNITS_PER_WORD;
6051 }
6052 #else
6053 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6054 only push those that are remaining. However, if NO registers need to
6055 be saved, don't allocate any space. This is not only because we won't
6056 need the space, but because AP includes the current_pretend_args_size
6057 and we don't want to mess up any ap-relative addresses already made.
6058
6059 If we are not to use the floating-point registers, save the integer
6060 registers where we would put the floating-point registers. This is
6061 not the most efficient way to implement varargs with just one register
6062 class, but it isn't worth doing anything more efficient in this rare
6063 case. */
6064 if (cum >= 6)
6065 return;
6066
6067 if (!no_rtl)
6068 {
6069 int count, set = get_varargs_alias_set ();
6070 rtx tmp;
6071
6072 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6073 if (count > 6 - cum)
6074 count = 6 - cum;
6075
6076 /* Detect whether integer registers or floating-point registers
6077 are needed by the detected va_arg statements. See above for
6078 how these values are computed. Note that the "escape" value
6079 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6080 these bits set. */
6081 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6082
6083 if (cfun->va_list_fpr_size & 1)
6084 {
6085 tmp = gen_rtx_MEM (BLKmode,
6086 plus_constant (virtual_incoming_args_rtx,
6087 (cum + 6) * UNITS_PER_WORD));
6088 MEM_NOTRAP_P (tmp) = 1;
6089 set_mem_alias_set (tmp, set);
6090 move_block_from_reg (16 + cum, tmp, count);
6091 }
6092
6093 if (cfun->va_list_fpr_size & 2)
6094 {
6095 tmp = gen_rtx_MEM (BLKmode,
6096 plus_constant (virtual_incoming_args_rtx,
6097 cum * UNITS_PER_WORD));
6098 MEM_NOTRAP_P (tmp) = 1;
6099 set_mem_alias_set (tmp, set);
6100 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6101 }
6102 }
6103 *pretend_size = 12 * UNITS_PER_WORD;
6104 #endif
6105 }
6106
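/* Expand __builtin_va_start: initialize the va_list VALIST for the
   current function.  */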
6107 void
6108 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6109 {
6110 HOST_WIDE_INT offset;
6111 tree t, offset_field, base_field;
6112
6113 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6114 return;
6115
6116 if (TARGET_ABI_UNICOSMK)
6117 std_expand_builtin_va_start (valist, nextarg);
6118
6119 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6120 up by 48, storing fp arg registers in the first 48 bytes, and the
6121 integer arg registers in the next 48 bytes. This is only done,
6122 however, if any integer registers need to be stored.
6123
6124 If no integer registers need be stored, then we must subtract 48
6125 in order to account for the integer arg registers which are counted
6126 in argsize above, but which are not actually stored on the stack.
6127 Must further be careful here about structures straddling the last
6128 integer argument register; that futzes with pretend_args_size,
6129 which changes the meaning of AP. */
6130
6131 if (NUM_ARGS < 6)
6132 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6133 else
6134 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6135
6136 if (TARGET_ABI_OPEN_VMS)
6137 {
6138 nextarg = plus_constant (nextarg, offset);
6139 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6140 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6141 make_tree (ptr_type_node, nextarg));
6142 TREE_SIDE_EFFECTS (t) = 1;
6143
6144 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6145 }
6146 else
6147 {
6148 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6149 offset_field = TREE_CHAIN (base_field);
6150
6151 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6152 valist, base_field, NULL_TREE);
6153 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6154 valist, offset_field, NULL_TREE);
6155
6156 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6157 t = build (PLUS_EXPR, ptr_type_node, t,
6158 build_int_cst (NULL_TREE, offset));
6159 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6160 TREE_SIDE_EFFECTS (t) = 1;
6161 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6162
6163 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6164 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6165 TREE_SIDE_EFFECTS (t) = 1;
6166 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6167 }
6168 }
6169
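/* Helper for alpha_gimplify_va_arg.  Emit statements onto *PRE_P that
   fetch the next argument of type TYPE from the save area at BASE plus
   OFFSET, and advance OFFSET past it.  Returns a tree for the fetched
   value.  */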
6170 static tree
6171 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6172 {
6173 tree type_size, ptr_type, addend, t, addr, internal_post;
6174
6175 /* If the type could not be passed in registers, skip the block
6176 reserved for the registers. */
6177 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6178 {
6179 t = build_int_cst (TREE_TYPE (offset), 6*8);
6180 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
6181 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
6182 gimplify_and_add (t, pre_p);
6183 }
6184
6185 addend = offset;
6186 ptr_type = build_pointer_type (type);
6187
6188 if (TREE_CODE (type) == COMPLEX_TYPE)
6189 {
6190 tree real_part, imag_part, real_temp;
6191
6192 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6193 offset, pre_p);
6194
6195 /* Copy the value into a new temporary, lest the formal temporary
6196 be reused out from under us. */
6197 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6198
6199 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6200 offset, pre_p);
6201
6202 return build (COMPLEX_EXPR, type, real_temp, imag_part);
6203 }
6204 else if (TREE_CODE (type) == REAL_TYPE)
6205 {
6206 tree fpaddend, cond, fourtyeight;
6207
6208 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6209 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
6210 addend, fourtyeight));
6211 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
6212 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6213 fpaddend, addend));
6214 }
6215
6216 /* Build the final address and force that value into a temporary. */
6217 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6218 fold_convert (ptr_type, addend));
6219 internal_post = NULL;
6220 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6221 append_to_statement_list (internal_post, pre_p);
6222
6223 /* Update the offset field. */
6224 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6225 if (type_size == NULL || TREE_OVERFLOW (type_size))
6226 t = size_zero_node;
6227 else
6228 {
6229 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6230 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6231 t = size_binop (MULT_EXPR, t, size_int (8));
6232 }
6233 t = fold_convert (TREE_TYPE (offset), t);
6234 t = build (MODIFY_EXPR, void_type_node, offset,
6235 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6236 gimplify_and_add (t, pre_p);
6237
6238 return build_va_arg_indirect_ref (addr);
6239 }
6240
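/* Gimplify a va_arg expression: read a value of type TYPE from the
   va_list VALIST, appending any new statements to PRE_P and POST_P.  */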
6241 static tree
6242 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6243 {
6244 tree offset_field, base_field, offset, base, t, r;
6245 bool indirect;
6246
6247 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6248 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6249
6250 base_field = TYPE_FIELDS (va_list_type_node);
6251 offset_field = TREE_CHAIN (base_field);
6252 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6253 valist, base_field, NULL_TREE);
6254 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6255 valist, offset_field, NULL_TREE);
6256
6257 /* Pull the fields of the structure out into temporaries. Since we never
6258 modify the base field, we can use a formal temporary. Sign-extend the
6259 offset field so that it's the proper width for pointer arithmetic. */
6260 base = get_formal_tmp_var (base_field, pre_p);
6261
6262 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6263 offset = get_initialized_tmp_var (t, pre_p, NULL);
6264
6265 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6266 if (indirect)
6267 type = build_pointer_type (type);
6268
6269 /* Find the value. Note that this will be a stable indirection, or
6270 a composite of stable indirections in the case of complex. */
6271 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6272
6273 /* Stuff the offset temporary back into its field. */
6274 t = build (MODIFY_EXPR, void_type_node, offset_field,
6275 fold_convert (TREE_TYPE (offset_field), offset));
6276 gimplify_and_add (t, pre_p);
6277
6278 if (indirect)
6279 r = build_va_arg_indirect_ref (r);
6280
6281 return r;
6282 }
6283 \f
6284 /* Builtins. */
6285
6286 enum alpha_builtin
6287 {
6288 ALPHA_BUILTIN_CMPBGE,
6289 ALPHA_BUILTIN_EXTBL,
6290 ALPHA_BUILTIN_EXTWL,
6291 ALPHA_BUILTIN_EXTLL,
6292 ALPHA_BUILTIN_EXTQL,
6293 ALPHA_BUILTIN_EXTWH,
6294 ALPHA_BUILTIN_EXTLH,
6295 ALPHA_BUILTIN_EXTQH,
6296 ALPHA_BUILTIN_INSBL,
6297 ALPHA_BUILTIN_INSWL,
6298 ALPHA_BUILTIN_INSLL,
6299 ALPHA_BUILTIN_INSQL,
6300 ALPHA_BUILTIN_INSWH,
6301 ALPHA_BUILTIN_INSLH,
6302 ALPHA_BUILTIN_INSQH,
6303 ALPHA_BUILTIN_MSKBL,
6304 ALPHA_BUILTIN_MSKWL,
6305 ALPHA_BUILTIN_MSKLL,
6306 ALPHA_BUILTIN_MSKQL,
6307 ALPHA_BUILTIN_MSKWH,
6308 ALPHA_BUILTIN_MSKLH,
6309 ALPHA_BUILTIN_MSKQH,
6310 ALPHA_BUILTIN_UMULH,
6311 ALPHA_BUILTIN_ZAP,
6312 ALPHA_BUILTIN_ZAPNOT,
6313 ALPHA_BUILTIN_AMASK,
6314 ALPHA_BUILTIN_IMPLVER,
6315 ALPHA_BUILTIN_RPCC,
6316 ALPHA_BUILTIN_THREAD_POINTER,
6317 ALPHA_BUILTIN_SET_THREAD_POINTER,
6318
6319 /* TARGET_MAX */
6320 ALPHA_BUILTIN_MINUB8,
6321 ALPHA_BUILTIN_MINSB8,
6322 ALPHA_BUILTIN_MINUW4,
6323 ALPHA_BUILTIN_MINSW4,
6324 ALPHA_BUILTIN_MAXUB8,
6325 ALPHA_BUILTIN_MAXSB8,
6326 ALPHA_BUILTIN_MAXUW4,
6327 ALPHA_BUILTIN_MAXSW4,
6328 ALPHA_BUILTIN_PERR,
6329 ALPHA_BUILTIN_PKLB,
6330 ALPHA_BUILTIN_PKWB,
6331 ALPHA_BUILTIN_UNPKBL,
6332 ALPHA_BUILTIN_UNPKBW,
6333
6334 /* TARGET_CIX */
6335 ALPHA_BUILTIN_CTTZ,
6336 ALPHA_BUILTIN_CTLZ,
6337 ALPHA_BUILTIN_CTPOP,
6338
6339 ALPHA_BUILTIN_max
6340 };
6341
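/* Map each code in enum alpha_builtin to the insn code that implements
   it.  */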
6342 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6343 CODE_FOR_builtin_cmpbge,
6344 CODE_FOR_builtin_extbl,
6345 CODE_FOR_builtin_extwl,
6346 CODE_FOR_builtin_extll,
6347 CODE_FOR_builtin_extql,
6348 CODE_FOR_builtin_extwh,
6349 CODE_FOR_builtin_extlh,
6350 CODE_FOR_builtin_extqh,
6351 CODE_FOR_builtin_insbl,
6352 CODE_FOR_builtin_inswl,
6353 CODE_FOR_builtin_insll,
6354 CODE_FOR_builtin_insql,
6355 CODE_FOR_builtin_inswh,
6356 CODE_FOR_builtin_inslh,
6357 CODE_FOR_builtin_insqh,
6358 CODE_FOR_builtin_mskbl,
6359 CODE_FOR_builtin_mskwl,
6360 CODE_FOR_builtin_mskll,
6361 CODE_FOR_builtin_mskql,
6362 CODE_FOR_builtin_mskwh,
6363 CODE_FOR_builtin_msklh,
6364 CODE_FOR_builtin_mskqh,
6365 CODE_FOR_umuldi3_highpart,
6366 CODE_FOR_builtin_zap,
6367 CODE_FOR_builtin_zapnot,
6368 CODE_FOR_builtin_amask,
6369 CODE_FOR_builtin_implver,
6370 CODE_FOR_builtin_rpcc,
6371 CODE_FOR_load_tp,
6372 CODE_FOR_set_tp,
6373
6374 /* TARGET_MAX */
6375 CODE_FOR_builtin_minub8,
6376 CODE_FOR_builtin_minsb8,
6377 CODE_FOR_builtin_minuw4,
6378 CODE_FOR_builtin_minsw4,
6379 CODE_FOR_builtin_maxub8,
6380 CODE_FOR_builtin_maxsb8,
6381 CODE_FOR_builtin_maxuw4,
6382 CODE_FOR_builtin_maxsw4,
6383 CODE_FOR_builtin_perr,
6384 CODE_FOR_builtin_pklb,
6385 CODE_FOR_builtin_pkwb,
6386 CODE_FOR_builtin_unpkbl,
6387 CODE_FOR_builtin_unpkbw,
6388
6389 /* TARGET_CIX */
6390 CODE_FOR_ctzdi2,
6391 CODE_FOR_clzdi2,
6392 CODE_FOR_popcountdi2
6393 };
6394
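/* Description of one builtin: its name, function code, the target_flags
   bits required for it to be available, and whether it can be declared
   const (no side effects).  */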
6395 struct alpha_builtin_def
6396 {
6397 const char *name;
6398 enum alpha_builtin code;
6399 unsigned int target_mask;
6400 bool is_const;
6401 };
6402
6403 static struct alpha_builtin_def const zero_arg_builtins[] = {
6404 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6405 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6406 };
6407
6408 static struct alpha_builtin_def const one_arg_builtins[] = {
6409 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6410 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6411 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6412 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6413 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6414 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6415 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6416 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6417 };
6418
6419 static struct alpha_builtin_def const two_arg_builtins[] = {
6420 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6421 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6422 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6423 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6424 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6425 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6426 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6427 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6428 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6429 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6430 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6431 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6432 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6433 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6434 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6435 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6436 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6437 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6438 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6439 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6440 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6441 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6442 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6443 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6444 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6445 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6446 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6447 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6448 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6449 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6450 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6451 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6452 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6453 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6454 };
6455
6456 static GTY(()) tree alpha_v8qi_u;
6457 static GTY(()) tree alpha_v8qi_s;
6458 static GTY(()) tree alpha_v4hi_u;
6459 static GTY(()) tree alpha_v4hi_s;
6460
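/* Declare the Alpha-specific builtin functions that are available for
   the selected target flags.  */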
6461 static void
6462 alpha_init_builtins (void)
6463 {
6464 const struct alpha_builtin_def *p;
6465 tree ftype, attrs[2];
6466 size_t i;
6467
6468 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6469 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6470
6471 ftype = build_function_type (long_integer_type_node, void_list_node);
6472
6473 p = zero_arg_builtins;
6474 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6475 if ((target_flags & p->target_mask) == p->target_mask)
6476 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6477 NULL, attrs[p->is_const]);
6478
6479 ftype = build_function_type_list (long_integer_type_node,
6480 long_integer_type_node, NULL_TREE);
6481
6482 p = one_arg_builtins;
6483 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6484 if ((target_flags & p->target_mask) == p->target_mask)
6485 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6486 NULL, attrs[p->is_const]);
6487
6488 ftype = build_function_type_list (long_integer_type_node,
6489 long_integer_type_node,
6490 long_integer_type_node, NULL_TREE);
6491
6492 p = two_arg_builtins;
6493 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6494 if ((target_flags & p->target_mask) == p->target_mask)
6495 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6496 NULL, attrs[p->is_const]);
6497
6498 ftype = build_function_type (ptr_type_node, void_list_node);
6499 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6500 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6501 NULL, attrs[0]);
6502
6503 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6504 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6505 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6506 NULL, attrs[0]);
6507
6508 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6509 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6510 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6511 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6512 }
6513
6514 /* Expand an expression EXP that calls a built-in function,
6515 with result going to TARGET if that's convenient
6516 (and in mode MODE if that's convenient).
6517 SUBTARGET may be used as the target for computing one of EXP's operands.
6518 IGNORE is nonzero if the value is to be ignored. */
6519
6520 static rtx
6521 alpha_expand_builtin (tree exp, rtx target,
6522 rtx subtarget ATTRIBUTE_UNUSED,
6523 enum machine_mode mode ATTRIBUTE_UNUSED,
6524 int ignore ATTRIBUTE_UNUSED)
6525 {
6526 #define MAX_ARGS 2
6527
6528 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6529 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6530 tree arglist = TREE_OPERAND (exp, 1);
6531 enum insn_code icode;
6532 rtx op[MAX_ARGS], pat;
6533 int arity;
6534 bool nonvoid;
6535
6536 if (fcode >= ALPHA_BUILTIN_max)
6537 internal_error ("bad builtin fcode");
6538 icode = code_for_builtin[fcode];
6539 if (icode == 0)
6540 internal_error ("bad builtin fcode");
6541
6542 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6543
6544 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6545 arglist;
6546 arglist = TREE_CHAIN (arglist), arity++)
6547 {
6548 const struct insn_operand_data *insn_op;
6549
6550 tree arg = TREE_VALUE (arglist);
6551 if (arg == error_mark_node)
6552 return NULL_RTX;
6553 if (arity >= MAX_ARGS)
6554 return NULL_RTX;
6555
6556 insn_op = &insn_data[icode].operand[arity + nonvoid];
6557
6558 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6559
6560 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6561 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6562 }
6563
6564 if (nonvoid)
6565 {
6566 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6567 if (!target
6568 || GET_MODE (target) != tmode
6569 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6570 target = gen_reg_rtx (tmode);
6571 }
6572
6573 switch (arity)
6574 {
6575 case 0:
6576 pat = GEN_FCN (icode) (target);
6577 break;
6578 case 1:
6579 if (nonvoid)
6580 pat = GEN_FCN (icode) (target, op[0]);
6581 else
6582 pat = GEN_FCN (icode) (op[0]);
6583 break;
6584 case 2:
6585 pat = GEN_FCN (icode) (target, op[0], op[1]);
6586 break;
6587 default:
6588 gcc_unreachable ();
6589 }
6590 if (!pat)
6591 return NULL_RTX;
6592 emit_insn (pat);
6593
6594 if (nonvoid)
6595 return target;
6596 else
6597 return const0_rtx;
6598 }
6599
6600
6601 /* Several bits below assume HWI >= 64 bits. This should be enforced
6602 by config.gcc. */
6603 #if HOST_BITS_PER_WIDE_INT < 64
6604 # error "HOST_WIDE_INT too small"
6605 #endif
6606
6607 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6608 with an 8 bit output vector. OPINT contains the integer operands; bit N
6609 of OP_CONST is set if OPINT[N] is valid. */
6610
6611 static tree
6612 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6613 {
6614 if (op_const == 3)
6615 {
6616 int i, val;
6617 for (i = 0, val = 0; i < 8; ++i)
6618 {
6619 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6620 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6621 if (c0 >= c1)
6622 val |= 1 << i;
6623 }
6624 return build_int_cst (long_integer_type_node, val);
6625 }
6626 else if (op_const == 2 && opint[1] == 0)
6627 return build_int_cst (long_integer_type_node, 0xff);
6628 return NULL;
6629 }
6630
6631 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6632 specialized form of an AND operation. Other byte manipulation instructions
6633 are defined in terms of this instruction, so this is also used as a
6634 subroutine for other builtins.
6635
6636 OP contains the tree operands; OPINT contains the extracted integer values.
6637 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6638 OPINT is to be considered. */
6639
6640 static tree
6641 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6642 long op_const)
6643 {
6644 if (op_const & 2)
6645 {
6646 unsigned HOST_WIDE_INT mask = 0;
6647 int i;
6648
6649 for (i = 0; i < 8; ++i)
6650 if ((opint[1] >> i) & 1)
6651 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6652
6653 if (op_const & 1)
6654 return build_int_cst (long_integer_type_node, opint[0] & mask);
6655
6656 if (op)
6657 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6658 build_int_cst (long_integer_type_node, mask)));
6659 }
6660 else if ((op_const & 1) && opint[0] == 0)
6661 return build_int_cst (long_integer_type_node, 0);
6662 return NULL;
6663 }
6664
6665 /* Fold the builtins for the EXT family of instructions. */
6666
6667 static tree
6668 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6669 long op_const, unsigned HOST_WIDE_INT bytemask,
6670 bool is_high)
6671 {
6672 long zap_const = 2;
6673 tree *zap_op = NULL;
6674
6675 if (op_const & 2)
6676 {
6677 unsigned HOST_WIDE_INT loc;
6678
6679 loc = opint[1] & 7;
6680 if (BYTES_BIG_ENDIAN)
6681 loc ^= 7;
6682 loc *= 8;
6683
6684 if (loc != 0)
6685 {
6686 if (op_const & 1)
6687 {
6688 unsigned HOST_WIDE_INT temp = opint[0];
6689 if (is_high)
6690 temp <<= loc;
6691 else
6692 temp >>= loc;
6693 opint[0] = temp;
6694 zap_const = 3;
6695 }
6696 }
6697 else
6698 zap_op = op;
6699 }
6700
6701 opint[1] = bytemask;
6702 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6703 }
6704
6705 /* Fold the builtins for the INS family of instructions. */
6706
6707 static tree
6708 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6709 long op_const, unsigned HOST_WIDE_INT bytemask,
6710 bool is_high)
6711 {
6712 if ((op_const & 1) && opint[0] == 0)
6713 return build_int_cst (long_integer_type_node, 0);
6714
6715 if (op_const & 2)
6716 {
6717 unsigned HOST_WIDE_INT temp, loc, byteloc;
6718 tree *zap_op = NULL;
6719
6720 loc = opint[1] & 7;
6721 if (BYTES_BIG_ENDIAN)
6722 loc ^= 7;
6723 bytemask <<= loc;
6724
6725 temp = opint[0];
6726 if (is_high)
6727 {
6728 byteloc = (64 - (loc * 8)) & 0x3f;
6729 if (byteloc == 0)
6730 zap_op = op;
6731 else
6732 temp >>= byteloc;
6733 bytemask >>= 8;
6734 }
6735 else
6736 {
6737 byteloc = loc * 8;
6738 if (byteloc == 0)
6739 zap_op = op;
6740 else
6741 temp <<= byteloc;
6742 }
6743
6744 opint[0] = temp;
6745 opint[1] = bytemask;
6746 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6747 }
6748
6749 return NULL;
6750 }
6751
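/* Fold the builtins for the MSK family of instructions.  */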
6752 static tree
6753 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6754 long op_const, unsigned HOST_WIDE_INT bytemask,
6755 bool is_high)
6756 {
6757 if (op_const & 2)
6758 {
6759 unsigned HOST_WIDE_INT loc;
6760
6761 loc = opint[1] & 7;
6762 if (BYTES_BIG_ENDIAN)
6763 loc ^= 7;
6764 bytemask <<= loc;
6765
6766 if (is_high)
6767 bytemask >>= 8;
6768
6769 opint[1] = bytemask ^ 0xff;
6770 }
6771
6772 return alpha_fold_builtin_zapnot (op, opint, op_const);
6773 }
6774
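/* Fold the builtin for the UMULH instruction, i.e. the high 64 bits of
   an unsigned 64x64-bit multiplication.  */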
6775 static tree
6776 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6777 {
6778 switch (op_const)
6779 {
6780 case 3:
6781 {
6782 unsigned HOST_WIDE_INT l;
6783 HOST_WIDE_INT h;
6784
6785 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6786
6787 #if HOST_BITS_PER_WIDE_INT > 64
6788 # error fixme
6789 #endif
6790
6791 return build_int_cst (long_integer_type_node, h);
6792 }
6793
6794 case 1:
6795 opint[1] = opint[0];
6796 /* FALLTHRU */
6797 case 2:
6798 /* Note that (X*1) >> 64 == 0. */
6799 if (opint[1] == 0 || opint[1] == 1)
6800 return build_int_cst (long_integer_type_node, 0);
6801 break;
6802 }
6803 return NULL;
6804 }
6805
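/* Fold one of the vector MIN/MAX builtins by applying tree code CODE to
   the operands reinterpreted as vectors of type VTYPE.  */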
6806 static tree
6807 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6808 {
6809 tree op0 = fold_convert (vtype, op[0]);
6810 tree op1 = fold_convert (vtype, op[1]);
6811 tree val = fold (build2 (code, vtype, op0, op1));
6812 return fold_convert (long_integer_type_node, val);
6813 }
6814
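/* Fold the builtin for the PERR instruction: the sum of the absolute
   differences of the eight byte lanes of the two operands.  */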
6815 static tree
6816 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6817 {
6818 unsigned HOST_WIDE_INT temp = 0;
6819 int i;
6820
6821 if (op_const != 3)
6822 return NULL;
6823
6824 for (i = 0; i < 8; ++i)
6825 {
6826 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6827 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6828 if (a >= b)
6829 temp += a - b;
6830 else
6831 temp += b - a;
6832 }
6833
6834 return build_int_cst (long_integer_type_node, temp);
6835 }
6836
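/* Fold the builtin for the PKLB instruction (pack longwords to bytes).  */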
6837 static tree
6838 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6839 {
6840 unsigned HOST_WIDE_INT temp;
6841
6842 if (op_const == 0)
6843 return NULL;
6844
6845 temp = opint[0] & 0xff;
6846 temp |= (opint[0] >> 24) & 0xff00;
6847
6848 return build_int_cst (long_integer_type_node, temp);
6849 }
6850
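/* Fold the builtin for the PKWB instruction (pack words to bytes).  */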
6851 static tree
6852 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6853 {
6854 unsigned HOST_WIDE_INT temp;
6855
6856 if (op_const == 0)
6857 return NULL;
6858
6859 temp = opint[0] & 0xff;
6860 temp |= (opint[0] >> 8) & 0xff00;
6861 temp |= (opint[0] >> 16) & 0xff0000;
6862 temp |= (opint[0] >> 24) & 0xff000000;
6863
6864 return build_int_cst (long_integer_type_node, temp);
6865 }
6866
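/* Fold the builtin for the UNPKBL instruction (unpack bytes to
   longwords).  */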
6867 static tree
6868 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6869 {
6870 unsigned HOST_WIDE_INT temp;
6871
6872 if (op_const == 0)
6873 return NULL;
6874
6875 temp = opint[0] & 0xff;
6876 temp |= (opint[0] & 0xff00) << 24;
6877
6878 return build_int_cst (long_integer_type_node, temp);
6879 }
6880
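/* Fold the builtin for the UNPKBW instruction (unpack bytes to words).  */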
6881 static tree
6882 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6883 {
6884 unsigned HOST_WIDE_INT temp;
6885
6886 if (op_const == 0)
6887 return NULL;
6888
6889 temp = opint[0] & 0xff;
6890 temp |= (opint[0] & 0x0000ff00) << 8;
6891 temp |= (opint[0] & 0x00ff0000) << 16;
6892 temp |= (opint[0] & 0xff000000) << 24;
6893
6894 return build_int_cst (long_integer_type_node, temp);
6895 }
6896
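/* Fold the builtin for the CTTZ instruction (count trailing zeros).  */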
6897 static tree
6898 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6899 {
6900 unsigned HOST_WIDE_INT temp;
6901
6902 if (op_const == 0)
6903 return NULL;
6904
6905 if (opint[0] == 0)
6906 temp = 64;
6907 else
6908 temp = exact_log2 (opint[0] & -opint[0]);
6909
6910 return build_int_cst (long_integer_type_node, temp);
6911 }
6912
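/* Fold the builtin for the CTLZ instruction (count leading zeros).  */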
6913 static tree
6914 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6915 {
6916 unsigned HOST_WIDE_INT temp;
6917
6918 if (op_const == 0)
6919 return NULL;
6920
6921 if (opint[0] == 0)
6922 temp = 64;
6923 else
6924 temp = 64 - floor_log2 (opint[0]) - 1;
6925
6926 return build_int_cst (long_integer_type_node, temp);
6927 }
6928
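/* Fold the builtin for the CTPOP instruction (population count).  */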
6929 static tree
6930 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6931 {
6932 unsigned HOST_WIDE_INT temp, op;
6933
6934 if (op_const == 0)
6935 return NULL;
6936
6937 op = opint[0];
6938 temp = 0;
6939 while (op)
6940 temp++, op &= op - 1;
6941
6942 return build_int_cst (long_integer_type_node, temp);
6943 }
6944
6945 /* Fold one of our builtin functions. */
6946
6947 static tree
6948 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6949 {
6950 tree op[MAX_ARGS], t;
6951 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6952 long op_const = 0, arity = 0;
6953
6954 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6955 {
6956 tree arg = TREE_VALUE (t);
6957 if (arg == error_mark_node)
6958 return NULL;
6959 if (arity >= MAX_ARGS)
6960 return NULL;
6961
6962 op[arity] = arg;
6963 opint[arity] = 0;
6964 if (TREE_CODE (arg) == INTEGER_CST)
6965 {
6966 op_const |= 1L << arity;
6967 opint[arity] = int_cst_value (arg);
6968 }
6969 }
6970
6971 switch (DECL_FUNCTION_CODE (fndecl))
6972 {
6973 case ALPHA_BUILTIN_CMPBGE:
6974 return alpha_fold_builtin_cmpbge (opint, op_const);
6975
6976 case ALPHA_BUILTIN_EXTBL:
6977 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6978 case ALPHA_BUILTIN_EXTWL:
6979 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6980 case ALPHA_BUILTIN_EXTLL:
6981 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6982 case ALPHA_BUILTIN_EXTQL:
6983 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6984 case ALPHA_BUILTIN_EXTWH:
6985 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6986 case ALPHA_BUILTIN_EXTLH:
6987 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6988 case ALPHA_BUILTIN_EXTQH:
6989 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6990
6991 case ALPHA_BUILTIN_INSBL:
6992 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6993 case ALPHA_BUILTIN_INSWL:
6994 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6995 case ALPHA_BUILTIN_INSLL:
6996 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6997 case ALPHA_BUILTIN_INSQL:
6998 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6999 case ALPHA_BUILTIN_INSWH:
7000 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7001 case ALPHA_BUILTIN_INSLH:
7002 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7003 case ALPHA_BUILTIN_INSQH:
7004 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7005
7006 case ALPHA_BUILTIN_MSKBL:
7007 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7008 case ALPHA_BUILTIN_MSKWL:
7009 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7010 case ALPHA_BUILTIN_MSKLL:
7011 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7012 case ALPHA_BUILTIN_MSKQL:
7013 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7014 case ALPHA_BUILTIN_MSKWH:
7015 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7016 case ALPHA_BUILTIN_MSKLH:
7017 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7018 case ALPHA_BUILTIN_MSKQH:
7019 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7020
7021 case ALPHA_BUILTIN_UMULH:
7022 return alpha_fold_builtin_umulh (opint, op_const);
7023
7024 case ALPHA_BUILTIN_ZAP:
7025 opint[1] ^= 0xff;
7026 /* FALLTHRU */
7027 case ALPHA_BUILTIN_ZAPNOT:
7028 return alpha_fold_builtin_zapnot (op, opint, op_const);
7029
7030 case ALPHA_BUILTIN_MINUB8:
7031 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7032 case ALPHA_BUILTIN_MINSB8:
7033 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7034 case ALPHA_BUILTIN_MINUW4:
7035 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7036 case ALPHA_BUILTIN_MINSW4:
7037 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7038 case ALPHA_BUILTIN_MAXUB8:
7039 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7040 case ALPHA_BUILTIN_MAXSB8:
7041 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7042 case ALPHA_BUILTIN_MAXUW4:
7043 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7044 case ALPHA_BUILTIN_MAXSW4:
7045 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7046
7047 case ALPHA_BUILTIN_PERR:
7048 return alpha_fold_builtin_perr (opint, op_const);
7049 case ALPHA_BUILTIN_PKLB:
7050 return alpha_fold_builtin_pklb (opint, op_const);
7051 case ALPHA_BUILTIN_PKWB:
7052 return alpha_fold_builtin_pkwb (opint, op_const);
7053 case ALPHA_BUILTIN_UNPKBL:
7054 return alpha_fold_builtin_unpkbl (opint, op_const);
7055 case ALPHA_BUILTIN_UNPKBW:
7056 return alpha_fold_builtin_unpkbw (opint, op_const);
7057
7058 case ALPHA_BUILTIN_CTTZ:
7059 return alpha_fold_builtin_cttz (opint, op_const);
7060 case ALPHA_BUILTIN_CTLZ:
7061 return alpha_fold_builtin_ctlz (opint, op_const);
7062 case ALPHA_BUILTIN_CTPOP:
7063 return alpha_fold_builtin_ctpop (opint, op_const);
7064
7065 case ALPHA_BUILTIN_AMASK:
7066 case ALPHA_BUILTIN_IMPLVER:
7067 case ALPHA_BUILTIN_RPCC:
7068 case ALPHA_BUILTIN_THREAD_POINTER:
7069 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7070 /* None of these are foldable at compile-time. */
7071 default:
7072 return NULL;
7073 }
7074 }
7075 \f
7076 /* This page contains routines that are used to determine what the function
7077 prologue and epilogue code will do and write them out. */
7078
7079 /* Compute the size of the save area in the stack. */
7080
7081 /* These variables are used for communication between the following functions.
7082 They indicate various things about the current function being compiled
7083 that are used to tell what kind of prologue, epilogue and procedure
7084 descriptor to generate. */
7085
7086 /* Nonzero if we need a stack procedure. */
7087 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7088 static enum alpha_procedure_types alpha_procedure_type;
7089
7090 /* Register number (either FP or SP) that is used to unwind the frame. */
7091 static int vms_unwind_regno;
7092
7093 /* Register number used to save FP. We need not have one for RA since
7094 we don't modify it for register procedures. This is only defined
7095 for register frame procedures. */
7096 static int vms_save_fp_regno;
7097
7098 /* Register number used to reference objects off our PV. */
7099 static int vms_base_regno;
7100
7101 /* Compute register masks for saved registers. */
7102
7103 static void
7104 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7105 {
7106 unsigned long imask = 0;
7107 unsigned long fmask = 0;
7108 unsigned int i;
7109
7110 /* When outputting a thunk, we don't have valid register life info,
7111 but assemble_start_function wants to output .frame and .mask
7112 directives. */
7113 if (current_function_is_thunk)
7114 {
7115 *imaskP = 0;
7116 *fmaskP = 0;
7117 return;
7118 }
7119
7120 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7121 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7122
7123 /* One for every register we have to save. */
7124 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7125 if (! fixed_regs[i] && ! call_used_regs[i]
7126 && regs_ever_live[i] && i != REG_RA
7127 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7128 {
7129 if (i < 32)
7130 imask |= (1UL << i);
7131 else
7132 fmask |= (1UL << (i - 32));
7133 }
7134
7135 /* We need to restore these for the handler. */
7136 if (current_function_calls_eh_return)
7137 {
7138 for (i = 0; ; ++i)
7139 {
7140 unsigned regno = EH_RETURN_DATA_REGNO (i);
7141 if (regno == INVALID_REGNUM)
7142 break;
7143 imask |= 1UL << regno;
7144 }
7145 }
7146
7147 /* If any register spilled, then spill the return address also. */
7148 /* ??? This is required by the Digital stack unwind specification
7149 and isn't needed if we're doing Dwarf2 unwinding. */
7150 if (imask || fmask || alpha_ra_ever_killed ())
7151 imask |= (1UL << REG_RA);
7152
7153 *imaskP = imask;
7154 *fmaskP = fmask;
7155 }
7156
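/* Return the size in bytes of the register save area, also making the
   procedure-type and unwind-register decisions that the prologue and
   epilogue code relies on.  */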
7157 int
7158 alpha_sa_size (void)
7159 {
7160 unsigned long mask[2];
7161 int sa_size = 0;
7162 int i, j;
7163
7164 alpha_sa_mask (&mask[0], &mask[1]);
7165
7166 if (TARGET_ABI_UNICOSMK)
7167 {
7168 if (mask[0] || mask[1])
7169 sa_size = 14;
7170 }
7171 else
7172 {
7173 for (j = 0; j < 2; ++j)
7174 for (i = 0; i < 32; ++i)
7175 if ((mask[j] >> i) & 1)
7176 sa_size++;
7177 }
7178
7179 if (TARGET_ABI_UNICOSMK)
7180 {
7181 /* We might not need to generate a frame if we don't make any calls
7182 (including calls to __T3E_MISMATCH if this is a vararg function),
7183 don't have any local variables which require stack slots, don't
7184 use alloca and have not determined that we need a frame for other
7185 reasons. */
7186
7187 alpha_procedure_type
7188 = (sa_size || get_frame_size() != 0
7189 || current_function_outgoing_args_size
7190 || current_function_stdarg || current_function_calls_alloca
7191 || frame_pointer_needed)
7192 ? PT_STACK : PT_REGISTER;
7193
7194 /* Always reserve space for saving callee-saved registers if we
7195 need a frame as required by the calling convention. */
7196 if (alpha_procedure_type == PT_STACK)
7197 sa_size = 14;
7198 }
7199 else if (TARGET_ABI_OPEN_VMS)
7200 {
7201 /* Start by assuming we can use a register procedure if we don't
7202 make any calls (REG_RA not used) or need to save any
7203 registers, and a stack procedure if we do. */
7204 if ((mask[0] >> REG_RA) & 1)
7205 alpha_procedure_type = PT_STACK;
7206 else if (get_frame_size() != 0)
7207 alpha_procedure_type = PT_REGISTER;
7208 else
7209 alpha_procedure_type = PT_NULL;
7210
7211 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7212 made the final decision on stack procedure vs register procedure. */
7213 if (alpha_procedure_type == PT_STACK)
7214 sa_size -= 2;
7215
7216 /* Decide whether to refer to objects off our PV via FP or PV.
7217 If we need FP for something else or if we receive a nonlocal
7218 goto (which expects PV to contain the value), we must use PV.
7219 Otherwise, start by assuming we can use FP. */
7220
7221 vms_base_regno
7222 = (frame_pointer_needed
7223 || current_function_has_nonlocal_label
7224 || alpha_procedure_type == PT_STACK
7225 || current_function_outgoing_args_size)
7226 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7227
7228 /* If we want to copy PV into FP, we need to find some register
7229 in which to save FP. */
7230
7231 vms_save_fp_regno = -1;
7232 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7233 for (i = 0; i < 32; i++)
7234 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7235 vms_save_fp_regno = i;
7236
7237 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7238 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7239 else if (alpha_procedure_type == PT_NULL)
7240 vms_base_regno = REG_PV;
7241
7242 /* Stack unwinding should be done via FP unless we use it for PV. */
7243 vms_unwind_regno = (vms_base_regno == REG_PV
7244 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7245
7246 /* If this is a stack procedure, allow space for saving FP and RA. */
7247 if (alpha_procedure_type == PT_STACK)
7248 sa_size += 2;
7249 }
7250 else
7251 {
7252 /* Our size must be even (multiple of 16 bytes). */
7253 if (sa_size & 1)
7254 sa_size++;
7255 }
7256
7257 return sa_size * 8;
7258 }
7259
7260 /* Define the offset between two registers, one to be eliminated,
7261 and the other its replacement, at the start of a routine. */
7262
7263 HOST_WIDE_INT
7264 alpha_initial_elimination_offset (unsigned int from,
7265 unsigned int to ATTRIBUTE_UNUSED)
7266 {
7267 HOST_WIDE_INT ret;
7268
7269 ret = alpha_sa_size ();
7270 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7271
7272 switch (from)
7273 {
7274 case FRAME_POINTER_REGNUM:
7275 break;
7276
7277 case ARG_POINTER_REGNUM:
7278 ret += (ALPHA_ROUND (get_frame_size ()
7279 + current_function_pretend_args_size)
7280 - current_function_pretend_args_size);
7281 break;
7282
7283 default:
7284 gcc_unreachable ();
7285 }
7286
7287 return ret;
7288 }
7289
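/* Return the number of bytes that must be reserved in the frame for
   saving the PV register; this is nonzero only for stack procedures.  */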
7290 int
7291 alpha_pv_save_size (void)
7292 {
7293 alpha_sa_size ();
7294 return alpha_procedure_type == PT_STACK ? 8 : 0;
7295 }
7296
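/* Return nonzero if the current function's frame is unwound via the
   hard frame pointer rather than the stack pointer.  */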
7297 int
7298 alpha_using_fp (void)
7299 {
7300 alpha_sa_size ();
7301 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7302 }
7303
7304 #if TARGET_ABI_OPEN_VMS
7305
7306 const struct attribute_spec vms_attribute_table[] =
7307 {
7308 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7309 { "overlaid", 0, 0, true, false, false, NULL },
7310 { "global", 0, 0, true, false, false, NULL },
7311 { "initialize", 0, 0, true, false, false, NULL },
7312 { NULL, 0, 0, false, false, false, NULL }
7313 };
7314
7315 #endif
7316
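/* Callback for for_each_rtx: return nonzero if *PX is a LO_SUM whose
   base is the GP register.  */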
7317 static int
7318 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7319 {
7320 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7321 }
7322
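/* Return nonzero if INSN contains a LO_SUM that uses the GP register.  */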
7323 int
7324 alpha_find_lo_sum_using_gp (rtx insn)
7325 {
7326 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7327 }
7328
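/* Return nonzero if the current function must load the GP register in
   its prologue.  */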
7329 static int
7330 alpha_does_function_need_gp (void)
7331 {
7332 rtx insn;
7333
7334 /* The GP being variable is an OSF abi thing. */
7335 if (! TARGET_ABI_OSF)
7336 return 0;
7337
7338 /* We need the gp to load the address of __mcount. */
7339 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7340 return 1;
7341
7342 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7343 if (current_function_is_thunk)
7344 return 1;
7345
7346 /* The nonlocal receiver pattern assumes that the gp is valid for
7347 the nested function. Reasonable because it's almost always set
7348 correctly already. For the cases where that's wrong, make sure
7349 the nested function loads its gp on entry. */
7350 if (current_function_has_nonlocal_goto)
7351 return 1;
7352
7353 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7354 Even if we are a static function, we still need to do this in case
7355 our address is taken and passed to something like qsort. */
7356
7357 push_topmost_sequence ();
7358 insn = get_insns ();
7359 pop_topmost_sequence ();
7360
7361 for (; insn; insn = NEXT_INSN (insn))
7362 if (INSN_P (insn)
7363 && GET_CODE (PATTERN (insn)) != USE
7364 && GET_CODE (PATTERN (insn)) != CLOBBER
7365 && get_attr_usegp (insn))
7366 return 1;
7367
7368 return 0;
7369 }
7370
7371 \f
7372 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7373 sequences. */
7374
7375 static rtx
7376 set_frame_related_p (void)
7377 {
7378 rtx seq = get_insns ();
7379 rtx insn;
7380
7381 end_sequence ();
7382
7383 if (!seq)
7384 return NULL_RTX;
7385
7386 if (INSN_P (seq))
7387 {
7388 insn = seq;
7389 while (insn != NULL_RTX)
7390 {
7391 RTX_FRAME_RELATED_P (insn) = 1;
7392 insn = NEXT_INSN (insn);
7393 }
7394 seq = emit_insn (seq);
7395 }
7396 else
7397 {
7398 seq = emit_insn (seq);
7399 RTX_FRAME_RELATED_P (seq) = 1;
7400 }
7401 return seq;
7402 }
7403
7404 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7405
7406 /* Generates a store with the proper unwind info attached. VALUE is
7407 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7408 contains SP+FRAME_BIAS, and that is the unwind info that should be
7409 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7410 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7411
7412 static void
7413 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7414 HOST_WIDE_INT base_ofs, rtx frame_reg)
7415 {
7416 rtx addr, mem, insn;
7417
7418 addr = plus_constant (base_reg, base_ofs);
7419 mem = gen_rtx_MEM (DImode, addr);
7420 set_mem_alias_set (mem, alpha_sr_alias_set);
7421
7422 insn = emit_move_insn (mem, value);
7423 RTX_FRAME_RELATED_P (insn) = 1;
7424
7425 if (frame_bias || value != frame_reg)
7426 {
7427 if (frame_bias)
7428 {
7429 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7430 mem = gen_rtx_MEM (DImode, addr);
7431 }
7432
7433 REG_NOTES (insn)
7434 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7435 gen_rtx_SET (VOIDmode, mem, frame_reg),
7436 REG_NOTES (insn));
7437 }
7438 }
7439
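/* Convenience wrapper around emit_frame_store_1: save hard register
   REGNO at BASE_REG + BASE_OFS with the appropriate unwind info.  */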
7440 static void
7441 emit_frame_store (unsigned int regno, rtx base_reg,
7442 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7443 {
7444 rtx reg = gen_rtx_REG (DImode, regno);
7445 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7446 }
7447
7448 /* Write function prologue. */
7449
7450 /* On vms we have two kinds of functions:
7451
7452 - stack frame (PROC_STACK)
7453 these are 'normal' functions with local variables, which call
7454 other functions
7455 - register frame (PROC_REGISTER)
7456 keeps all data in registers, needs no stack
7457
7458 We must pass this to the assembler so it can generate the
7459 proper pdsc (procedure descriptor)
7460 This is done with the '.pdesc' command.
7461
7462 On non-VMS targets, we don't really differentiate between the two, as we can
7463 simply allocate stack without saving registers. */
7464
7465 void
7466 alpha_expand_prologue (void)
7467 {
7468 /* Registers to save. */
7469 unsigned long imask = 0;
7470 unsigned long fmask = 0;
7471 /* Stack space needed for pushing registers clobbered by us. */
7472 HOST_WIDE_INT sa_size;
7473 /* Complete stack size needed. */
7474 HOST_WIDE_INT frame_size;
7475 /* Offset from base reg to register save area. */
7476 HOST_WIDE_INT reg_offset;
7477 rtx sa_reg;
7478 int i;
7479
7480 sa_size = alpha_sa_size ();
7481
7482 frame_size = get_frame_size ();
7483 if (TARGET_ABI_OPEN_VMS)
7484 frame_size = ALPHA_ROUND (sa_size
7485 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7486 + frame_size
7487 + current_function_pretend_args_size);
7488 else if (TARGET_ABI_UNICOSMK)
7489 /* We have to allocate space for the DSIB if we generate a frame. */
7490 frame_size = ALPHA_ROUND (sa_size
7491 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7492 + ALPHA_ROUND (frame_size
7493 + current_function_outgoing_args_size);
7494 else
7495 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7496 + sa_size
7497 + ALPHA_ROUND (frame_size
7498 + current_function_pretend_args_size));
7499
7500 if (TARGET_ABI_OPEN_VMS)
7501 reg_offset = 8;
7502 else
7503 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7504
7505 alpha_sa_mask (&imask, &fmask);
7506
7507 /* Emit an insn to reload GP, if needed. */
7508 if (TARGET_ABI_OSF)
7509 {
7510 alpha_function_needs_gp = alpha_does_function_need_gp ();
7511 if (alpha_function_needs_gp)
7512 emit_insn (gen_prologue_ldgp ());
7513 }
7514
7515 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7516 the call to mcount ourselves, rather than having the linker do it
7517 magically in response to -pg. Since _mcount has special linkage,
7518 don't represent the call as a call. */
7519 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7520 emit_insn (gen_prologue_mcount ());
7521
7522 if (TARGET_ABI_UNICOSMK)
7523 unicosmk_gen_dsib (&imask);
7524
7525 /* Adjust the stack by the frame size. If the frame size is > 4096
7526 bytes, we need to be sure we probe somewhere in the first and last
7527 4096 bytes (we can probably get away without the latter test) and
7528 every 8192 bytes in between. If the frame size is > 32768, we
7529 do this in a loop. Otherwise, we generate the explicit probe
7530 instructions.
7531
7532 Note that we are only allowed to adjust sp once in the prologue. */
7533
7534 if (frame_size <= 32768)
7535 {
7536 if (frame_size > 4096)
7537 {
7538 int probed = 4096;
7539
7540 do
7541 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7542 ? -probed + 64
7543 : -probed)));
7544 while ((probed += 8192) < frame_size);
7545
7546 /* We only have to do this probe if we aren't saving registers. */
7547 if (sa_size == 0 && probed + 4096 < frame_size)
7548 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7549 }
7550
7551 if (frame_size != 0)
7552 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7553 GEN_INT (TARGET_ABI_UNICOSMK
7554 ? -frame_size + 64
7555 : -frame_size))));
7556 }
7557 else
7558 {
7559 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7560 number of 8192 byte blocks to probe. We then probe each block
7561 in the loop and then set SP to the proper location. If the
7562 amount remaining is > 4096, we have to do one more probe if we
7563 are not saving any registers. */
7564
7565 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7566 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7567 rtx ptr = gen_rtx_REG (DImode, 22);
7568 rtx count = gen_rtx_REG (DImode, 23);
7569 rtx seq;
7570
7571 emit_move_insn (count, GEN_INT (blocks));
7572 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7573 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7574
7575 /* Because of the difficulty in emitting a new basic block this
7576 late in the compilation, generate the loop as a single insn. */
7577 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7578
7579 if (leftover > 4096 && sa_size == 0)
7580 {
7581 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7582 MEM_VOLATILE_P (last) = 1;
7583 emit_move_insn (last, const0_rtx);
7584 }
7585
7586 if (TARGET_ABI_WINDOWS_NT)
7587 {
7588 /* For NT stack unwind (done by 'reverse execution'), it's
7589 not OK to take the result of a loop, even though the value
7590 is already in ptr, so we reload it via a single operation
7591 and subtract it from sp.
7592
7593 Yes, that's correct -- we have to reload the whole constant
7594 into a temporary via ldah+lda then subtract from sp. */
7595
7596 HOST_WIDE_INT lo, hi;
7597 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7598 hi = frame_size - lo;
7599
7600 emit_move_insn (ptr, GEN_INT (hi));
7601 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7602 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7603 ptr));
7604 }
7605 else
7606 {
7607 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7608 GEN_INT (-leftover)));
7609 }
7610
7611 /* This alternative is special, because the DWARF code cannot
7612 possibly intuit through the loop above. So we invent this
7613 note for it to look at instead. */
7614 RTX_FRAME_RELATED_P (seq) = 1;
7615 REG_NOTES (seq)
7616 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7617 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7618 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7619 GEN_INT (TARGET_ABI_UNICOSMK
7620 ? -frame_size + 64
7621 : -frame_size))),
7622 REG_NOTES (seq));
7623 }
7624
7625 if (!TARGET_ABI_UNICOSMK)
7626 {
7627 HOST_WIDE_INT sa_bias = 0;
7628
7629 /* Cope with very large offsets to the register save area. */
7630 sa_reg = stack_pointer_rtx;
7631 if (reg_offset + sa_size > 0x8000)
7632 {
7633 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7634 rtx sa_bias_rtx;
7635
7636 if (low + sa_size <= 0x8000)
7637 sa_bias = reg_offset - low, reg_offset = low;
7638 else
7639 sa_bias = reg_offset, reg_offset = 0;
7640
7641 sa_reg = gen_rtx_REG (DImode, 24);
7642 sa_bias_rtx = GEN_INT (sa_bias);
7643
7644 if (add_operand (sa_bias_rtx, DImode))
7645 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7646 else
7647 {
7648 emit_move_insn (sa_reg, sa_bias_rtx);
7649 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7650 }
7651 }
7652
7653 /* Save regs in stack order. Beginning with VMS PV. */
7654 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7655 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7656
7657 /* Save register RA next. */
7658 if (imask & (1UL << REG_RA))
7659 {
7660 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7661 imask &= ~(1UL << REG_RA);
7662 reg_offset += 8;
7663 }
7664
7665 /* Now save any other registers required to be saved. */
7666 for (i = 0; i < 31; i++)
7667 if (imask & (1UL << i))
7668 {
7669 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7670 reg_offset += 8;
7671 }
7672
7673 for (i = 0; i < 31; i++)
7674 if (fmask & (1UL << i))
7675 {
7676 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7677 reg_offset += 8;
7678 }
7679 }
7680 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7681 {
7682 /* The standard frame on the T3E includes space for saving registers.
7683 We just have to use it. We don't have to save the return address and
7684 the old frame pointer here - they are saved in the DSIB. */
7685
7686 reg_offset = -56;
7687 for (i = 9; i < 15; i++)
7688 if (imask & (1UL << i))
7689 {
7690 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7691 reg_offset -= 8;
7692 }
7693 for (i = 2; i < 10; i++)
7694 if (fmask & (1UL << i))
7695 {
7696 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7697 reg_offset -= 8;
7698 }
7699 }
7700
7701 if (TARGET_ABI_OPEN_VMS)
7702 {
7703 if (alpha_procedure_type == PT_REGISTER)
7704 /* Register frame procedures save the fp.
7705 ?? Ought to have a dwarf2 save for this. */
7706 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7707 hard_frame_pointer_rtx);
7708
7709 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7710 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7711 gen_rtx_REG (DImode, REG_PV)));
7712
7713 if (alpha_procedure_type != PT_NULL
7714 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7715 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7716
7717 /* If we have to allocate space for outgoing args, do it now. */
7718 if (current_function_outgoing_args_size != 0)
7719 {
7720 rtx seq
7721 = emit_move_insn (stack_pointer_rtx,
7722 plus_constant
7723 (hard_frame_pointer_rtx,
7724 - (ALPHA_ROUND
7725 (current_function_outgoing_args_size))));
7726
7727 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7728 if ! frame_pointer_needed. Setting the bit will change the CFA
7729 computation rule to use sp again, which would be wrong if we had
7730 frame_pointer_needed, as this means sp might move unpredictably
7731 later on.
7732
7733 Also, note that
7734 frame_pointer_needed
7735 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7736 and
7737 current_function_outgoing_args_size != 0
7738 => alpha_procedure_type != PT_NULL,
7739
7740 so when we are not setting the bit here, we are guaranteed to
7741 have emitted an FRP frame pointer update just before. */
7742 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7743 }
7744 }
7745 else if (!TARGET_ABI_UNICOSMK)
7746 {
7747 /* If we need a frame pointer, set it from the stack pointer. */
7748 if (frame_pointer_needed)
7749 {
7750 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7751 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7752 else
7753 /* This must always be the last instruction in the
7754 prologue, thus we emit a special move + clobber. */
7755 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7756 stack_pointer_rtx, sa_reg)));
7757 }
7758 }
7759
7760 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7761 the prologue, for exception handling reasons, we cannot do this for
7762 any insn that might fault. We could prevent this for mems with a
7763 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7764 have to prevent all such scheduling with a blockage.
7765
7766 Linux, on the other hand, never bothered to implement OSF/1's
7767 exception handling, and so doesn't care about such things. Anyone
7768 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7769
7770 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7771 emit_insn (gen_blockage ());
7772 }
7773
7774 /* Count the number of .file directives, so that .loc is up to date. */
7775 int num_source_filenames = 0;
7776
7777 /* Output the textual info surrounding the prologue. */
7778
7779 void
7780 alpha_start_function (FILE *file, const char *fnname,
7781 tree decl ATTRIBUTE_UNUSED)
7782 {
7783 unsigned long imask = 0;
7784 unsigned long fmask = 0;
7785 /* Stack space needed for pushing registers clobbered by us. */
7786 HOST_WIDE_INT sa_size;
7787 /* Complete stack size needed. */
7788 unsigned HOST_WIDE_INT frame_size;
7789 /* Offset from base reg to register save area. */
7790 HOST_WIDE_INT reg_offset;
7791 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7792 int i;
7793
7794 /* Don't emit an extern directive for functions defined in the same file. */
7795 if (TARGET_ABI_UNICOSMK)
7796 {
7797 tree name_tree;
7798 name_tree = get_identifier (fnname);
7799 TREE_ASM_WRITTEN (name_tree) = 1;
7800 }
7801
7802 alpha_fnname = fnname;
7803 sa_size = alpha_sa_size ();
7804
7805 frame_size = get_frame_size ();
7806 if (TARGET_ABI_OPEN_VMS)
7807 frame_size = ALPHA_ROUND (sa_size
7808 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7809 + frame_size
7810 + current_function_pretend_args_size);
7811 else if (TARGET_ABI_UNICOSMK)
7812 frame_size = ALPHA_ROUND (sa_size
7813 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7814 + ALPHA_ROUND (frame_size
7815 + current_function_outgoing_args_size);
7816 else
7817 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7818 + sa_size
7819 + ALPHA_ROUND (frame_size
7820 + current_function_pretend_args_size));
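/* In this remaining case the frame is, from the stack pointer up, the
rounded outgoing argument area, then the sa_size bytes of register
save area, then the rounded local frame plus pretend arguments;
reg_offset below is accordingly just the rounded outgoing-args size. */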
7821
7822 if (TARGET_ABI_OPEN_VMS)
7823 reg_offset = 8;
7824 else
7825 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7826
7827 alpha_sa_mask (&imask, &fmask);
7828
7829 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7830 We have to do that before the .ent directive as we cannot switch
7831 files within procedures with native ecoff because line numbers are
7832 linked to procedure descriptors.
7833 Outputting the lineno helps debugging of one line functions as they
7834 would otherwise get no line number at all. Please note that we would
7835 like to put out last_linenum from final.c, but it is not accessible. */
7836
7837 if (write_symbols == SDB_DEBUG)
7838 {
7839 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7840 ASM_OUTPUT_SOURCE_FILENAME (file,
7841 DECL_SOURCE_FILE (current_function_decl));
7842 #endif
7843 #ifdef SDB_OUTPUT_SOURCE_LINE
7844 if (debug_info_level != DINFO_LEVEL_TERSE)
7845 SDB_OUTPUT_SOURCE_LINE (file,
7846 DECL_SOURCE_LINE (current_function_decl));
7847 #endif
7848 }
7849
7850 /* Issue function start and label. */
7851 if (TARGET_ABI_OPEN_VMS
7852 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7853 {
7854 fputs ("\t.ent ", file);
7855 assemble_name (file, fnname);
7856 putc ('\n', file);
7857
7858 /* If the function needs GP, we'll write the "..ng" label there.
7859 Otherwise, do it here. */
7860 if (TARGET_ABI_OSF
7861 && ! alpha_function_needs_gp
7862 && ! current_function_is_thunk)
7863 {
7864 putc ('$', file);
7865 assemble_name (file, fnname);
7866 fputs ("..ng:\n", file);
7867 }
7868 }
7869
7870 strcpy (entry_label, fnname);
7871 if (TARGET_ABI_OPEN_VMS)
7872 strcat (entry_label, "..en");
7873
7874 /* For public functions, the label must be globalized by appending an
7875 additional colon. */
7876 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7877 strcat (entry_label, ":");
7878
7879 ASM_OUTPUT_LABEL (file, entry_label);
7880 inside_function = TRUE;
7881
7882 if (TARGET_ABI_OPEN_VMS)
7883 fprintf (file, "\t.base $%d\n", vms_base_regno);
7884
7885 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7886 && !flag_inhibit_size_directive)
7887 {
7888 /* Set flags in procedure descriptor to request IEEE-conformant
7889 math-library routines. The value we set it to is PDSC_EXC_IEEE
7890 (/usr/include/pdsc.h). */
7891 fputs ("\t.eflag 48\n", file);
7892 }
7893
7894 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7895 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7896 alpha_arg_offset = -frame_size + 48;
7897
7898 /* Describe our frame. If the frame size does not fit in a signed 32-bit
7899 integer, print it as zero to avoid an assembler error. We won't be
7900 properly describing such a frame, but that's the best we can do. */
7901 if (TARGET_ABI_UNICOSMK)
7902 ;
7903 else if (TARGET_ABI_OPEN_VMS)
7904 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7905 HOST_WIDE_INT_PRINT_DEC "\n",
7906 vms_unwind_regno,
7907 frame_size >= (1UL << 31) ? 0 : frame_size,
7908 reg_offset);
7909 else if (!flag_inhibit_size_directive)
7910 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7911 (frame_pointer_needed
7912 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7913 frame_size >= (1UL << 31) ? 0 : frame_size,
7914 current_function_pretend_args_size);
7915
7916 /* Describe which registers were spilled. */
7917 if (TARGET_ABI_UNICOSMK)
7918 ;
7919 else if (TARGET_ABI_OPEN_VMS)
7920 {
7921 if (imask)
7922 /* ??? Does VMS care if mask contains ra? The old code didn't
7923 set it, so I don't set it here. */
7924 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7925 if (fmask)
7926 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7927 if (alpha_procedure_type == PT_REGISTER)
7928 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7929 }
7930 else if (!flag_inhibit_size_directive)
7931 {
7932 if (imask)
7933 {
7934 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7935 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7936
7937 for (i = 0; i < 32; ++i)
7938 if (imask & (1UL << i))
7939 reg_offset += 8;
7940 }
7941
7942 if (fmask)
7943 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7944 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7945 }
7946
7947 #if TARGET_ABI_OPEN_VMS
7948 /* Ifdef'ed because link_section is only available then. */
7949 readonly_data_section ();
7950 fprintf (file, "\t.align 3\n");
7951 assemble_name (file, fnname); fputs ("..na:\n", file);
7952 fputs ("\t.ascii \"", file);
7953 assemble_name (file, fnname);
7954 fputs ("\\0\"\n", file);
7955 alpha_need_linkage (fnname, 1);
7956 text_section ();
7957 #endif
7958 }
7959
7960 /* Emit the .prologue note at the scheduled end of the prologue. */
7961
7962 static void
7963 alpha_output_function_end_prologue (FILE *file)
7964 {
7965 if (TARGET_ABI_UNICOSMK)
7966 ;
7967 else if (TARGET_ABI_OPEN_VMS)
7968 fputs ("\t.prologue\n", file);
7969 else if (TARGET_ABI_WINDOWS_NT)
7970 fputs ("\t.prologue 0\n", file);
7971 else if (!flag_inhibit_size_directive)
7972 fprintf (file, "\t.prologue %d\n",
7973 alpha_function_needs_gp || current_function_is_thunk);
7974 }
7975
7976 /* Write function epilogue. */
7977
7978 /* ??? At some point we will want to support full unwind, and so will
7979 need to mark the epilogue as well. At the moment, we just confuse
7980 dwarf2out. */
7981 #undef FRP
7982 #define FRP(exp) exp
7983
7984 void
7985 alpha_expand_epilogue (void)
7986 {
7987 /* Registers to save. */
7988 unsigned long imask = 0;
7989 unsigned long fmask = 0;
7990 /* Stack space needed for pushing registers clobbered by us. */
7991 HOST_WIDE_INT sa_size;
7992 /* Complete stack size needed. */
7993 HOST_WIDE_INT frame_size;
7994 /* Offset from base reg to register save area. */
7995 HOST_WIDE_INT reg_offset;
7996 int fp_is_frame_pointer, fp_offset;
7997 rtx sa_reg, sa_reg_exp = NULL;
7998 rtx sp_adj1, sp_adj2, mem;
7999 rtx eh_ofs;
8000 int i;
8001
8002 sa_size = alpha_sa_size ();
8003
8004 frame_size = get_frame_size ();
8005 if (TARGET_ABI_OPEN_VMS)
8006 frame_size = ALPHA_ROUND (sa_size
8007 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8008 + frame_size
8009 + current_function_pretend_args_size);
8010 else if (TARGET_ABI_UNICOSMK)
8011 frame_size = ALPHA_ROUND (sa_size
8012 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8013 + ALPHA_ROUND (frame_size
8014 + current_function_outgoing_args_size);
8015 else
8016 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8017 + sa_size
8018 + ALPHA_ROUND (frame_size
8019 + current_function_pretend_args_size));
8020
8021 if (TARGET_ABI_OPEN_VMS)
8022 {
8023 if (alpha_procedure_type == PT_STACK)
8024 reg_offset = 8;
8025 else
8026 reg_offset = 0;
8027 }
8028 else
8029 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8030
8031 alpha_sa_mask (&imask, &fmask);
8032
8033 fp_is_frame_pointer
8034 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8035 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8036 fp_offset = 0;
8037 sa_reg = stack_pointer_rtx;
8038
8039 if (current_function_calls_eh_return)
8040 eh_ofs = EH_RETURN_STACKADJ_RTX;
8041 else
8042 eh_ofs = NULL_RTX;
8043
8044 if (!TARGET_ABI_UNICOSMK && sa_size)
8045 {
8046 /* If we have a frame pointer, restore SP from it. */
8047 if ((TARGET_ABI_OPEN_VMS
8048 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8049 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8050 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8051
8052 /* Cope with very large offsets to the register save area. */
8053 if (reg_offset + sa_size > 0x8000)
8054 {
8055 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8056 HOST_WIDE_INT bias;
8057
8058 if (low + sa_size <= 0x8000)
8059 bias = reg_offset - low, reg_offset = low;
8060 else
8061 bias = reg_offset, reg_offset = 0;
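/* For example, with reg_offset == 0x12340 and a small save area, low
is 0x2340, so the bias becomes 0x10000 and the remaining 0x2340 is
folded into the 16-bit memory displacements; if instead the low part
plus sa_size would overflow 0x8000, the whole offset goes into the
bias and reg_offset restarts at zero. */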
8062
8063 sa_reg = gen_rtx_REG (DImode, 22);
8064 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8065
8066 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8067 }
8068
8069 /* Restore registers in order, excepting a true frame pointer. */
8070
8071 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8072 if (! eh_ofs)
8073 set_mem_alias_set (mem, alpha_sr_alias_set);
8074 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8075
8076 reg_offset += 8;
8077 imask &= ~(1UL << REG_RA);
8078
8079 for (i = 0; i < 31; ++i)
8080 if (imask & (1UL << i))
8081 {
8082 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8083 fp_offset = reg_offset;
8084 else
8085 {
8086 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8087 set_mem_alias_set (mem, alpha_sr_alias_set);
8088 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8089 }
8090 reg_offset += 8;
8091 }
8092
8093 for (i = 0; i < 31; ++i)
8094 if (fmask & (1UL << i))
8095 {
8096 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8097 set_mem_alias_set (mem, alpha_sr_alias_set);
8098 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8099 reg_offset += 8;
8100 }
8101 }
8102 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8103 {
8104 /* Restore callee-saved general-purpose registers. */
8105
8106 reg_offset = -56;
8107
8108 for (i = 9; i < 15; i++)
8109 if (imask & (1UL << i))
8110 {
8111 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8112 reg_offset));
8113 set_mem_alias_set (mem, alpha_sr_alias_set);
8114 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8115 reg_offset -= 8;
8116 }
8117
8118 for (i = 2; i < 10; i++)
8119 if (fmask & (1UL << i))
8120 {
8121 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8122 reg_offset));
8123 set_mem_alias_set (mem, alpha_sr_alias_set);
8124 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8125 reg_offset -= 8;
8126 }
8127
8128 /* Restore the return address from the DSIB. */
8129
8130 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8131 set_mem_alias_set (mem, alpha_sr_alias_set);
8132 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8133 }
8134
8135 if (frame_size || eh_ofs)
8136 {
8137 sp_adj1 = stack_pointer_rtx;
8138
8139 if (eh_ofs)
8140 {
8141 sp_adj1 = gen_rtx_REG (DImode, 23);
8142 emit_move_insn (sp_adj1,
8143 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8144 }
8145
8146 /* If the stack size is large, begin computation into a temporary
8147 register so as not to interfere with a potential fp restore,
8148 which must be consecutive with an SP restore. */
8149 if (frame_size < 32768
8150 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8151 sp_adj2 = GEN_INT (frame_size);
8152 else if (TARGET_ABI_UNICOSMK)
8153 {
8154 sp_adj1 = gen_rtx_REG (DImode, 23);
8155 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8156 sp_adj2 = const0_rtx;
8157 }
8158 else if (frame_size < 0x40007fffL)
8159 {
8160 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8161
8162 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8163 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8164 sp_adj1 = sa_reg;
8165 else
8166 {
8167 sp_adj1 = gen_rtx_REG (DImode, 23);
8168 FRP (emit_move_insn (sp_adj1, sp_adj2));
8169 }
8170 sp_adj2 = GEN_INT (low);
8171 }
8172 else
8173 {
8174 rtx tmp = gen_rtx_REG (DImode, 23);
8175 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8176 3, false));
8177 if (!sp_adj2)
8178 {
8179 /* We can't drop new things to memory this late, afaik,
8180 so build it up by pieces. */
8181 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8182 -(frame_size < 0)));
8183 gcc_assert (sp_adj2);
8184 }
8185 }
8186
8187 /* From now on, things must be in order. So emit blockages. */
8188
8189 /* Restore the frame pointer. */
8190 if (TARGET_ABI_UNICOSMK)
8191 {
8192 emit_insn (gen_blockage ());
8193 mem = gen_rtx_MEM (DImode,
8194 plus_constant (hard_frame_pointer_rtx, -16));
8195 set_mem_alias_set (mem, alpha_sr_alias_set);
8196 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8197 }
8198 else if (fp_is_frame_pointer)
8199 {
8200 emit_insn (gen_blockage ());
8201 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8202 set_mem_alias_set (mem, alpha_sr_alias_set);
8203 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8204 }
8205 else if (TARGET_ABI_OPEN_VMS)
8206 {
8207 emit_insn (gen_blockage ());
8208 FRP (emit_move_insn (hard_frame_pointer_rtx,
8209 gen_rtx_REG (DImode, vms_save_fp_regno)));
8210 }
8211
8212 /* Restore the stack pointer. */
8213 emit_insn (gen_blockage ());
8214 if (sp_adj2 == const0_rtx)
8215 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8216 else
8217 FRP (emit_move_insn (stack_pointer_rtx,
8218 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8219 }
8220 else
8221 {
8222 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8223 {
8224 emit_insn (gen_blockage ());
8225 FRP (emit_move_insn (hard_frame_pointer_rtx,
8226 gen_rtx_REG (DImode, vms_save_fp_regno)));
8227 }
8228 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8229 {
8230 /* Decrement the frame pointer if the function does not have a
8231 frame. */
8232
8233 emit_insn (gen_blockage ());
8234 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8235 hard_frame_pointer_rtx, constm1_rtx)));
8236 }
8237 }
8238 }
8239 \f
8240 /* Output the rest of the textual info surrounding the epilogue. */
8241
8242 void
8243 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8244 {
8245 #if TARGET_ABI_OPEN_VMS
8246 alpha_write_linkage (file, fnname, decl);
8247 #endif
8248
8249 /* End the function. */
8250 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8251 {
8252 fputs ("\t.end ", file);
8253 assemble_name (file, fnname);
8254 putc ('\n', file);
8255 }
8256 inside_function = FALSE;
8257
8258 /* Output jump tables and the static subroutine information block. */
8259 if (TARGET_ABI_UNICOSMK)
8260 {
8261 unicosmk_output_ssib (file, fnname);
8262 unicosmk_output_deferred_case_vectors (file);
8263 }
8264 }
8265
8266 #if TARGET_ABI_OSF
8267 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8268
8269 In order to avoid the hordes of differences between generated code
8270 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8271 lots of code loading up large constants, generate rtl and emit it
8272 instead of going straight to text.
8273
8274 Not sure why this idea hasn't been explored before... */
8275
8276 static void
8277 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8278 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8279 tree function)
8280 {
8281 HOST_WIDE_INT hi, lo;
8282 rtx this, insn, funexp;
8283
8284 reset_block_changes ();
8285
8286 /* We always require a valid GP. */
8287 emit_insn (gen_prologue_ldgp ());
8288 emit_note (NOTE_INSN_PROLOGUE_END);
8289
8290 /* Find the "this" pointer. If the function returns a structure,
8291 the structure return pointer is in $16. */
8292 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8293 this = gen_rtx_REG (Pmode, 17);
8294 else
8295 this = gen_rtx_REG (Pmode, 16);
8296
8297 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8298 entire constant for the add. */
8299 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8300 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
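/* For example, with delta == 0x9000 we get lo == -0x7000 and
hi == 0x10000; hi + lo == delta, so the two displacements are added
separately (ldah for the high part, lda for the low part) rather
than loading the full constant. */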
8301 if (hi + lo == delta)
8302 {
8303 if (hi)
8304 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8305 if (lo)
8306 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8307 }
8308 else
8309 {
8310 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8311 delta, -(delta < 0));
8312 emit_insn (gen_adddi3 (this, this, tmp));
8313 }
8314
8315 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8316 if (vcall_offset)
8317 {
8318 rtx tmp, tmp2;
8319
8320 tmp = gen_rtx_REG (Pmode, 0);
8321 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8322
8323 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8324 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8325 if (hi + lo == vcall_offset)
8326 {
8327 if (hi)
8328 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8329 }
8330 else
8331 {
8332 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8333 vcall_offset, -(vcall_offset < 0));
8334 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8335 lo = 0;
8336 }
8337 if (lo)
8338 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8339 else
8340 tmp2 = tmp;
8341 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8342
8343 emit_insn (gen_adddi3 (this, this, tmp));
8344 }
8345
8346 /* Generate a tail call to the target function. */
8347 if (! TREE_USED (function))
8348 {
8349 assemble_external (function);
8350 TREE_USED (function) = 1;
8351 }
8352 funexp = XEXP (DECL_RTL (function), 0);
8353 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8354 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8355 SIBLING_CALL_P (insn) = 1;
8356
8357 /* Run just enough of rest_of_compilation to get the insns emitted.
8358 There's not really enough bulk here to make other passes such as
8359 instruction scheduling worth while. Note that use_thunk calls
8360 assemble_start_function and assemble_end_function. */
8361 insn = get_insns ();
8362 insn_locators_initialize ();
8363 shorten_branches (insn);
8364 final_start_function (insn, file, 1);
8365 final (insn, file, 1);
8366 final_end_function ();
8367 }
8368 #endif /* TARGET_ABI_OSF */
8369 \f
8370 /* Debugging support. */
8371
8372 #include "gstab.h"
8373
8374 /* Count the number of sdb-related labels generated (to find block
8375 start and end boundaries). */
8376
8377 int sdb_label_count = 0;
8378
8379 /* Name of the file containing the current function. */
8380
8381 static const char *current_function_file = "";
8382
8383 /* Offsets to alpha virtual arg/local debugging pointers. */
8384
8385 long alpha_arg_offset;
8386 long alpha_auto_offset;
8387 \f
8388 /* Emit a new filename to a stream. */
8389
8390 void
8391 alpha_output_filename (FILE *stream, const char *name)
8392 {
8393 static int first_time = TRUE;
8394
8395 if (first_time)
8396 {
8397 first_time = FALSE;
8398 ++num_source_filenames;
8399 current_function_file = name;
8400 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8401 output_quoted_string (stream, name);
8402 fprintf (stream, "\n");
8403 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8404 fprintf (stream, "\t#@stabs\n");
8405 }
8406
8407 else if (write_symbols == DBX_DEBUG)
8408 /* dbxout.c will emit an appropriate .stabs directive. */
8409 return;
8410
8411 else if (name != current_function_file
8412 && strcmp (name, current_function_file) != 0)
8413 {
8414 if (inside_function && ! TARGET_GAS)
8415 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8416 else
8417 {
8418 ++num_source_filenames;
8419 current_function_file = name;
8420 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8421 }
8422
8423 output_quoted_string (stream, name);
8424 fprintf (stream, "\n");
8425 }
8426 }
8427 \f
8428 /* Structure to show the current status of registers and memory. */
8429
8430 struct shadow_summary
8431 {
8432 struct {
8433 unsigned int i : 31; /* Mask of int regs */
8434 unsigned int fp : 31; /* Mask of fp regs */
8435 unsigned int mem : 1; /* mem == imem | fpmem */
8436 } used, defd;
8437 };
8438
8439 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8440 to the summary structure. SET is nonzero if the insn is setting the
8441 object, otherwise zero. */
8442
8443 static void
8444 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8445 {
8446 const char *format_ptr;
8447 int i, j;
8448
8449 if (x == 0)
8450 return;
8451
8452 switch (GET_CODE (x))
8453 {
8454 /* ??? Note that this case would be incorrect if the Alpha had a
8455 ZERO_EXTRACT in SET_DEST. */
8456 case SET:
8457 summarize_insn (SET_SRC (x), sum, 0);
8458 summarize_insn (SET_DEST (x), sum, 1);
8459 break;
8460
8461 case CLOBBER:
8462 summarize_insn (XEXP (x, 0), sum, 1);
8463 break;
8464
8465 case USE:
8466 summarize_insn (XEXP (x, 0), sum, 0);
8467 break;
8468
8469 case ASM_OPERANDS:
8470 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8471 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8472 break;
8473
8474 case PARALLEL:
8475 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8476 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8477 break;
8478
8479 case SUBREG:
8480 summarize_insn (SUBREG_REG (x), sum, 0);
8481 break;
8482
8483 case REG:
8484 {
8485 int regno = REGNO (x);
8486 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8487
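/* $31 and $f31 are the hardwired zero registers, so reads and writes
of them have no effect on the shadow summary. */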
8488 if (regno == 31 || regno == 63)
8489 break;
8490
8491 if (set)
8492 {
8493 if (regno < 32)
8494 sum->defd.i |= mask;
8495 else
8496 sum->defd.fp |= mask;
8497 }
8498 else
8499 {
8500 if (regno < 32)
8501 sum->used.i |= mask;
8502 else
8503 sum->used.fp |= mask;
8504 }
8505 }
8506 break;
8507
8508 case MEM:
8509 if (set)
8510 sum->defd.mem = 1;
8511 else
8512 sum->used.mem = 1;
8513
8514 /* Find the regs used in memory address computation: */
8515 summarize_insn (XEXP (x, 0), sum, 0);
8516 break;
8517
8518 case CONST_INT: case CONST_DOUBLE:
8519 case SYMBOL_REF: case LABEL_REF: case CONST:
8520 case SCRATCH: case ASM_INPUT:
8521 break;
8522
8523 /* Handle common unary and binary ops for efficiency. */
8524 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8525 case MOD: case UDIV: case UMOD: case AND: case IOR:
8526 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8527 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8528 case NE: case EQ: case GE: case GT: case LE:
8529 case LT: case GEU: case GTU: case LEU: case LTU:
8530 summarize_insn (XEXP (x, 0), sum, 0);
8531 summarize_insn (XEXP (x, 1), sum, 0);
8532 break;
8533
8534 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8535 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8536 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8537 case SQRT: case FFS:
8538 summarize_insn (XEXP (x, 0), sum, 0);
8539 break;
8540
8541 default:
8542 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8543 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8544 switch (format_ptr[i])
8545 {
8546 case 'e':
8547 summarize_insn (XEXP (x, i), sum, 0);
8548 break;
8549
8550 case 'E':
8551 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8552 summarize_insn (XVECEXP (x, i, j), sum, 0);
8553 break;
8554
8555 case 'i':
8556 break;
8557
8558 default:
8559 gcc_unreachable ();
8560 }
8561 }
8562 }
8563
8564 /* Ensure a sufficient number of `trapb' insns are in the code when
8565 the user requests code with a trap precision of functions or
8566 instructions.
8567
8568 In naive mode, when the user requests a trap-precision of
8569 "instruction", a trapb is needed after every instruction that may
8570 generate a trap. This ensures that the code is resumption safe but
8571 it is also slow.
8572
8573 When optimizations are turned on, we delay issuing a trapb as long
8574 as possible. In this context, a trap shadow is the sequence of
8575 instructions that starts with a (potentially) trap generating
8576 instruction and extends to the next trapb or call_pal instruction
8577 (but GCC never generates call_pal by itself). We can delay (and
8578 therefore sometimes omit) a trapb subject to the following
8579 conditions:
8580
8581 (a) On entry to the trap shadow, if any Alpha register or memory
8582 location contains a value that is used as an operand value by some
8583 instruction in the trap shadow (live on entry), then no instruction
8584 in the trap shadow may modify the register or memory location.
8585
8586 (b) Within the trap shadow, the computation of the base register
8587 for a memory load or store instruction may not involve using the
8588 result of an instruction that might generate an UNPREDICTABLE
8589 result.
8590
8591 (c) Within the trap shadow, no register may be used more than once
8592 as a destination register. (This is to make life easier for the
8593 trap-handler.)
8594
8595 (d) The trap shadow may not include any branch instructions. */
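/* For example, under condition (c) two instructions in the same
candidate shadow may not both have $f1 as their destination; when the
summary below finds such a repeated destination (sum.defd overlapping
shadow.defd) it closes the shadow by emitting a trapb before the
second instruction. */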
8596
8597 static void
8598 alpha_handle_trap_shadows (void)
8599 {
8600 struct shadow_summary shadow;
8601 int trap_pending, exception_nesting;
8602 rtx i, n;
8603
8604 trap_pending = 0;
8605 exception_nesting = 0;
8606 shadow.used.i = 0;
8607 shadow.used.fp = 0;
8608 shadow.used.mem = 0;
8609 shadow.defd = shadow.used;
8610
8611 for (i = get_insns (); i ; i = NEXT_INSN (i))
8612 {
8613 if (GET_CODE (i) == NOTE)
8614 {
8615 switch (NOTE_LINE_NUMBER (i))
8616 {
8617 case NOTE_INSN_EH_REGION_BEG:
8618 exception_nesting++;
8619 if (trap_pending)
8620 goto close_shadow;
8621 break;
8622
8623 case NOTE_INSN_EH_REGION_END:
8624 exception_nesting--;
8625 if (trap_pending)
8626 goto close_shadow;
8627 break;
8628
8629 case NOTE_INSN_EPILOGUE_BEG:
8630 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8631 goto close_shadow;
8632 break;
8633 }
8634 }
8635 else if (trap_pending)
8636 {
8637 if (alpha_tp == ALPHA_TP_FUNC)
8638 {
8639 if (GET_CODE (i) == JUMP_INSN
8640 && GET_CODE (PATTERN (i)) == RETURN)
8641 goto close_shadow;
8642 }
8643 else if (alpha_tp == ALPHA_TP_INSN)
8644 {
8645 if (optimize > 0)
8646 {
8647 struct shadow_summary sum;
8648
8649 sum.used.i = 0;
8650 sum.used.fp = 0;
8651 sum.used.mem = 0;
8652 sum.defd = sum.used;
8653
8654 switch (GET_CODE (i))
8655 {
8656 case INSN:
8657 /* Annoyingly, get_attr_trap will die on these. */
8658 if (GET_CODE (PATTERN (i)) == USE
8659 || GET_CODE (PATTERN (i)) == CLOBBER)
8660 break;
8661
8662 summarize_insn (PATTERN (i), &sum, 0);
8663
8664 if ((sum.defd.i & shadow.defd.i)
8665 || (sum.defd.fp & shadow.defd.fp))
8666 {
8667 /* (c) would be violated */
8668 goto close_shadow;
8669 }
8670
8671 /* Combine shadow with summary of current insn: */
8672 shadow.used.i |= sum.used.i;
8673 shadow.used.fp |= sum.used.fp;
8674 shadow.used.mem |= sum.used.mem;
8675 shadow.defd.i |= sum.defd.i;
8676 shadow.defd.fp |= sum.defd.fp;
8677 shadow.defd.mem |= sum.defd.mem;
8678
8679 if ((sum.defd.i & shadow.used.i)
8680 || (sum.defd.fp & shadow.used.fp)
8681 || (sum.defd.mem & shadow.used.mem))
8682 {
8683 /* (a) would be violated (also takes care of (b)) */
8684 gcc_assert (get_attr_trap (i) != TRAP_YES
8685 || (!(sum.defd.i & sum.used.i)
8686 && !(sum.defd.fp & sum.used.fp)));
8687
8688 goto close_shadow;
8689 }
8690 break;
8691
8692 case JUMP_INSN:
8693 case CALL_INSN:
8694 case CODE_LABEL:
8695 goto close_shadow;
8696
8697 default:
8698 gcc_unreachable ();
8699 }
8700 }
8701 else
8702 {
8703 close_shadow:
8704 n = emit_insn_before (gen_trapb (), i);
8705 PUT_MODE (n, TImode);
8706 PUT_MODE (i, TImode);
8707 trap_pending = 0;
8708 shadow.used.i = 0;
8709 shadow.used.fp = 0;
8710 shadow.used.mem = 0;
8711 shadow.defd = shadow.used;
8712 }
8713 }
8714 }
8715
8716 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8717 && GET_CODE (i) == INSN
8718 && GET_CODE (PATTERN (i)) != USE
8719 && GET_CODE (PATTERN (i)) != CLOBBER
8720 && get_attr_trap (i) == TRAP_YES)
8721 {
8722 if (optimize && !trap_pending)
8723 summarize_insn (PATTERN (i), &shadow, 0);
8724 trap_pending = 1;
8725 }
8726 }
8727 }
8728 \f
8729 /* Alpha can only issue instruction groups simultaneously if they are
8730 suitably aligned. This is very processor-specific. */
8731 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8732 that are marked "fake". These instructions do not exist on that target,
8733 but it is possible to see these insns with deranged combinations of
8734 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8735 choose a result at random. */
8736
8737 enum alphaev4_pipe {
8738 EV4_STOP = 0,
8739 EV4_IB0 = 1,
8740 EV4_IB1 = 2,
8741 EV4_IBX = 4
8742 };
8743
8744 enum alphaev5_pipe {
8745 EV5_STOP = 0,
8746 EV5_NONE = 1,
8747 EV5_E01 = 2,
8748 EV5_E0 = 4,
8749 EV5_E1 = 8,
8750 EV5_FAM = 16,
8751 EV5_FA = 32,
8752 EV5_FM = 64
8753 };
8754
8755 static enum alphaev4_pipe
8756 alphaev4_insn_pipe (rtx insn)
8757 {
8758 if (recog_memoized (insn) < 0)
8759 return EV4_STOP;
8760 if (get_attr_length (insn) != 4)
8761 return EV4_STOP;
8762
8763 switch (get_attr_type (insn))
8764 {
8765 case TYPE_ILD:
8766 case TYPE_LDSYM:
8767 case TYPE_FLD:
8768 case TYPE_LD_L:
8769 return EV4_IBX;
8770
8771 case TYPE_IADD:
8772 case TYPE_ILOG:
8773 case TYPE_ICMOV:
8774 case TYPE_ICMP:
8775 case TYPE_FST:
8776 case TYPE_SHIFT:
8777 case TYPE_IMUL:
8778 case TYPE_FBR:
8779 case TYPE_MVI: /* fake */
8780 return EV4_IB0;
8781
8782 case TYPE_IST:
8783 case TYPE_MISC:
8784 case TYPE_IBR:
8785 case TYPE_JSR:
8786 case TYPE_CALLPAL:
8787 case TYPE_FCPYS:
8788 case TYPE_FCMOV:
8789 case TYPE_FADD:
8790 case TYPE_FDIV:
8791 case TYPE_FMUL:
8792 case TYPE_ST_C:
8793 case TYPE_MB:
8794 case TYPE_FSQRT: /* fake */
8795 case TYPE_FTOI: /* fake */
8796 case TYPE_ITOF: /* fake */
8797 return EV4_IB1;
8798
8799 default:
8800 gcc_unreachable ();
8801 }
8802 }
8803
8804 static enum alphaev5_pipe
8805 alphaev5_insn_pipe (rtx insn)
8806 {
8807 if (recog_memoized (insn) < 0)
8808 return EV5_STOP;
8809 if (get_attr_length (insn) != 4)
8810 return EV5_STOP;
8811
8812 switch (get_attr_type (insn))
8813 {
8814 case TYPE_ILD:
8815 case TYPE_FLD:
8816 case TYPE_LDSYM:
8817 case TYPE_IADD:
8818 case TYPE_ILOG:
8819 case TYPE_ICMOV:
8820 case TYPE_ICMP:
8821 return EV5_E01;
8822
8823 case TYPE_IST:
8824 case TYPE_FST:
8825 case TYPE_SHIFT:
8826 case TYPE_IMUL:
8827 case TYPE_MISC:
8828 case TYPE_MVI:
8829 case TYPE_LD_L:
8830 case TYPE_ST_C:
8831 case TYPE_MB:
8832 case TYPE_FTOI: /* fake */
8833 case TYPE_ITOF: /* fake */
8834 return EV5_E0;
8835
8836 case TYPE_IBR:
8837 case TYPE_JSR:
8838 case TYPE_CALLPAL:
8839 return EV5_E1;
8840
8841 case TYPE_FCPYS:
8842 return EV5_FAM;
8843
8844 case TYPE_FBR:
8845 case TYPE_FCMOV:
8846 case TYPE_FADD:
8847 case TYPE_FDIV:
8848 case TYPE_FSQRT: /* fake */
8849 return EV5_FA;
8850
8851 case TYPE_FMUL:
8852 return EV5_FM;
8853
8854 default:
8855 gcc_unreachable ();
8856 }
8857 }
8858
8859 /* IN_USE is a mask of the slots currently filled within the insn group.
8860 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8861 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8862
8863 LEN is, of course, the length of the group in bytes. */
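/* For example, a load (EV4_IBX) followed by an add (EV4_IB0) can still
be slotted as one group: the load first claims IB0 but leaves IBX set,
so when the add arrives the load is counted against IB1 instead, the
add takes IB0, and the pair issues together. */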
8864
8865 static rtx
8866 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8867 {
8868 int len, in_use;
8869
8870 len = in_use = 0;
8871
8872 if (! INSN_P (insn)
8873 || GET_CODE (PATTERN (insn)) == CLOBBER
8874 || GET_CODE (PATTERN (insn)) == USE)
8875 goto next_and_done;
8876
8877 while (1)
8878 {
8879 enum alphaev4_pipe pipe;
8880
8881 pipe = alphaev4_insn_pipe (insn);
8882 switch (pipe)
8883 {
8884 case EV4_STOP:
8885 /* Force complex instructions to start new groups. */
8886 if (in_use)
8887 goto done;
8888
8889 /* If this is a completely unrecognized insn, it's an asm.
8890 We don't know how long it is, so record length as -1 to
8891 signal a needed realignment. */
8892 if (recog_memoized (insn) < 0)
8893 len = -1;
8894 else
8895 len = get_attr_length (insn);
8896 goto next_and_done;
8897
8898 case EV4_IBX:
8899 if (in_use & EV4_IB0)
8900 {
8901 if (in_use & EV4_IB1)
8902 goto done;
8903 in_use |= EV4_IB1;
8904 }
8905 else
8906 in_use |= EV4_IB0 | EV4_IBX;
8907 break;
8908
8909 case EV4_IB0:
8910 if (in_use & EV4_IB0)
8911 {
8912 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8913 goto done;
8914 in_use |= EV4_IB1;
8915 }
8916 in_use |= EV4_IB0;
8917 break;
8918
8919 case EV4_IB1:
8920 if (in_use & EV4_IB1)
8921 goto done;
8922 in_use |= EV4_IB1;
8923 break;
8924
8925 default:
8926 gcc_unreachable ();
8927 }
8928 len += 4;
8929
8930 /* Haifa doesn't do well scheduling branches. */
8931 if (GET_CODE (insn) == JUMP_INSN)
8932 goto next_and_done;
8933
8934 next:
8935 insn = next_nonnote_insn (insn);
8936
8937 if (!insn || ! INSN_P (insn))
8938 goto done;
8939
8940 /* Let Haifa tell us where it thinks insn group boundaries are. */
8941 if (GET_MODE (insn) == TImode)
8942 goto done;
8943
8944 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8945 goto next;
8946 }
8947
8948 next_and_done:
8949 insn = next_nonnote_insn (insn);
8950
8951 done:
8952 *plen = len;
8953 *pin_use = in_use;
8954 return insn;
8955 }
8956
8957 /* IN_USE is a mask of the slots currently filled within the insn group.
8958 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8959 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8960
8961 LEN is, of course, the length of the group in bytes. */
8962
8963 static rtx
8964 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8965 {
8966 int len, in_use;
8967
8968 len = in_use = 0;
8969
8970 if (! INSN_P (insn)
8971 || GET_CODE (PATTERN (insn)) == CLOBBER
8972 || GET_CODE (PATTERN (insn)) == USE)
8973 goto next_and_done;
8974
8975 while (1)
8976 {
8977 enum alphaev5_pipe pipe;
8978
8979 pipe = alphaev5_insn_pipe (insn);
8980 switch (pipe)
8981 {
8982 case EV5_STOP:
8983 /* Force complex instructions to start new groups. */
8984 if (in_use)
8985 goto done;
8986
8987 /* If this is a completely unrecognized insn, it's an asm.
8988 We don't know how long it is, so record length as -1 to
8989 signal a needed realignment. */
8990 if (recog_memoized (insn) < 0)
8991 len = -1;
8992 else
8993 len = get_attr_length (insn);
8994 goto next_and_done;
8995
8996 /* ??? Most of the cases below are ones we would like to assert can
8997 never happen, as they would indicate an error either in Haifa or
8998 in the scheduling description. Unfortunately, Haifa never
8999 schedules the last instruction of the BB, so we don't have
9000 an accurate TI bit to go off of. */
9001 case EV5_E01:
9002 if (in_use & EV5_E0)
9003 {
9004 if (in_use & EV5_E1)
9005 goto done;
9006 in_use |= EV5_E1;
9007 }
9008 else
9009 in_use |= EV5_E0 | EV5_E01;
9010 break;
9011
9012 case EV5_E0:
9013 if (in_use & EV5_E0)
9014 {
9015 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9016 goto done;
9017 in_use |= EV5_E1;
9018 }
9019 in_use |= EV5_E0;
9020 break;
9021
9022 case EV5_E1:
9023 if (in_use & EV5_E1)
9024 goto done;
9025 in_use |= EV5_E1;
9026 break;
9027
9028 case EV5_FAM:
9029 if (in_use & EV5_FA)
9030 {
9031 if (in_use & EV5_FM)
9032 goto done;
9033 in_use |= EV5_FM;
9034 }
9035 else
9036 in_use |= EV5_FA | EV5_FAM;
9037 break;
9038
9039 case EV5_FA:
9040 if (in_use & EV5_FA)
9041 goto done;
9042 in_use |= EV5_FA;
9043 break;
9044
9045 case EV5_FM:
9046 if (in_use & EV5_FM)
9047 goto done;
9048 in_use |= EV5_FM;
9049 break;
9050
9051 case EV5_NONE:
9052 break;
9053
9054 default:
9055 gcc_unreachable ();
9056 }
9057 len += 4;
9058
9059 /* Haifa doesn't do well scheduling branches. */
9060 /* ??? If this is predicted not-taken, slotting continues, except
9061 that no more IBR, FBR, or JSR insns may be slotted. */
9062 if (GET_CODE (insn) == JUMP_INSN)
9063 goto next_and_done;
9064
9065 next:
9066 insn = next_nonnote_insn (insn);
9067
9068 if (!insn || ! INSN_P (insn))
9069 goto done;
9070
9071 /* Let Haifa tell us where it thinks insn group boundaries are. */
9072 if (GET_MODE (insn) == TImode)
9073 goto done;
9074
9075 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9076 goto next;
9077 }
9078
9079 next_and_done:
9080 insn = next_nonnote_insn (insn);
9081
9082 done:
9083 *plen = len;
9084 *pin_use = in_use;
9085 return insn;
9086 }
9087
9088 static rtx
9089 alphaev4_next_nop (int *pin_use)
9090 {
9091 int in_use = *pin_use;
9092 rtx nop;
9093
9094 if (!(in_use & EV4_IB0))
9095 {
9096 in_use |= EV4_IB0;
9097 nop = gen_nop ();
9098 }
9099 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9100 {
9101 in_use |= EV4_IB1;
9102 nop = gen_nop ();
9103 }
9104 else if (TARGET_FP && !(in_use & EV4_IB1))
9105 {
9106 in_use |= EV4_IB1;
9107 nop = gen_fnop ();
9108 }
9109 else
9110 nop = gen_unop ();
9111
9112 *pin_use = in_use;
9113 return nop;
9114 }
9115
9116 static rtx
9117 alphaev5_next_nop (int *pin_use)
9118 {
9119 int in_use = *pin_use;
9120 rtx nop;
9121
9122 if (!(in_use & EV5_E1))
9123 {
9124 in_use |= EV5_E1;
9125 nop = gen_nop ();
9126 }
9127 else if (TARGET_FP && !(in_use & EV5_FA))
9128 {
9129 in_use |= EV5_FA;
9130 nop = gen_fnop ();
9131 }
9132 else if (TARGET_FP && !(in_use & EV5_FM))
9133 {
9134 in_use |= EV5_FM;
9135 nop = gen_fnop ();
9136 }
9137 else
9138 nop = gen_unop ();
9139
9140 *pin_use = in_use;
9141 return nop;
9142 }
9143
9144 /* The instruction group alignment main loop. */
9145
9146 static void
9147 alpha_align_insns (unsigned int max_align,
9148 rtx (*next_group) (rtx, int *, int *),
9149 rtx (*next_nop) (int *))
9150 {
9151 /* ALIGN is the known alignment for the insn group. */
9152 unsigned int align;
9153 /* OFS is the offset of the current insn in the insn group. */
9154 int ofs;
9155 int prev_in_use, in_use, len, ldgp;
9156 rtx i, next;
9157
9158 /* Let shorten_branches take care of assigning alignments to code labels. */
9159 shorten_branches (get_insns ());
9160
9161 if (align_functions < 4)
9162 align = 4;
9163 else if ((unsigned int) align_functions < max_align)
9164 align = align_functions;
9165 else
9166 align = max_align;
9167
9168 ofs = prev_in_use = 0;
9169 i = get_insns ();
9170 if (GET_CODE (i) == NOTE)
9171 i = next_nonnote_insn (i);
9172
9173 ldgp = alpha_function_needs_gp ? 8 : 0;
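/* The initial ldgp sequence is two 4-byte instructions (an ldah/lda
pair computing $gp), so we count down 8 bytes before padding may be
inserted again; see the check further below. */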
9174
9175 while (i)
9176 {
9177 next = (*next_group) (i, &in_use, &len);
9178
9179 /* When we see a label, resync alignment etc. */
9180 if (GET_CODE (i) == CODE_LABEL)
9181 {
9182 unsigned int new_align = 1 << label_to_alignment (i);
9183
9184 if (new_align >= align)
9185 {
9186 align = new_align < max_align ? new_align : max_align;
9187 ofs = 0;
9188 }
9189
9190 else if (ofs & (new_align-1))
9191 ofs = (ofs | (new_align-1)) + 1;
9192 gcc_assert (!len);
9193 }
9194
9195 /* Handle complex instructions specially. */
9196 else if (in_use == 0)
9197 {
9198 /* Asms will have length < 0. This is a signal that we have
9199 lost alignment knowledge. Assume, however, that the asm
9200 will not mis-align instructions. */
9201 if (len < 0)
9202 {
9203 ofs = 0;
9204 align = 4;
9205 len = 0;
9206 }
9207 }
9208
9209 /* If the known alignment is smaller than the recognized insn group,
9210 realign the output. */
9211 else if ((int) align < len)
9212 {
9213 unsigned int new_log_align = len > 8 ? 4 : 3;
9214 rtx prev, where;
9215
9216 where = prev = prev_nonnote_insn (i);
9217 if (!where || GET_CODE (where) != CODE_LABEL)
9218 where = i;
9219
9220 /* Can't realign between a call and its gp reload. */
9221 if (! (TARGET_EXPLICIT_RELOCS
9222 && prev && GET_CODE (prev) == CALL_INSN))
9223 {
9224 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9225 align = 1 << new_log_align;
9226 ofs = 0;
9227 }
9228 }
9229
9230 /* We may not insert padding inside the initial ldgp sequence. */
9231 else if (ldgp > 0)
9232 ldgp -= len;
9233
9234 /* If the group won't fit in the same INT16 as the previous,
9235 we need to add padding to keep the group together. Rather
9236 than simply leaving the insn filling to the assembler, we
9237 can make use of the knowledge of what sorts of instructions
9238 were issued in the previous group to make sure that all of
9239 the added nops are really free. */
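/* For example, with align == 16 and ofs == 12, an 8-byte group would
straddle the boundary; (align - ofs) / 4 == 1, so one nop chosen by
*next_nop to fill a free slot of the previous group is emitted and
the new group starts at offset 0. */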
9240 else if (ofs + len > (int) align)
9241 {
9242 int nop_count = (align - ofs) / 4;
9243 rtx where;
9244
9245 /* Insert nops before labels, branches, and calls to truly merge
9246 the execution of the nops with the previous instruction group. */
9247 where = prev_nonnote_insn (i);
9248 if (where)
9249 {
9250 if (GET_CODE (where) == CODE_LABEL)
9251 {
9252 rtx where2 = prev_nonnote_insn (where);
9253 if (where2 && GET_CODE (where2) == JUMP_INSN)
9254 where = where2;
9255 }
9256 else if (GET_CODE (where) == INSN)
9257 where = i;
9258 }
9259 else
9260 where = i;
9261
9262 do
9263 emit_insn_before ((*next_nop)(&prev_in_use), where);
9264 while (--nop_count);
9265 ofs = 0;
9266 }
9267
9268 ofs = (ofs + len) & (align - 1);
9269 prev_in_use = in_use;
9270 i = next;
9271 }
9272 }
9273 \f
9274 /* Machine dependent reorg pass. */
9275
9276 static void
9277 alpha_reorg (void)
9278 {
9279 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9280 alpha_handle_trap_shadows ();
9281
9282 /* Due to the number of extra trapb insns, don't bother fixing up
9283 alignment when trap precision is instruction. Moreover, we can
9284 only do our job when sched2 is run. */
9285 if (optimize && !optimize_size
9286 && alpha_tp != ALPHA_TP_INSN
9287 && flag_schedule_insns_after_reload)
9288 {
9289 if (alpha_tune == PROCESSOR_EV4)
9290 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9291 else if (alpha_tune == PROCESSOR_EV5)
9292 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9293 }
9294 }
9295 \f
9296 #if !TARGET_ABI_UNICOSMK
9297
9298 #ifdef HAVE_STAMP_H
9299 #include <stamp.h>
9300 #endif
9301
9302 static void
9303 alpha_file_start (void)
9304 {
9305 #ifdef OBJECT_FORMAT_ELF
9306 /* If emitting dwarf2 debug information, we cannot generate a .file
9307 directive to start the file, as it will conflict with dwarf2out
9308 file numbers. So it's only useful when emitting mdebug output. */
9309 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9310 #endif
9311
9312 default_file_start ();
9313 #ifdef MS_STAMP
9314 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9315 #endif
9316
9317 fputs ("\t.set noreorder\n", asm_out_file);
9318 fputs ("\t.set volatile\n", asm_out_file);
9319 if (!TARGET_ABI_OPEN_VMS)
9320 fputs ("\t.set noat\n", asm_out_file);
9321 if (TARGET_EXPLICIT_RELOCS)
9322 fputs ("\t.set nomacro\n", asm_out_file);
9323 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9324 {
9325 const char *arch;
9326
9327 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9328 arch = "ev6";
9329 else if (TARGET_MAX)
9330 arch = "pca56";
9331 else if (TARGET_BWX)
9332 arch = "ev56";
9333 else if (alpha_cpu == PROCESSOR_EV5)
9334 arch = "ev5";
9335 else
9336 arch = "ev4";
9337
9338 fprintf (asm_out_file, "\t.arch %s\n", arch);
9339 }
9340 }
9341 #endif
9342
9343 #ifdef OBJECT_FORMAT_ELF
9344
9345 /* Switch to the section to which we should output X. The only thing
9346 special we do here is to honor small data. */
9347
9348 static void
9349 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9350 unsigned HOST_WIDE_INT align)
9351 {
9352 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9353 /* ??? Consider using mergeable sdata sections. */
9354 sdata_section ();
9355 else
9356 default_elf_select_rtx_section (mode, x, align);
9357 }
9358
9359 #endif /* OBJECT_FORMAT_ELF */
9360 \f
9361 /* Structure to collect function names for final output in link section. */
9362 /* Note that items marked with GTY can't be ifdef'ed out. */
9363
9364 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9365 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9366
9367 struct alpha_links GTY(())
9368 {
9369 int num;
9370 rtx linkage;
9371 enum links_kind lkind;
9372 enum reloc_kind rkind;
9373 };
9374
9375 struct alpha_funcs GTY(())
9376 {
9377 int num;
9378 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9379 links;
9380 };
9381
9382 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9383 splay_tree alpha_links_tree;
9384 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9385 splay_tree alpha_funcs_tree;
9386
9387 static GTY(()) int alpha_funcs_num;
9388
9389 #if TARGET_ABI_OPEN_VMS
9390
9391 /* Return the VMS argument type corresponding to MODE. */
9392
9393 enum avms_arg_type
9394 alpha_arg_type (enum machine_mode mode)
9395 {
9396 switch (mode)
9397 {
9398 case SFmode:
9399 return TARGET_FLOAT_VAX ? FF : FS;
9400 case DFmode:
9401 return TARGET_FLOAT_VAX ? FD : FT;
9402 default:
9403 return I64;
9404 }
9405 }
9406
9407 /* Return an rtx for an integer representing the VMS Argument Information
9408 register value. */
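/* The loop below packs the value as follows: the argument count sits
in the low bits, and each of the up-to-six register arguments gets a
3-bit AVMS type code starting at bit 8 (bits 8-10 for the first
argument, 11-13 for the second, and so on). */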
9409
9410 rtx
9411 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9412 {
9413 unsigned HOST_WIDE_INT regval = cum.num_args;
9414 int i;
9415
9416 for (i = 0; i < 6; i++)
9417 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9418
9419 return GEN_INT (regval);
9420 }
9421 \f
9422 /* Make (or fake) .linkage entry for function call.
9423
9424 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9425
9426 Return a SYMBOL_REF rtx for the linkage. */
9427
9428 rtx
9429 alpha_need_linkage (const char *name, int is_local)
9430 {
9431 splay_tree_node node;
9432 struct alpha_links *al;
9433
9434 if (name[0] == '*')
9435 name++;
9436
9437 if (is_local)
9438 {
9439 struct alpha_funcs *cfaf;
9440
9441 if (!alpha_funcs_tree)
9442 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9443 splay_tree_compare_pointers);
9444
9445 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9446
9447 cfaf->links = 0;
9448 cfaf->num = ++alpha_funcs_num;
9449
9450 splay_tree_insert (alpha_funcs_tree,
9451 (splay_tree_key) current_function_decl,
9452 (splay_tree_value) cfaf);
9453 }
9454
9455 if (alpha_links_tree)
9456 {
9457 /* Is this name already defined? */
9458
9459 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9460 if (node)
9461 {
9462 al = (struct alpha_links *) node->value;
9463 if (is_local)
9464 {
9465 /* Defined here but external assumed. */
9466 if (al->lkind == KIND_EXTERN)
9467 al->lkind = KIND_LOCAL;
9468 }
9469 else
9470 {
9471 /* Used here but unused assumed. */
9472 if (al->lkind == KIND_UNUSED)
9473 al->lkind = KIND_LOCAL;
9474 }
9475 return al->linkage;
9476 }
9477 }
9478 else
9479 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9480
9481 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9482 name = ggc_strdup (name);
9483
9484 /* Assume external if no definition. */
9485 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9486
9487 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9488 get_identifier (name);
9489
9490 /* Construct a SYMBOL_REF for us to call. */
9491 {
9492 size_t name_len = strlen (name);
9493 char *linksym = alloca (name_len + 6);
9494 linksym[0] = '$';
9495 memcpy (linksym + 1, name, name_len);
9496 memcpy (linksym + 1 + name_len, "..lk", 5);
9497 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9498 ggc_alloc_string (linksym, name_len + 5));
9499 }
9500
9501 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9502 (splay_tree_value) al);
9503
9504 return al->linkage;
9505 }
9506
9507 rtx
9508 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9509 {
9510 splay_tree_node cfunnode;
9511 struct alpha_funcs *cfaf;
9512 struct alpha_links *al;
9513 const char *name = XSTR (linkage, 0);
9514
9515 cfaf = (struct alpha_funcs *) 0;
9516 al = (struct alpha_links *) 0;
9517
9518 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9519 cfaf = (struct alpha_funcs *) cfunnode->value;
9520
9521 if (cfaf->links)
9522 {
9523 splay_tree_node lnode;
9524
9525 /* Is this name already defined? */
9526
9527 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9528 if (lnode)
9529 al = (struct alpha_links *) lnode->value;
9530 }
9531 else
9532 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9533
9534 if (!al)
9535 {
9536 size_t name_len;
9537 size_t buflen;
9538 char buf [512];
9539 char *linksym;
9540 splay_tree_node node = 0;
9541 struct alpha_links *anl;
9542
9543 if (name[0] == '*')
9544 name++;
9545
9546 name_len = strlen (name);
9547
9548 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9549 al->num = cfaf->num;
9550
9551 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9552 if (node)
9553 {
9554 anl = (struct alpha_links *) node->value;
9555 al->lkind = anl->lkind;
9556 }
9557
9558 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9559 buflen = strlen (buf);
9560 linksym = alloca (buflen + 1);
9561 memcpy (linksym, buf, buflen + 1);
9562
9563 al->linkage = gen_rtx_SYMBOL_REF
9564 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9565
9566 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9567 (splay_tree_value) al);
9568 }
9569
9570 if (rflag)
9571 al->rkind = KIND_CODEADDR;
9572 else
9573 al->rkind = KIND_LINKAGE;
9574
9575 if (lflag)
9576 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9577 else
9578 return al->linkage;
9579 }
9580
9581 static int
9582 alpha_write_one_linkage (splay_tree_node node, void *data)
9583 {
9584 const char *const name = (const char *) node->key;
9585 struct alpha_links *link = (struct alpha_links *) node->value;
9586 FILE *stream = (FILE *) data;
9587
9588 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9589 if (link->rkind == KIND_CODEADDR)
9590 {
9591 if (link->lkind == KIND_LOCAL)
9592 {
9593 /* Local and used */
9594 fprintf (stream, "\t.quad %s..en\n", name);
9595 }
9596 else
9597 {
9598 /* External and used, request code address. */
9599 fprintf (stream, "\t.code_address %s\n", name);
9600 }
9601 }
9602 else
9603 {
9604 if (link->lkind == KIND_LOCAL)
9605 {
9606 /* Local and used, build linkage pair. */
9607 fprintf (stream, "\t.quad %s..en\n", name);
9608 fprintf (stream, "\t.quad %s\n", name);
9609 }
9610 else
9611 {
9612 /* External and used, request linkage pair. */
9613 fprintf (stream, "\t.linkage %s\n", name);
9614 }
9615 }
9616
9617 return 0;
9618 }
9619
9620 static void
9621 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9622 {
9623 splay_tree_node node;
9624 struct alpha_funcs *func;
9625
9626 link_section ();
9627 fprintf (stream, "\t.align 3\n");
9628 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9629 func = (struct alpha_funcs *) node->value;
9630
9631 fputs ("\t.name ", stream);
9632 assemble_name (stream, funname);
9633 fputs ("..na\n", stream);
9634 ASM_OUTPUT_LABEL (stream, funname);
9635 fprintf (stream, "\t.pdesc ");
9636 assemble_name (stream, funname);
9637 fprintf (stream, "..en,%s\n",
9638 alpha_procedure_type == PT_STACK ? "stack"
9639 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9640
9641 if (func->links)
9642 {
9643 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9644 /* splay_tree_delete (func->links); */
9645 }
9646 }
9647
9648 /* Given a decl, a section name, and whether the decl initializer
9649 has relocs, choose attributes for the section. */
9650
9651 #define SECTION_VMS_OVERLAY SECTION_FORGET
9652 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9653 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9654
9655 static unsigned int
9656 vms_section_type_flags (tree decl, const char *name, int reloc)
9657 {
9658 unsigned int flags = default_section_type_flags (decl, name, reloc);
9659
9660 if (decl && DECL_ATTRIBUTES (decl)
9661 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9662 flags |= SECTION_VMS_OVERLAY;
9663 if (decl && DECL_ATTRIBUTES (decl)
9664 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9665 flags |= SECTION_VMS_GLOBAL;
9666 if (decl && DECL_ATTRIBUTES (decl)
9667 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9668 flags |= SECTION_VMS_INITIALIZE;
9669
9670 return flags;
9671 }
9672
9673 /* Switch to an arbitrary section NAME with attributes as specified
9674 by FLAGS. ALIGN specifies any known alignment requirements for
9675 the section; 0 if the default should be used. */
9676
9677 static void
9678 vms_asm_named_section (const char *name, unsigned int flags,
9679 tree decl ATTRIBUTE_UNUSED)
9680 {
9681 fputc ('\n', asm_out_file);
9682 fprintf (asm_out_file, ".section\t%s", name);
9683
9684 if (flags & SECTION_VMS_OVERLAY)
9685 fprintf (asm_out_file, ",OVR");
9686 if (flags & SECTION_VMS_GLOBAL)
9687 fprintf (asm_out_file, ",GBL");
9688 if (flags & SECTION_VMS_INITIALIZE)
9689 fprintf (asm_out_file, ",NOMOD");
9690 if (flags & SECTION_DEBUG)
9691 fprintf (asm_out_file, ",NOWRT");
9692
9693 fputc ('\n', asm_out_file);
9694 }
9695
9696 /* Record an element in the table of global constructors. SYMBOL is
9697 a SYMBOL_REF of the function to be called; PRIORITY is a number
9698 between 0 and MAX_INIT_PRIORITY.
9699
9700 Differs from default_ctors_section_asm_out_constructor in that the
9701 width of the .ctors entry is always 64 bits, rather than the 32 bits
9702 used by a normal pointer. */
9703
9704 static void
9705 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9706 {
9707 ctors_section ();
9708 assemble_align (BITS_PER_WORD);
9709 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9710 }
9711
9712 static void
9713 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9714 {
9715 dtors_section ();
9716 assemble_align (BITS_PER_WORD);
9717 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9718 }
9719 #else
9720
9721 rtx
9722 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9723 int is_local ATTRIBUTE_UNUSED)
9724 {
9725 return NULL_RTX;
9726 }
9727
9728 rtx
9729 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9730 tree cfundecl ATTRIBUTE_UNUSED,
9731 int lflag ATTRIBUTE_UNUSED,
9732 int rflag ATTRIBUTE_UNUSED)
9733 {
9734 return NULL_RTX;
9735 }
9736
9737 #endif /* TARGET_ABI_OPEN_VMS */
9738 \f
9739 #if TARGET_ABI_UNICOSMK
9740
9741 /* This evaluates to true if we do not know how to pass TYPE solely in
9742 registers. This is the case for all arguments that do not fit in two
9743 registers. */
9744
9745 static bool
9746 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9747 {
9748 if (type == NULL)
9749 return false;
9750
9751 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9752 return true;
9753 if (TREE_ADDRESSABLE (type))
9754 return true;
9755
9756 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9757 }
9758
9759 /* Define the offset between two registers, one to be eliminated, and the
9760 other its replacement, at the start of a routine. */
9761
9762 int
9763 unicosmk_initial_elimination_offset (int from, int to)
9764 {
9765 int fixed_size;
9766
9767 fixed_size = alpha_sa_size();
9768 if (fixed_size != 0)
9769 fixed_size += 48;
9770
9771 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9772 return -fixed_size;
9773 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9774 return 0;
9775 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9776 return (ALPHA_ROUND (current_function_outgoing_args_size)
9777 + ALPHA_ROUND (get_frame_size()));
9778 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9779 return (ALPHA_ROUND (fixed_size)
9780 + ALPHA_ROUND (get_frame_size()
9781 + current_function_outgoing_args_size));
9782 else
9783 gcc_unreachable ();
9784 }
9785
9786 /* Output the module name for .ident and .end directives. We have to strip
9787 directories and make sure that the module name starts with a letter
9788 or '$'. */
9789
9790 static void
9791 unicosmk_output_module_name (FILE *file)
9792 {
9793 const char *name = lbasename (main_input_filename);
9794 unsigned len = strlen (name);
9795 char *clean_name = alloca (len + 2);
9796 char *ptr = clean_name;
9797
9798 /* CAM only accepts module names that start with a letter or '$'. We
9799 prefix the module name with a '$' if necessary. */
9800
9801 if (!ISALPHA (*name))
9802 *ptr++ = '$';
9803 memcpy (ptr, name, len + 1);
9804 clean_symbol_name (clean_name);
9805 fputs (clean_name, file);
9806 }
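
/* Rough example (hypothetical input file): for "subdir/1file.c" this
   prints something like "$1file_c" - lbasename drops the directory, the
   '$' is prepended because the name does not start with a letter, and
   clean_symbol_name maps the remaining characters CAM cannot accept.  */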
9807
9808 /* Output the definition of a common variable. */
9809
9810 void
9811 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9812 {
9813 tree name_tree;
9814 printf ("T3E__: common %s\n", name);
9815
9816 common_section ();
9817 fputs ("\t.endp\n\n\t.psect ", file);
9818 assemble_name (file, name);
9819 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9820 fprintf (file, "\t.byte\t0:%d\n", size);
9821
9822 /* Mark the symbol as defined in this module. */
9823 name_tree = get_identifier (name);
9824 TREE_ASM_WRITTEN (name_tree) = 1;
9825 }
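
/* Rough shape of the output for a hypothetical 8-byte common variable
   "counter" aligned to 64 bits (floor_log2 (64 / BITS_PER_UNIT) == 3):

	.endp

	.psect counter,3,common
	.byte	0:8
*/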
9826
9827 #define SECTION_PUBLIC SECTION_MACH_DEP
9828 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9829 static int current_section_align;
9830
9831 static unsigned int
9832 unicosmk_section_type_flags (tree decl, const char *name,
9833 int reloc ATTRIBUTE_UNUSED)
9834 {
9835 unsigned int flags = default_section_type_flags (decl, name, reloc);
9836
9837 if (!decl)
9838 return flags;
9839
9840 if (TREE_CODE (decl) == FUNCTION_DECL)
9841 {
9842 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9843 if (align_functions_log > current_section_align)
9844 current_section_align = align_functions_log;
9845
9846 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9847 flags |= SECTION_MAIN;
9848 }
9849 else
9850 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9851
9852 if (TREE_PUBLIC (decl))
9853 flags |= SECTION_PUBLIC;
9854
9855 return flags;
9856 }
9857
9858 /* Generate a section name for DECL and associate it with the
9859 declaration. */
9860
9861 static void
9862 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9863 {
9864 const char *name;
9865 int len;
9866
9867 gcc_assert (decl);
9868
9869 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9870 name = default_strip_name_encoding (name);
9871 len = strlen (name);
9872
9873 if (TREE_CODE (decl) == FUNCTION_DECL)
9874 {
9875 char *string;
9876
9877 /* It is essential that we prefix the section name here because
9878 otherwise the section names generated for constructors and
9879 destructors confuse collect2. */
9880
9881 string = alloca (len + 6);
9882 sprintf (string, "code@%s", name);
9883 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9884 }
9885 else if (TREE_PUBLIC (decl))
9886 DECL_SECTION_NAME (decl) = build_string (len, name);
9887 else
9888 {
9889 char *string;
9890
9891 string = alloca (len + 6);
9892 sprintf (string, "data@%s", name);
9893 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9894 }
9895 }
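
/* Examples of the names chosen above (hypothetical decls): a function
   "foo" is placed in section "code@foo", a static variable "bar" in
   "data@bar", and a public variable keeps its plain name as the section
   name.  */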
9896
9897 /* Switch to an arbitrary section NAME with attributes as specified
9898 by FLAGS. ALIGN specifies any known alignment requirements for
9899 the section; 0 if the default should be used. */
9900
9901 static void
9902 unicosmk_asm_named_section (const char *name, unsigned int flags,
9903 tree decl ATTRIBUTE_UNUSED)
9904 {
9905 const char *kind;
9906
9907 /* Close the previous section. */
9908
9909 fputs ("\t.endp\n\n", asm_out_file);
9910
9911 /* Find out what kind of section we are opening. */
9912
9913 if (flags & SECTION_MAIN)
9914 fputs ("\t.start\tmain\n", asm_out_file);
9915
9916 if (flags & SECTION_CODE)
9917 kind = "code";
9918 else if (flags & SECTION_PUBLIC)
9919 kind = "common";
9920 else
9921 kind = "data";
9922
9923 if (current_section_align != 0)
9924 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9925 current_section_align, kind);
9926 else
9927 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9928 }
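
/* Illustrative output when switching to a code section "code@foo" with a
   recorded alignment of 4:

	.endp

	.psect	code@foo,4,code

   A psect for "main" is additionally preceded by a ".start main"
   directive.  */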
9929
9930 static void
9931 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9932 {
9933 if (DECL_P (decl)
9934 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9935 unicosmk_unique_section (decl, 0);
9936 }
9937
9938 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9939 in code sections because .align fills unused space with zeroes. */
9940
9941 void
9942 unicosmk_output_align (FILE *file, int align)
9943 {
9944 if (inside_function)
9945 fprintf (file, "\tgcc@code@align\t%d\n", align);
9946 else
9947 fprintf (file, "\t.align\t%d\n", align);
9948 }
9949
9950 /* Add a case vector to the current function's list of deferred case
9951 vectors. Case vectors have to be put into a separate section because CAM
9952 does not allow data definitions in code sections. */
9953
9954 void
9955 unicosmk_defer_case_vector (rtx lab, rtx vec)
9956 {
9957 struct machine_function *machine = cfun->machine;
9958
9959 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9960 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9961 machine->addr_list);
9962 }
9963
9964 /* Output a case vector. */
9965
9966 static void
9967 unicosmk_output_addr_vec (FILE *file, rtx vec)
9968 {
9969 rtx lab = XEXP (vec, 0);
9970 rtx body = XEXP (vec, 1);
9971 int vlen = XVECLEN (body, 0);
9972 int idx;
9973
9974 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9975
9976 for (idx = 0; idx < vlen; idx++)
9977 {
9978 ASM_OUTPUT_ADDR_VEC_ELT
9979 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9980 }
9981 }
9982
9983 /* Output current function's deferred case vectors. */
9984
9985 static void
9986 unicosmk_output_deferred_case_vectors (FILE *file)
9987 {
9988 struct machine_function *machine = cfun->machine;
9989 rtx t;
9990
9991 if (machine->addr_list == NULL_RTX)
9992 return;
9993
9994 data_section ();
9995 for (t = machine->addr_list; t; t = XEXP (t, 1))
9996 unicosmk_output_addr_vec (file, XEXP (t, 0));
9997 }
9998
9999 /* Generate the name of the SSIB section for the current function. */
10000
10001 #define SSIB_PREFIX "__SSIB_"
10002 #define SSIB_PREFIX_LEN 7
10003
10004 static const char *
10005 unicosmk_ssib_name (void)
10006 {
10007 /* This is ok since CAM won't be able to deal with names longer than that
10008 anyway. */
10009
10010 static char name[256];
10011
10012 rtx x;
10013 const char *fnname;
10014 int len;
10015
10016 x = DECL_RTL (cfun->decl);
10017 gcc_assert (GET_CODE (x) == MEM);
10018 x = XEXP (x, 0);
10019 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10020 fnname = XSTR (x, 0);
10021
10022 len = strlen (fnname);
10023 if (len + SSIB_PREFIX_LEN > 255)
10024 len = 255 - SSIB_PREFIX_LEN;
10025
10026 strcpy (name, SSIB_PREFIX);
10027 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10028 name[len + SSIB_PREFIX_LEN] = 0;
10029
10030 return name;
10031 }
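
/* For example (illustration only), the SSIB for a function "foo" is named
   "__SSIB_foo".  Function names longer than 248 characters are truncated
   so that the result still fits in the 256-byte static buffer.  */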
10032
10033 /* Set up the dynamic subprogram information block (DSIB) and update the
10034 frame pointer register ($15) for subroutines which have a frame. If the
10035 subroutine doesn't have a frame, simply increment $15. */
10036
10037 static void
10038 unicosmk_gen_dsib (unsigned long *imaskP)
10039 {
10040 if (alpha_procedure_type == PT_STACK)
10041 {
10042 const char *ssib_name;
10043 rtx mem;
10044
10045 /* Allocate 64 bytes for the DSIB. */
10046
10047 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10048 GEN_INT (-64))));
10049 emit_insn (gen_blockage ());
10050
10051 /* Save the return address. */
10052
10053 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10054 set_mem_alias_set (mem, alpha_sr_alias_set);
10055 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10056 (*imaskP) &= ~(1UL << REG_RA);
10057
10058 /* Save the old frame pointer. */
10059
10060 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10061 set_mem_alias_set (mem, alpha_sr_alias_set);
10062 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10063 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10064
10065 emit_insn (gen_blockage ());
10066
10067 /* Store the SSIB pointer. */
10068
10069 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10070 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10071 set_mem_alias_set (mem, alpha_sr_alias_set);
10072
10073 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10074 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10075 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10076
10077 /* Save the CIW index. */
10078
10079 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10080 set_mem_alias_set (mem, alpha_sr_alias_set);
10081 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10082
10083 emit_insn (gen_blockage ());
10084
10085 /* Set the new frame pointer. */
10086
10087 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10088 stack_pointer_rtx, GEN_INT (64))));
10089
10090 }
10091 else
10092 {
10093 /* Increment the frame pointer register to indicate that we do not
10094 have a frame. */
10095
10096 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10097 hard_frame_pointer_rtx, const1_rtx)));
10098 }
10099 }
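
/* Summary of the DSIB slots written above (offsets from the new stack
   pointer): 56 = return address, 48 = old frame pointer, 32 = SSIB
   pointer, 24 = CIW index.  The other words of the 64-byte block are not
   written here.  */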
10100
10101 /* Output the static subroutine information block for the current
10102 function. */
10103
10104 static void
10105 unicosmk_output_ssib (FILE *file, const char *fnname)
10106 {
10107 int len;
10108 int i;
10109 rtx x;
10110 rtx ciw;
10111 struct machine_function *machine = cfun->machine;
10112
10113 ssib_section ();
10114 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10115 unicosmk_ssib_name ());
10116
10117 /* Some required stuff and the function name length. */
10118
10119 len = strlen (fnname);
10120 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10121
10122 /* Saved registers
10123 ??? We don't do that yet. */
10124
10125 fputs ("\t.quad\t0\n", file);
10126
10127 /* Function address. */
10128
10129 fputs ("\t.quad\t", file);
10130 assemble_name (file, fnname);
10131 putc ('\n', file);
10132
10133 fputs ("\t.quad\t0\n", file);
10134 fputs ("\t.quad\t0\n", file);
10135
10136 /* Function name.
10137 ??? We do it the same way Cray CC does it but this could be
10138 simplified. */
10139
10140 for (i = 0; i < len; i++)
10141 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10142 if ((len % 8) == 0)
10143 fputs ("\t.quad\t0\n", file);
10144 else
10145 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10146
10147 /* All call information words used in the function. */
10148
10149 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10150 {
10151 ciw = XEXP (x, 0);
10152 #if HOST_BITS_PER_WIDE_INT == 32
10153 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10154 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10155 #else
10156 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10157 #endif
10158 }
10159 }
10160
10161 /* Add a call information word (CIW) to the list of the current function's
10162 CIWs and return its index.
10163
10164 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10165
10166 rtx
10167 unicosmk_add_call_info_word (rtx x)
10168 {
10169 rtx node;
10170 struct machine_function *machine = cfun->machine;
10171
10172 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10173 if (machine->first_ciw == NULL_RTX)
10174 machine->first_ciw = node;
10175 else
10176 XEXP (machine->last_ciw, 1) = node;
10177
10178 machine->last_ciw = node;
10179 ++machine->ciw_count;
10180
10181 return GEN_INT (machine->ciw_count
10182 + strlen (current_function_name ())/8 + 5);
10183 }
10184
10185 static char unicosmk_section_buf[100];
10186
10187 char *
10188 unicosmk_text_section (void)
10189 {
10190 static int count = 0;
10191 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10192 count++);
10193 return unicosmk_section_buf;
10194 }
10195
10196 char *
10197 unicosmk_data_section (void)
10198 {
10199 static int count = 1;
10200 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10201 count++);
10202 return unicosmk_section_buf;
10203 }
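
/* For illustration: the first call to unicosmk_text_section returns
   "\t.endp\n\n\t.psect\tgcc@text___0,code"; the counters ensure that each
   text and data psect in the file gets a distinct name.  */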
10204
10205 /* The Cray assembler doesn't accept extern declarations for symbols which
10206 are defined in the same file. We have to keep track of all global
10207 symbols which are referenced and/or defined in a source file and output
10208 extern declarations at the end of the file for those which are
10209 referenced but not defined. */
10210
10211 /* List of identifiers for which an extern declaration might have to be
10212 emitted. */
10213 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10214
10215 struct unicosmk_extern_list
10216 {
10217 struct unicosmk_extern_list *next;
10218 const char *name;
10219 };
10220
10221 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10222
10223 /* Output extern declarations which are required for every asm file. */
10224
10225 static void
10226 unicosmk_output_default_externs (FILE *file)
10227 {
10228 static const char *const externs[] =
10229 { "__T3E_MISMATCH" };
10230
10231 int i;
10232 int n;
10233
10234 n = ARRAY_SIZE (externs);
10235
10236 for (i = 0; i < n; i++)
10237 fprintf (file, "\t.extern\t%s\n", externs[i]);
10238 }
10239
10240 /* Output extern declarations for global symbols which have been
10241 referenced but not defined. */
10242
10243 static void
10244 unicosmk_output_externs (FILE *file)
10245 {
10246 struct unicosmk_extern_list *p;
10247 const char *real_name;
10248 int len;
10249 tree name_tree;
10250
10251 len = strlen (user_label_prefix);
10252 for (p = unicosmk_extern_head; p != 0; p = p->next)
10253 {
10254 /* We have to strip the encoding and possibly remove user_label_prefix
10255 from the identifier in order to handle -fleading-underscore and
10256 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10257 real_name = default_strip_name_encoding (p->name);
10258 if (len && p->name[0] == '*'
10259 && !memcmp (real_name, user_label_prefix, len))
10260 real_name += len;
10261
10262 name_tree = get_identifier (real_name);
10263 if (! TREE_ASM_WRITTEN (name_tree))
10264 {
10265 TREE_ASM_WRITTEN (name_tree) = 1;
10266 fputs ("\t.extern\t", file);
10267 assemble_name (file, p->name);
10268 putc ('\n', file);
10269 }
10270 }
10271 }
10272
10273 /* Record an extern. */
10274
10275 void
10276 unicosmk_add_extern (const char *name)
10277 {
10278 struct unicosmk_extern_list *p;
10279
10280 p = (struct unicosmk_extern_list *)
10281 xmalloc (sizeof (struct unicosmk_extern_list));
10282 p->next = unicosmk_extern_head;
10283 p->name = name;
10284 unicosmk_extern_head = p;
10285 }
10286
10287 /* The Cray assembler generates incorrect code if identifiers which
10288 conflict with register names are used as instruction operands. We have
10289 to replace such identifiers with DEX expressions. */
10290
10291 /* Structure to collect identifiers which have been replaced by DEX
10292 expressions. */
10293 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10294
10295 struct unicosmk_dex {
10296 struct unicosmk_dex *next;
10297 const char *name;
10298 };
10299
10300 /* List of identifiers which have been replaced by DEX expressions. The DEX
10301 number is determined by the position in the list. */
10302
10303 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10304
10305 /* The number of elements in the DEX list. */
10306
10307 static int unicosmk_dex_count = 0;
10308
10309 /* Check if NAME must be replaced by a DEX expression. */
10310
10311 static int
10312 unicosmk_special_name (const char *name)
10313 {
10314 if (name[0] == '*')
10315 ++name;
10316
10317 if (name[0] == '$')
10318 ++name;
10319
10320 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10321 return 0;
10322
10323 switch (name[1])
10324 {
10325 case '1': case '2':
10326 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10327
10328 case '3':
10329 return (name[2] == '\0'
10330 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10331
10332 default:
10333 return (ISDIGIT (name[1]) && name[2] == '\0');
10334 }
10335 }
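
/* Examples: "r31", "$f10" and "F2" are special because they collide with
   the CAM register names r0-r31 / f0-f31; "r32" and "frob" are not.  */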
10336
10337 /* Return the DEX number if X must be replaced by a DEX expression and 0
10338 otherwise. */
10339
10340 static int
10341 unicosmk_need_dex (rtx x)
10342 {
10343 struct unicosmk_dex *dex;
10344 const char *name;
10345 int i;
10346
10347 if (GET_CODE (x) != SYMBOL_REF)
10348 return 0;
10349
10350 name = XSTR (x, 0);
10351 if (! unicosmk_special_name (name))
10352 return 0;
10353
10354 i = unicosmk_dex_count;
10355 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10356 {
10357 if (! strcmp (name, dex->name))
10358 return i;
10359 --i;
10360 }
10361
10362 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10363 dex->name = name;
10364 dex->next = unicosmk_dex_list;
10365 unicosmk_dex_list = dex;
10366
10367 ++unicosmk_dex_count;
10368 return unicosmk_dex_count;
10369 }
10370
10371 /* Output the DEX definitions for this file. */
10372
10373 static void
10374 unicosmk_output_dex (FILE *file)
10375 {
10376 struct unicosmk_dex *dex;
10377 int i;
10378
10379 if (unicosmk_dex_list == NULL)
10380 return;
10381
10382 fprintf (file, "\t.dexstart\n");
10383
10384 i = unicosmk_dex_count;
10385 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10386 {
10387 fprintf (file, "\tDEX (%d) = ", i);
10388 assemble_name (file, dex->name);
10389 putc ('\n', file);
10390 --i;
10391 }
10392
10393 fprintf (file, "\t.dexend\n");
10394 }
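
/* Illustrative output if the identifiers "f1" and then "r15" were
   registered by unicosmk_need_dex (hypothetical names):

	.dexstart
	DEX (2) = r15
	DEX (1) = f1
	.dexend
*/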
10395
10396 /* Output text to appear at the beginning of an assembler file. */
10397
10398 static void
10399 unicosmk_file_start (void)
10400 {
10401 int i;
10402
10403 fputs ("\t.ident\t", asm_out_file);
10404 unicosmk_output_module_name (asm_out_file);
10405 fputs ("\n\n", asm_out_file);
10406
10407 /* The Unicos/Mk assembler uses different register names. Instead of trying
10408 to support them, we simply use micro definitions. */
10409
10410 /* CAM has different register names: rN for the integer register N and fN
10411 for the floating-point register N. Instead of trying to use these in
10412 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10413 register. */
10414
10415 for (i = 0; i < 32; ++i)
10416 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10417
10418 for (i = 0; i < 32; ++i)
10419 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10420
10421 putc ('\n', asm_out_file);
10422
10423 /* The .align directive fills unused space with zeroes, which does not work
10424 in code sections. We define the macro 'gcc@code@align' which uses nops
10425 instead. Note that it assumes that code sections always have the
10426 biggest possible alignment since . refers to the current offset from
10427 the beginning of the section. */
10428
10429 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10430 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10431 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10432 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10433 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10434 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10435 fputs ("\t.endr\n", asm_out_file);
10436 fputs ("\t.endif\n", asm_out_file);
10437 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10438
10439 /* Output extern declarations which should always be visible. */
10440 unicosmk_output_default_externs (asm_out_file);
10441
10442 /* Open a dummy section. We always need to be inside a section for the
10443 section-switching code to work correctly.
10444 ??? This should be a module id or something like that. I still have to
10445 figure out what the rules for those are. */
10446 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10447 }
10448
10449 /* Output text to appear at the end of an assembler file. This includes all
10450 pending extern declarations and DEX expressions. */
10451
10452 static void
10453 unicosmk_file_end (void)
10454 {
10455 fputs ("\t.endp\n\n", asm_out_file);
10456
10457 /* Output all pending externs. */
10458
10459 unicosmk_output_externs (asm_out_file);
10460
10461 /* Output dex definitions used for functions whose names conflict with
10462 register names. */
10463
10464 unicosmk_output_dex (asm_out_file);
10465
10466 fputs ("\t.end\t", asm_out_file);
10467 unicosmk_output_module_name (asm_out_file);
10468 putc ('\n', asm_out_file);
10469 }
10470
10471 #else
10472
10473 static void
10474 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10475 {}
10476
10477 static void
10478 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10479 {}
10480
10481 static void
10482 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10483 const char * fnname ATTRIBUTE_UNUSED)
10484 {}
10485
10486 rtx
10487 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10488 {
10489 return NULL_RTX;
10490 }
10491
10492 static int
10493 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10494 {
10495 return 0;
10496 }
10497
10498 #endif /* TARGET_ABI_UNICOSMK */
10499
10500 static void
10501 alpha_init_libfuncs (void)
10502 {
10503 if (TARGET_ABI_UNICOSMK)
10504 {
10505 /* Prevent gcc from generating calls to __divsi3. */
10506 set_optab_libfunc (sdiv_optab, SImode, 0);
10507 set_optab_libfunc (udiv_optab, SImode, 0);
10508
10509 /* Use the functions provided by the system library
10510 for DImode integer division. */
10511 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10512 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10513 }
10514 else if (TARGET_ABI_OPEN_VMS)
10515 {
10516 /* Use the VMS runtime library functions for division and
10517 remainder. */
10518 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10519 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10520 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10521 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10522 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10523 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10524 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10525 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10526 }
10527 }
10528
10529 \f
10530 /* Initialize the GCC target structure. */
10531 #if TARGET_ABI_OPEN_VMS
10532 # undef TARGET_ATTRIBUTE_TABLE
10533 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10534 # undef TARGET_SECTION_TYPE_FLAGS
10535 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10536 #endif
10537
10538 #undef TARGET_IN_SMALL_DATA_P
10539 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10540
10541 #if TARGET_ABI_UNICOSMK
10542 # undef TARGET_INSERT_ATTRIBUTES
10543 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10544 # undef TARGET_SECTION_TYPE_FLAGS
10545 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10546 # undef TARGET_ASM_UNIQUE_SECTION
10547 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10548 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10549 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10550 # undef TARGET_ASM_GLOBALIZE_LABEL
10551 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10552 # undef TARGET_MUST_PASS_IN_STACK
10553 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10554 #endif
10555
10556 #undef TARGET_ASM_ALIGNED_HI_OP
10557 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10558 #undef TARGET_ASM_ALIGNED_DI_OP
10559 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10560
10561 /* Default unaligned ops are provided for ELF systems. To get unaligned
10562 data for non-ELF systems, we have to turn off auto alignment. */
10563 #ifndef OBJECT_FORMAT_ELF
10564 #undef TARGET_ASM_UNALIGNED_HI_OP
10565 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10566 #undef TARGET_ASM_UNALIGNED_SI_OP
10567 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10568 #undef TARGET_ASM_UNALIGNED_DI_OP
10569 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10570 #endif
10571
10572 #ifdef OBJECT_FORMAT_ELF
10573 #undef TARGET_ASM_SELECT_RTX_SECTION
10574 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10575 #endif
10576
10577 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10578 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10579
10580 #undef TARGET_INIT_LIBFUNCS
10581 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10582
10583 #if TARGET_ABI_UNICOSMK
10584 #undef TARGET_ASM_FILE_START
10585 #define TARGET_ASM_FILE_START unicosmk_file_start
10586 #undef TARGET_ASM_FILE_END
10587 #define TARGET_ASM_FILE_END unicosmk_file_end
10588 #else
10589 #undef TARGET_ASM_FILE_START
10590 #define TARGET_ASM_FILE_START alpha_file_start
10591 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10592 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10593 #endif
10594
10595 #undef TARGET_SCHED_ADJUST_COST
10596 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10597 #undef TARGET_SCHED_ISSUE_RATE
10598 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10599 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10600 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10601 alpha_multipass_dfa_lookahead
10602
10603 #undef TARGET_HAVE_TLS
10604 #define TARGET_HAVE_TLS HAVE_AS_TLS
10605
10606 #undef TARGET_INIT_BUILTINS
10607 #define TARGET_INIT_BUILTINS alpha_init_builtins
10608 #undef TARGET_EXPAND_BUILTIN
10609 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10610 #undef TARGET_FOLD_BUILTIN
10611 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10612
10613 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10614 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10615 #undef TARGET_CANNOT_COPY_INSN_P
10616 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10617 #undef TARGET_CANNOT_FORCE_CONST_MEM
10618 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10619
10620 #if TARGET_ABI_OSF
10621 #undef TARGET_ASM_OUTPUT_MI_THUNK
10622 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10623 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10624 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10625 #undef TARGET_STDARG_OPTIMIZE_HOOK
10626 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10627 #endif
10628
10629 #undef TARGET_RTX_COSTS
10630 #define TARGET_RTX_COSTS alpha_rtx_costs
10631 #undef TARGET_ADDRESS_COST
10632 #define TARGET_ADDRESS_COST hook_int_rtx_0
10633
10634 #undef TARGET_MACHINE_DEPENDENT_REORG
10635 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10636
10637 #undef TARGET_PROMOTE_FUNCTION_ARGS
10638 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10639 #undef TARGET_PROMOTE_FUNCTION_RETURN
10640 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10641 #undef TARGET_PROMOTE_PROTOTYPES
10642 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10643 #undef TARGET_RETURN_IN_MEMORY
10644 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10645 #undef TARGET_PASS_BY_REFERENCE
10646 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10647 #undef TARGET_SETUP_INCOMING_VARARGS
10648 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10649 #undef TARGET_STRICT_ARGUMENT_NAMING
10650 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10651 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10652 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10653 #undef TARGET_SPLIT_COMPLEX_ARG
10654 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10655 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10656 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10657 #undef TARGET_ARG_PARTIAL_BYTES
10658 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10659
10660 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10661 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10662 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10663 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10664
10665 #undef TARGET_BUILD_BUILTIN_VA_LIST
10666 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10667
10668 /* The Alpha architecture does not require sequential consistency. See
10669 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10670 for an example of how it can be violated in practice. */
10671 #undef TARGET_RELAXED_ORDERING
10672 #define TARGET_RELAXED_ORDERING true
10673
10674 #undef TARGET_DEFAULT_TARGET_FLAGS
10675 #define TARGET_DEFAULT_TARGET_FLAGS \
10676 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10677 #undef TARGET_HANDLE_OPTION
10678 #define TARGET_HANDLE_OPTION alpha_handle_option
10679
10680 struct gcc_target targetm = TARGET_INITIALIZER;
10681
10682 \f
10683 #include "gt-alpha.h"